mirror of
https://github.com/DISARMFoundation/DISARMframeworks.git
synced 2025-01-17 10:17:33 -05:00
Corrected columns for urls sheet, added back asset into technique names, and tidied up mapping of existing incidents to amended techniques
This commit is contained in:
parent
964938bd15
commit
84f0700c2e
Binary file not shown.
Binary file not shown.
@ -568,7 +568,7 @@ function handleTechniqueClick(box) {
|
||||
<td id="T0137.006">T0137.006 Manipulate Stocks<input type="checkbox" id="T0137.006check" onclick="handleTechniqueClick('T0137.006')"></td>
|
||||
<td bgcolor=white> </td>
|
||||
<td id="T0088.001">T0088.001 Develop AI-Generated Audio (Deepfakes)<input type="checkbox" id="T0088.001check" onclick="handleTechniqueClick('T0088.001')"></td>
|
||||
<td id="T0152.002">T0152.002 Blog<input type="checkbox" id="T0152.002check" onclick="handleTechniqueClick('T0152.002')"></td>
|
||||
<td id="T0152.002">T0152.002 Blog Asset<input type="checkbox" id="T0152.002check" onclick="handleTechniqueClick('T0152.002')"></td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
@ -604,7 +604,7 @@ function handleTechniqueClick(box) {
|
||||
<td id="T0138.001">T0138.001 Encourage<input type="checkbox" id="T0138.001check" onclick="handleTechniqueClick('T0138.001')"></td>
|
||||
<td bgcolor=white> </td>
|
||||
<td id="T0089">T0089 Obtain Private Documents<input type="checkbox" id="T0089check" onclick="handleTechniqueClick('T0089')"></td>
|
||||
<td id="T0152.004">T0152.004 Website<input type="checkbox" id="T0152.004check" onclick="handleTechniqueClick('T0152.004')"></td>
|
||||
<td id="T0152.004">T0152.004 Website Asset<input type="checkbox" id="T0152.004check" onclick="handleTechniqueClick('T0152.004')"></td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
@ -657,7 +657,7 @@ function handleTechniqueClick(box) {
|
||||
<td bgcolor=white> </td>
|
||||
<td id="T0139">T0139 Dissuade from Acting<input type="checkbox" id="T0139check" onclick="handleTechniqueClick('T0139')"></td>
|
||||
<td bgcolor=white> </td>
|
||||
<td id="T0146">T0146 Account<input type="checkbox" id="T0146check" onclick="handleTechniqueClick('T0146')"></td>
|
||||
<td id="T0146">T0146 Account Asset<input type="checkbox" id="T0146check" onclick="handleTechniqueClick('T0146')"></td>
|
||||
<td id="T0152.007">T0152.007 Audio Platform<input type="checkbox" id="T0152.007check" onclick="handleTechniqueClick('T0152.007')"></td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
@ -675,7 +675,7 @@ function handleTechniqueClick(box) {
|
||||
<td bgcolor=white> </td>
|
||||
<td id="T0139.001">T0139.001 Discourage<input type="checkbox" id="T0139.001check" onclick="handleTechniqueClick('T0139.001')"></td>
|
||||
<td bgcolor=white> </td>
|
||||
<td id="T0146.001">T0146.001 Free Account<input type="checkbox" id="T0146.001check" onclick="handleTechniqueClick('T0146.001')"></td>
|
||||
<td id="T0146.001">T0146.001 Free Account Asset<input type="checkbox" id="T0146.001check" onclick="handleTechniqueClick('T0146.001')"></td>
|
||||
<td id="T0152.008">T0152.008 Live Streaming Platform<input type="checkbox" id="T0152.008check" onclick="handleTechniqueClick('T0152.008')"></td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
@ -693,7 +693,7 @@ function handleTechniqueClick(box) {
|
||||
<td bgcolor=white> </td>
|
||||
<td id="T0139.002">T0139.002 Silence<input type="checkbox" id="T0139.002check" onclick="handleTechniqueClick('T0139.002')"></td>
|
||||
<td bgcolor=white> </td>
|
||||
<td id="T0146.002">T0146.002 Paid Account<input type="checkbox" id="T0146.002check" onclick="handleTechniqueClick('T0146.002')"></td>
|
||||
<td id="T0146.002">T0146.002 Paid Account Asset<input type="checkbox" id="T0146.002check" onclick="handleTechniqueClick('T0146.002')"></td>
|
||||
<td id="T0152.009">T0152.009 Software Delivery Platform<input type="checkbox" id="T0152.009check" onclick="handleTechniqueClick('T0152.009')"></td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
@ -711,7 +711,7 @@ function handleTechniqueClick(box) {
|
||||
<td bgcolor=white> </td>
|
||||
<td id="T0139.003">T0139.003 Deter<input type="checkbox" id="T0139.003check" onclick="handleTechniqueClick('T0139.003')"></td>
|
||||
<td bgcolor=white> </td>
|
||||
<td id="T0146.003">T0146.003 Verified Account<input type="checkbox" id="T0146.003check" onclick="handleTechniqueClick('T0146.003')"></td>
|
||||
<td id="T0146.003">T0146.003 Verified Account Asset<input type="checkbox" id="T0146.003check" onclick="handleTechniqueClick('T0146.003')"></td>
|
||||
<td id="T0152.010">T0152.010 File Hosting Platform<input type="checkbox" id="T0152.010check" onclick="handleTechniqueClick('T0152.010')"></td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
@ -729,7 +729,7 @@ function handleTechniqueClick(box) {
|
||||
<td bgcolor=white> </td>
|
||||
<td id="T0140">T0140 Cause Harm<input type="checkbox" id="T0140check" onclick="handleTechniqueClick('T0140')"></td>
|
||||
<td bgcolor=white> </td>
|
||||
<td id="T0146.004">T0146.004 Administrator Account<input type="checkbox" id="T0146.004check" onclick="handleTechniqueClick('T0146.004')"></td>
|
||||
<td id="T0146.004">T0146.004 Administrator Account Asset<input type="checkbox" id="T0146.004check" onclick="handleTechniqueClick('T0146.004')"></td>
|
||||
<td id="T0152.011">T0152.011 Wiki Platform<input type="checkbox" id="T0152.011check" onclick="handleTechniqueClick('T0152.011')"></td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
@ -783,7 +783,7 @@ function handleTechniqueClick(box) {
|
||||
<td bgcolor=white> </td>
|
||||
<td id="T0140.003">T0140.003 Spread Hate<input type="checkbox" id="T0140.003check" onclick="handleTechniqueClick('T0140.003')"></td>
|
||||
<td bgcolor=white> </td>
|
||||
<td id="T0146.007">T0146.007 Automated Account<input type="checkbox" id="T0146.007check" onclick="handleTechniqueClick('T0146.007')"></td>
|
||||
<td id="T0146.007">T0146.007 Automated Account Asset<input type="checkbox" id="T0146.007check" onclick="handleTechniqueClick('T0146.007')"></td>
|
||||
<td id="T0153.001">T0153.001 Email Platform<input type="checkbox" id="T0153.001check" onclick="handleTechniqueClick('T0153.001')"></td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
@ -801,7 +801,7 @@ function handleTechniqueClick(box) {
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td id="T0147">T0147 Software<input type="checkbox" id="T0147check" onclick="handleTechniqueClick('T0147')"></td>
|
||||
<td id="T0147">T0147 Software Asset<input type="checkbox" id="T0147check" onclick="handleTechniqueClick('T0147')"></td>
|
||||
<td id="T0153.002">T0153.002 Link Shortening Platform<input type="checkbox" id="T0153.002check" onclick="handleTechniqueClick('T0153.002')"></td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
@ -819,8 +819,8 @@ function handleTechniqueClick(box) {
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td id="T0147.001">T0147.001 Game<input type="checkbox" id="T0147.001check" onclick="handleTechniqueClick('T0147.001')"></td>
|
||||
<td id="T0153.003">T0153.003 Shortened Link<input type="checkbox" id="T0153.003check" onclick="handleTechniqueClick('T0153.003')"></td>
|
||||
<td id="T0147.001">T0147.001 Game Asset<input type="checkbox" id="T0147.001check" onclick="handleTechniqueClick('T0147.001')"></td>
|
||||
<td id="T0153.003">T0153.003 Shortened Link Asset<input type="checkbox" id="T0153.003check" onclick="handleTechniqueClick('T0153.003')"></td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
@ -837,8 +837,8 @@ function handleTechniqueClick(box) {
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td id="T0147.002">T0147.002 Game Mod<input type="checkbox" id="T0147.002check" onclick="handleTechniqueClick('T0147.002')"></td>
|
||||
<td id="T0153.004">T0153.004 QR Code<input type="checkbox" id="T0153.004check" onclick="handleTechniqueClick('T0153.004')"></td>
|
||||
<td id="T0147.002">T0147.002 Game Mod Asset<input type="checkbox" id="T0147.002check" onclick="handleTechniqueClick('T0147.002')"></td>
|
||||
<td id="T0153.004">T0153.004 QR Code Asset<input type="checkbox" id="T0153.004check" onclick="handleTechniqueClick('T0153.004')"></td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
@ -855,7 +855,7 @@ function handleTechniqueClick(box) {
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td id="T0147.003">T0147.003 Malware<input type="checkbox" id="T0147.003check" onclick="handleTechniqueClick('T0147.003')"></td>
|
||||
<td id="T0147.003">T0147.003 Malware Asset<input type="checkbox" id="T0147.003check" onclick="handleTechniqueClick('T0147.003')"></td>
|
||||
<td id="T0153.005">T0153.005 Online Advertising Platform<input type="checkbox" id="T0153.005check" onclick="handleTechniqueClick('T0153.005')"></td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
@ -873,7 +873,7 @@ function handleTechniqueClick(box) {
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td id="T0147.004">T0147.004 Mobile App<input type="checkbox" id="T0147.004check" onclick="handleTechniqueClick('T0147.004')"></td>
|
||||
<td id="T0147.004">T0147.004 Mobile App Asset<input type="checkbox" id="T0147.004check" onclick="handleTechniqueClick('T0147.004')"></td>
|
||||
<td id="T0153.006">T0153.006 Content Recommendation Algorithm<input type="checkbox" id="T0153.006check" onclick="handleTechniqueClick('T0153.006')"></td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
@ -927,7 +927,7 @@ function handleTechniqueClick(box) {
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td id="T0148.002">T0148.002 Bank Account<input type="checkbox" id="T0148.002check" onclick="handleTechniqueClick('T0148.002')"></td>
|
||||
<td id="T0148.002">T0148.002 Bank Account Asset<input type="checkbox" id="T0148.002check" onclick="handleTechniqueClick('T0148.002')"></td>
|
||||
<td id="T0154.001">T0154.001 AI LLM Platform<input type="checkbox" id="T0154.001check" onclick="handleTechniqueClick('T0154.001')"></td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
@ -982,7 +982,7 @@ function handleTechniqueClick(box) {
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td id="T0148.005">T0148.005 Subscription Processing Capability<input type="checkbox" id="T0148.005check" onclick="handleTechniqueClick('T0148.005')"></td>
|
||||
<td id="T0155.001">T0155.001 Password Gated<input type="checkbox" id="T0155.001check" onclick="handleTechniqueClick('T0155.001')"></td>
|
||||
<td id="T0155.001">T0155.001 Password Gated Asset<input type="checkbox" id="T0155.001check" onclick="handleTechniqueClick('T0155.001')"></td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
@ -1000,7 +1000,7 @@ function handleTechniqueClick(box) {
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td id="T0148.006">T0148.006 Crowdfunding Platform<input type="checkbox" id="T0148.006check" onclick="handleTechniqueClick('T0148.006')"></td>
|
||||
<td id="T0155.002">T0155.002 Invite Gated<input type="checkbox" id="T0155.002check" onclick="handleTechniqueClick('T0155.002')"></td>
|
||||
<td id="T0155.002">T0155.002 Invite Gated Asset<input type="checkbox" id="T0155.002check" onclick="handleTechniqueClick('T0155.002')"></td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
@ -1018,7 +1018,7 @@ function handleTechniqueClick(box) {
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td id="T0148.007">T0148.007 eCommerce Platform<input type="checkbox" id="T0148.007check" onclick="handleTechniqueClick('T0148.007')"></td>
|
||||
<td id="T0155.003">T0155.003 Approval Gated<input type="checkbox" id="T0155.003check" onclick="handleTechniqueClick('T0155.003')"></td>
|
||||
<td id="T0155.003">T0155.003 Approval Gated Asset<input type="checkbox" id="T0155.003check" onclick="handleTechniqueClick('T0155.003')"></td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
@ -1036,7 +1036,7 @@ function handleTechniqueClick(box) {
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td id="T0148.008">T0148.008 Cryptocurrency Exchange Platform<input type="checkbox" id="T0148.008check" onclick="handleTechniqueClick('T0148.008')"></td>
|
||||
<td id="T0155.004">T0155.004 Geoblocked<input type="checkbox" id="T0155.004check" onclick="handleTechniqueClick('T0155.004')"></td>
|
||||
<td id="T0155.004">T0155.004 Geoblocked Asset<input type="checkbox" id="T0155.004check" onclick="handleTechniqueClick('T0155.004')"></td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
@ -1054,7 +1054,7 @@ function handleTechniqueClick(box) {
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td id="T0148.009">T0148.009 Cryptocurrency Wallet<input type="checkbox" id="T0148.009check" onclick="handleTechniqueClick('T0148.009')"></td>
|
||||
<td id="T0155.005">T0155.005 Paid Access<input type="checkbox" id="T0155.005check" onclick="handleTechniqueClick('T0155.005')"></td>
|
||||
<td id="T0155.005">T0155.005 Paid Access Asset<input type="checkbox" id="T0155.005check" onclick="handleTechniqueClick('T0155.005')"></td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
@ -1072,7 +1072,7 @@ function handleTechniqueClick(box) {
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td id="T0149">T0149 Online Infrastructure<input type="checkbox" id="T0149check" onclick="handleTechniqueClick('T0149')"></td>
|
||||
<td id="T0155.006">T0155.006 Subscription Access<input type="checkbox" id="T0155.006check" onclick="handleTechniqueClick('T0155.006')"></td>
|
||||
<td id="T0155.006">T0155.006 Subscription Access Asset<input type="checkbox" id="T0155.006check" onclick="handleTechniqueClick('T0155.006')"></td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
@ -1089,7 +1089,7 @@ function handleTechniqueClick(box) {
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td id="T0149.001">T0149.001 Domain<input type="checkbox" id="T0149.001check" onclick="handleTechniqueClick('T0149.001')"></td>
|
||||
<td id="T0149.001">T0149.001 Domain Asset<input type="checkbox" id="T0149.001check" onclick="handleTechniqueClick('T0149.001')"></td>
|
||||
<td id="T0155.007">T0155.007 Encrypted Communication Channel<input type="checkbox" id="T0155.007check" onclick="handleTechniqueClick('T0155.007')"></td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
@ -1107,7 +1107,7 @@ function handleTechniqueClick(box) {
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td id="T0149.002">T0149.002 Email Domain<input type="checkbox" id="T0149.002check" onclick="handleTechniqueClick('T0149.002')"></td>
|
||||
<td id="T0149.002">T0149.002 Email Domain Asset<input type="checkbox" id="T0149.002check" onclick="handleTechniqueClick('T0149.002')"></td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
@ -1143,7 +1143,7 @@ function handleTechniqueClick(box) {
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td id="T0149.004">T0149.004 Redirecting Domain<input type="checkbox" id="T0149.004check" onclick="handleTechniqueClick('T0149.004')"></td>
|
||||
<td id="T0149.004">T0149.004 Redirecting Domain Asset<input type="checkbox" id="T0149.004check" onclick="handleTechniqueClick('T0149.004')"></td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
@ -1161,7 +1161,7 @@ function handleTechniqueClick(box) {
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td id="T0149.005">T0149.005 Server<input type="checkbox" id="T0149.005check" onclick="handleTechniqueClick('T0149.005')"></td>
|
||||
<td id="T0149.005">T0149.005 Server Asset<input type="checkbox" id="T0149.005check" onclick="handleTechniqueClick('T0149.005')"></td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
@ -1179,7 +1179,7 @@ function handleTechniqueClick(box) {
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td id="T0149.006">T0149.006 IP Address<input type="checkbox" id="T0149.006check" onclick="handleTechniqueClick('T0149.006')"></td>
|
||||
<td id="T0149.006">T0149.006 IP Address Asset<input type="checkbox" id="T0149.006check" onclick="handleTechniqueClick('T0149.006')"></td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
@ -1197,7 +1197,7 @@ function handleTechniqueClick(box) {
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td id="T0149.007">T0149.007 VPN<input type="checkbox" id="T0149.007check" onclick="handleTechniqueClick('T0149.007')"></td>
|
||||
<td id="T0149.007">T0149.007 VPN Asset<input type="checkbox" id="T0149.007check" onclick="handleTechniqueClick('T0149.007')"></td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
@ -1215,7 +1215,7 @@ function handleTechniqueClick(box) {
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td id="T0149.008">T0149.008 Proxy IP Address<input type="checkbox" id="T0149.008check" onclick="handleTechniqueClick('T0149.008')"></td>
|
||||
<td id="T0149.008">T0149.008 Proxy IP Address Asset<input type="checkbox" id="T0149.008check" onclick="handleTechniqueClick('T0149.008')"></td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
@ -1269,7 +1269,7 @@ function handleTechniqueClick(box) {
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td id="T0150.001">T0150.001 Newly Created<input type="checkbox" id="T0150.001check" onclick="handleTechniqueClick('T0150.001')"></td>
|
||||
<td id="T0150.001">T0150.001 Newly Created Asset<input type="checkbox" id="T0150.001check" onclick="handleTechniqueClick('T0150.001')"></td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
@ -1287,7 +1287,7 @@ function handleTechniqueClick(box) {
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td id="T0150.002">T0150.002 Dormant<input type="checkbox" id="T0150.002check" onclick="handleTechniqueClick('T0150.002')"></td>
|
||||
<td id="T0150.002">T0150.002 Dormant Asset<input type="checkbox" id="T0150.002check" onclick="handleTechniqueClick('T0150.002')"></td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
@ -1305,7 +1305,7 @@ function handleTechniqueClick(box) {
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td id="T0150.003">T0150.003 Pre-Existing<input type="checkbox" id="T0150.003check" onclick="handleTechniqueClick('T0150.003')"></td>
|
||||
<td id="T0150.003">T0150.003 Pre-Existing Asset<input type="checkbox" id="T0150.003check" onclick="handleTechniqueClick('T0150.003')"></td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
@ -1323,7 +1323,7 @@ function handleTechniqueClick(box) {
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td id="T0150.004">T0150.004 Repurposed<input type="checkbox" id="T0150.004check" onclick="handleTechniqueClick('T0150.004')"></td>
|
||||
<td id="T0150.004">T0150.004 Repurposed Asset<input type="checkbox" id="T0150.004check" onclick="handleTechniqueClick('T0150.004')"></td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
@ -1341,7 +1341,7 @@ function handleTechniqueClick(box) {
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td id="T0150.005">T0150.005 Compromised<input type="checkbox" id="T0150.005check" onclick="handleTechniqueClick('T0150.005')"></td>
|
||||
<td id="T0150.005">T0150.005 Compromised Asset<input type="checkbox" id="T0150.005check" onclick="handleTechniqueClick('T0150.005')"></td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
@ -1359,7 +1359,7 @@ function handleTechniqueClick(box) {
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td id="T0150.006">T0150.006 Purchased<input type="checkbox" id="T0150.006check" onclick="handleTechniqueClick('T0150.006')"></td>
|
||||
<td id="T0150.006">T0150.006 Purchased Asset<input type="checkbox" id="T0150.006check" onclick="handleTechniqueClick('T0150.006')"></td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
@ -1377,7 +1377,7 @@ function handleTechniqueClick(box) {
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td id="T0150.007">T0150.007 Rented<input type="checkbox" id="T0150.007check" onclick="handleTechniqueClick('T0150.007')"></td>
|
||||
<td id="T0150.007">T0150.007 Rented Asset<input type="checkbox" id="T0150.007check" onclick="handleTechniqueClick('T0150.007')"></td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
@ -1395,7 +1395,7 @@ function handleTechniqueClick(box) {
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td id="T0150.008">T0150.008 Bulk Created<input type="checkbox" id="T0150.008check" onclick="handleTechniqueClick('T0150.008')"></td>
|
||||
<td id="T0150.008">T0150.008 Bulk Created Asset<input type="checkbox" id="T0150.008check" onclick="handleTechniqueClick('T0150.008')"></td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
<td bgcolor=white> </td>
|
||||
@ -1703,7 +1703,7 @@ function handleTechniqueClick(box) {
|
||||
<li id="T0100.001text" style="display:none">T0100.001: Co-Opt Trusted Individuals</li>
|
||||
<li id="T0137.006text" style="display:none">T0137.006: Manipulate Stocks</li>
|
||||
<li id="T0088.001text" style="display:none">T0088.001: Develop AI-Generated Audio (Deepfakes)</li>
|
||||
<li id="T0152.002text" style="display:none">T0152.002: Blog</li>
|
||||
<li id="T0152.002text" style="display:none">T0152.002: Blog Asset</li>
|
||||
<li id="T0145.004text" style="display:none">T0145.004: Scenery Account Imagery</li>
|
||||
<li id="T0100.002text" style="display:none">T0100.002: Co-Opt Grassroots Groups</li>
|
||||
<li id="T0138text" style="display:none">T0138: Motivate to Act</li>
|
||||
@ -1713,7 +1713,7 @@ function handleTechniqueClick(box) {
|
||||
<li id="T0100.003text" style="display:none">T0100.003: Co-Opt Influencers</li>
|
||||
<li id="T0138.001text" style="display:none">T0138.001: Encourage</li>
|
||||
<li id="T0089text" style="display:none">T0089: Obtain Private Documents</li>
|
||||
<li id="T0152.004text" style="display:none">T0152.004: Website</li>
|
||||
<li id="T0152.004text" style="display:none">T0152.004: Website Asset</li>
|
||||
<li id="T0145.006text" style="display:none">T0145.006: Attractive Person Account Imagery</li>
|
||||
<li id="T0143text" style="display:none">T0143: Persona Legitimacy</li>
|
||||
<li id="T0138.002text" style="display:none">T0138.002: Provoke</li>
|
||||
@ -1726,23 +1726,23 @@ function handleTechniqueClick(box) {
|
||||
<li id="T0152.006text" style="display:none">T0152.006: Video Platform</li>
|
||||
<li id="T0143.002text" style="display:none">T0143.002: Fabricated Persona</li>
|
||||
<li id="T0139text" style="display:none">T0139: Dissuade from Acting</li>
|
||||
<li id="T0146text" style="display:none">T0146: Account</li>
|
||||
<li id="T0146text" style="display:none">T0146: Account Asset</li>
|
||||
<li id="T0152.007text" style="display:none">T0152.007: Audio Platform</li>
|
||||
<li id="T0143.003text" style="display:none">T0143.003: Impersonated Persona</li>
|
||||
<li id="T0139.001text" style="display:none">T0139.001: Discourage</li>
|
||||
<li id="T0146.001text" style="display:none">T0146.001: Free Account</li>
|
||||
<li id="T0146.001text" style="display:none">T0146.001: Free Account Asset</li>
|
||||
<li id="T0152.008text" style="display:none">T0152.008: Live Streaming Platform</li>
|
||||
<li id="T0143.004text" style="display:none">T0143.004: Parody Persona</li>
|
||||
<li id="T0139.002text" style="display:none">T0139.002: Silence</li>
|
||||
<li id="T0146.002text" style="display:none">T0146.002: Paid Account</li>
|
||||
<li id="T0146.002text" style="display:none">T0146.002: Paid Account Asset</li>
|
||||
<li id="T0152.009text" style="display:none">T0152.009: Software Delivery Platform</li>
|
||||
<li id="T0144text" style="display:none">T0144: Persona Legitimacy Evidence</li>
|
||||
<li id="T0139.003text" style="display:none">T0139.003: Deter</li>
|
||||
<li id="T0146.003text" style="display:none">T0146.003: Verified Account</li>
|
||||
<li id="T0146.003text" style="display:none">T0146.003: Verified Account Asset</li>
|
||||
<li id="T0152.010text" style="display:none">T0152.010: File Hosting Platform</li>
|
||||
<li id="T0144.001text" style="display:none">T0144.001: Present Persona across Platforms</li>
|
||||
<li id="T0140text" style="display:none">T0140: Cause Harm</li>
|
||||
<li id="T0146.004text" style="display:none">T0146.004: Administrator Account</li>
|
||||
<li id="T0146.004text" style="display:none">T0146.004: Administrator Account Asset</li>
|
||||
<li id="T0152.011text" style="display:none">T0152.011: Wiki Platform</li>
|
||||
<li id="T0144.002text" style="display:none">T0144.002: Persona Template</li>
|
||||
<li id="T0140.001text" style="display:none">T0140.001: Defame</li>
|
||||
@ -1752,59 +1752,59 @@ function handleTechniqueClick(box) {
|
||||
<li id="T0146.006text" style="display:none">T0146.006: Open Access Platform</li>
|
||||
<li id="T0153text" style="display:none">T0153: Digital Content Delivery Asset</li>
|
||||
<li id="T0140.003text" style="display:none">T0140.003: Spread Hate</li>
|
||||
<li id="T0146.007text" style="display:none">T0146.007: Automated Account</li>
|
||||
<li id="T0146.007text" style="display:none">T0146.007: Automated Account Asset</li>
|
||||
<li id="T0153.001text" style="display:none">T0153.001: Email Platform</li>
|
||||
<li id="T0147text" style="display:none">T0147: Software</li>
|
||||
<li id="T0147text" style="display:none">T0147: Software Asset</li>
|
||||
<li id="T0153.002text" style="display:none">T0153.002: Link Shortening Platform</li>
|
||||
<li id="T0147.001text" style="display:none">T0147.001: Game</li>
|
||||
<li id="T0153.003text" style="display:none">T0153.003: Shortened Link</li>
|
||||
<li id="T0147.002text" style="display:none">T0147.002: Game Mod</li>
|
||||
<li id="T0153.004text" style="display:none">T0153.004: QR Code</li>
|
||||
<li id="T0147.003text" style="display:none">T0147.003: Malware</li>
|
||||
<li id="T0147.001text" style="display:none">T0147.001: Game Asset</li>
|
||||
<li id="T0153.003text" style="display:none">T0153.003: Shortened Link Asset</li>
|
||||
<li id="T0147.002text" style="display:none">T0147.002: Game Mod Asset</li>
|
||||
<li id="T0153.004text" style="display:none">T0153.004: QR Code Asset</li>
|
||||
<li id="T0147.003text" style="display:none">T0147.003: Malware Asset</li>
|
||||
<li id="T0153.005text" style="display:none">T0153.005: Online Advertising Platform</li>
|
||||
<li id="T0147.004text" style="display:none">T0147.004: Mobile App</li>
|
||||
<li id="T0147.004text" style="display:none">T0147.004: Mobile App Asset</li>
|
||||
<li id="T0153.006text" style="display:none">T0153.006: Content Recommendation Algorithm</li>
|
||||
<li id="T0148text" style="display:none">T0148: Financial Instrument</li>
|
||||
<li id="T0153.007text" style="display:none">T0153.007: Direct Messaging</li>
|
||||
<li id="T0148.001text" style="display:none">T0148.001: Online Banking Platform</li>
|
||||
<li id="T0154text" style="display:none">T0154: Digital Content Creation Asset</li>
|
||||
<li id="T0148.002text" style="display:none">T0148.002: Bank Account</li>
|
||||
<li id="T0148.002text" style="display:none">T0148.002: Bank Account Asset</li>
|
||||
<li id="T0154.001text" style="display:none">T0154.001: AI LLM Platform</li>
|
||||
<li id="T0148.003text" style="display:none">T0148.003: Payment Processing Platform</li>
|
||||
<li id="T0154.002text" style="display:none">T0154.002: AI Media Platform</li>
|
||||
<li id="T0148.004text" style="display:none">T0148.004: Payment Processing Capability</li>
|
||||
<li id="T0155text" style="display:none">T0155: Gated Asset</li>
|
||||
<li id="T0148.005text" style="display:none">T0148.005: Subscription Processing Capability</li>
|
||||
<li id="T0155.001text" style="display:none">T0155.001: Password Gated</li>
|
||||
<li id="T0155.001text" style="display:none">T0155.001: Password Gated Asset</li>
|
||||
<li id="T0148.006text" style="display:none">T0148.006: Crowdfunding Platform</li>
|
||||
<li id="T0155.002text" style="display:none">T0155.002: Invite Gated</li>
|
||||
<li id="T0155.002text" style="display:none">T0155.002: Invite Gated Asset</li>
|
||||
<li id="T0148.007text" style="display:none">T0148.007: eCommerce Platform</li>
|
||||
<li id="T0155.003text" style="display:none">T0155.003: Approval Gated</li>
|
||||
<li id="T0155.003text" style="display:none">T0155.003: Approval Gated Asset</li>
|
||||
<li id="T0148.008text" style="display:none">T0148.008: Cryptocurrency Exchange Platform</li>
|
||||
<li id="T0155.004text" style="display:none">T0155.004: Geoblocked</li>
|
||||
<li id="T0155.004text" style="display:none">T0155.004: Geoblocked Asset</li>
|
||||
<li id="T0148.009text" style="display:none">T0148.009: Cryptocurrency Wallet</li>
|
||||
<li id="T0155.005text" style="display:none">T0155.005: Paid Access</li>
|
||||
<li id="T0155.005text" style="display:none">T0155.005: Paid Access Asset</li>
|
||||
<li id="T0149text" style="display:none">T0149: Online Infrastructure</li>
|
||||
<li id="T0155.006text" style="display:none">T0155.006: Subscription Access</li>
|
||||
<li id="T0149.001text" style="display:none">T0149.001: Domain</li>
|
||||
<li id="T0155.006text" style="display:none">T0155.006: Subscription Access Asset</li>
|
||||
<li id="T0149.001text" style="display:none">T0149.001: Domain Asset</li>
|
||||
<li id="T0155.007text" style="display:none">T0155.007: Encrypted Communication Channel</li>
|
||||
<li id="T0149.002text" style="display:none">T0149.002: Email Domain</li>
|
||||
<li id="T0149.002text" style="display:none">T0149.002: Email Domain Asset</li>
|
||||
<li id="T0149.003text" style="display:none">T0149.003: Lookalike Domain</li>
|
||||
<li id="T0149.004text" style="display:none">T0149.004: Redirecting Domain</li>
|
||||
<li id="T0149.005text" style="display:none">T0149.005: Server</li>
|
||||
<li id="T0149.006text" style="display:none">T0149.006: IP Address</li>
|
||||
<li id="T0149.007text" style="display:none">T0149.007: VPN</li>
|
||||
<li id="T0149.008text" style="display:none">T0149.008: Proxy IP Address</li>
|
||||
<li id="T0149.004text" style="display:none">T0149.004: Redirecting Domain Asset</li>
|
||||
<li id="T0149.005text" style="display:none">T0149.005: Server Asset</li>
|
||||
<li id="T0149.006text" style="display:none">T0149.006: IP Address Asset</li>
|
||||
<li id="T0149.007text" style="display:none">T0149.007: VPN Asset</li>
|
||||
<li id="T0149.008text" style="display:none">T0149.008: Proxy IP Address Asset</li>
|
||||
<li id="T0149.009text" style="display:none">T0149.009: Internet Connected Physical Asset</li>
|
||||
<li id="T0150text" style="display:none">T0150: Asset Origin</li>
|
||||
<li id="T0150.001text" style="display:none">T0150.001: Newly Created</li>
|
||||
<li id="T0150.002text" style="display:none">T0150.002: Dormant</li>
|
||||
<li id="T0150.003text" style="display:none">T0150.003: Pre-Existing</li>
|
||||
<li id="T0150.004text" style="display:none">T0150.004: Repurposed</li>
|
||||
<li id="T0150.005text" style="display:none">T0150.005: Compromised</li>
|
||||
<li id="T0150.006text" style="display:none">T0150.006: Purchased</li>
|
||||
<li id="T0150.007text" style="display:none">T0150.007: Rented</li>
|
||||
<li id="T0150.008text" style="display:none">T0150.008: Bulk Created</li>
|
||||
<li id="T0150.001text" style="display:none">T0150.001: Newly Created Asset</li>
|
||||
<li id="T0150.002text" style="display:none">T0150.002: Dormant Asset</li>
|
||||
<li id="T0150.003text" style="display:none">T0150.003: Pre-Existing Asset</li>
|
||||
<li id="T0150.004text" style="display:none">T0150.004: Repurposed Asset</li>
|
||||
<li id="T0150.005text" style="display:none">T0150.005: Compromised Asset</li>
|
||||
<li id="T0150.006text" style="display:none">T0150.006: Purchased Asset</li>
|
||||
<li id="T0150.007text" style="display:none">T0150.007: Rented Asset</li>
|
||||
<li id="T0150.008text" style="display:none">T0150.008: Bulk Created Asset</li>
|
||||
</ul>
|
||||
|
||||
</body>
|
||||
|
@ -529,7 +529,7 @@
|
||||
<td><a href="techniques/T0137.006.md">T0137.006 Manipulate Stocks</a></td>
|
||||
<td> </td>
|
||||
<td><a href="techniques/T0088.001.md">T0088.001 Develop AI-Generated Audio (Deepfakes)</a></td>
|
||||
<td><a href="techniques/T0152.002.md">T0152.002 Blog</a></td>
|
||||
<td><a href="techniques/T0152.002.md">T0152.002 Blog Asset</a></td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
@ -565,7 +565,7 @@
|
||||
<td><a href="techniques/T0138.001.md">T0138.001 Encourage</a></td>
|
||||
<td> </td>
|
||||
<td><a href="techniques/T0089.md">T0089 Obtain Private Documents</a></td>
|
||||
<td><a href="techniques/T0152.004.md">T0152.004 Website</a></td>
|
||||
<td><a href="techniques/T0152.004.md">T0152.004 Website Asset</a></td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
@ -618,7 +618,7 @@
|
||||
<td> </td>
|
||||
<td><a href="techniques/T0139.md">T0139 Dissuade from Acting</a></td>
|
||||
<td> </td>
|
||||
<td><a href="techniques/T0146.md">T0146 Account</a></td>
|
||||
<td><a href="techniques/T0146.md">T0146 Account Asset</a></td>
|
||||
<td><a href="techniques/T0152.007.md">T0152.007 Audio Platform</a></td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
@ -636,7 +636,7 @@
|
||||
<td> </td>
|
||||
<td><a href="techniques/T0139.001.md">T0139.001 Discourage</a></td>
|
||||
<td> </td>
|
||||
<td><a href="techniques/T0146.001.md">T0146.001 Free Account</a></td>
|
||||
<td><a href="techniques/T0146.001.md">T0146.001 Free Account Asset</a></td>
|
||||
<td><a href="techniques/T0152.008.md">T0152.008 Live Streaming Platform</a></td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
@ -654,7 +654,7 @@
|
||||
<td> </td>
|
||||
<td><a href="techniques/T0139.002.md">T0139.002 Silence</a></td>
|
||||
<td> </td>
|
||||
<td><a href="techniques/T0146.002.md">T0146.002 Paid Account</a></td>
|
||||
<td><a href="techniques/T0146.002.md">T0146.002 Paid Account Asset</a></td>
|
||||
<td><a href="techniques/T0152.009.md">T0152.009 Software Delivery Platform</a></td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
@ -672,7 +672,7 @@
|
||||
<td> </td>
|
||||
<td><a href="techniques/T0139.003.md">T0139.003 Deter</a></td>
|
||||
<td> </td>
|
||||
<td><a href="techniques/T0146.003.md">T0146.003 Verified Account</a></td>
|
||||
<td><a href="techniques/T0146.003.md">T0146.003 Verified Account Asset</a></td>
|
||||
<td><a href="techniques/T0152.010.md">T0152.010 File Hosting Platform</a></td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
@ -690,7 +690,7 @@
|
||||
<td> </td>
|
||||
<td><a href="techniques/T0140.md">T0140 Cause Harm</a></td>
|
||||
<td> </td>
|
||||
<td><a href="techniques/T0146.004.md">T0146.004 Administrator Account</a></td>
|
||||
<td><a href="techniques/T0146.004.md">T0146.004 Administrator Account Asset</a></td>
|
||||
<td><a href="techniques/T0152.011.md">T0152.011 Wiki Platform</a></td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
@ -744,7 +744,7 @@
|
||||
<td> </td>
|
||||
<td><a href="techniques/T0140.003.md">T0140.003 Spread Hate</a></td>
|
||||
<td> </td>
|
||||
<td><a href="techniques/T0146.007.md">T0146.007 Automated Account</a></td>
|
||||
<td><a href="techniques/T0146.007.md">T0146.007 Automated Account Asset</a></td>
|
||||
<td><a href="techniques/T0153.001.md">T0153.001 Email Platform</a></td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
@ -762,7 +762,7 @@
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td><a href="techniques/T0147.md">T0147 Software</a></td>
|
||||
<td><a href="techniques/T0147.md">T0147 Software Asset</a></td>
|
||||
<td><a href="techniques/T0153.002.md">T0153.002 Link Shortening Platform</a></td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
@ -780,8 +780,8 @@
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td><a href="techniques/T0147.001.md">T0147.001 Game</a></td>
|
||||
<td><a href="techniques/T0153.003.md">T0153.003 Shortened Link</a></td>
|
||||
<td><a href="techniques/T0147.001.md">T0147.001 Game Asset</a></td>
|
||||
<td><a href="techniques/T0153.003.md">T0153.003 Shortened Link Asset</a></td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
@ -798,8 +798,8 @@
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td><a href="techniques/T0147.002.md">T0147.002 Game Mod</a></td>
|
||||
<td><a href="techniques/T0153.004.md">T0153.004 QR Code</a></td>
|
||||
<td><a href="techniques/T0147.002.md">T0147.002 Game Mod Asset</a></td>
|
||||
<td><a href="techniques/T0153.004.md">T0153.004 QR Code Asset</a></td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
@ -816,7 +816,7 @@
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td><a href="techniques/T0147.003.md">T0147.003 Malware</a></td>
|
||||
<td><a href="techniques/T0147.003.md">T0147.003 Malware Asset</a></td>
|
||||
<td><a href="techniques/T0153.005.md">T0153.005 Online Advertising Platform</a></td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
@ -834,7 +834,7 @@
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td><a href="techniques/T0147.004.md">T0147.004 Mobile App</a></td>
|
||||
<td><a href="techniques/T0147.004.md">T0147.004 Mobile App Asset</a></td>
|
||||
<td><a href="techniques/T0153.006.md">T0153.006 Content Recommendation Algorithm</a></td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
@ -888,7 +888,7 @@
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td><a href="techniques/T0148.002.md">T0148.002 Bank Account</a></td>
|
||||
<td><a href="techniques/T0148.002.md">T0148.002 Bank Account Asset</a></td>
|
||||
<td><a href="techniques/T0154.001.md">T0154.001 AI LLM Platform</a></td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
@ -943,7 +943,7 @@
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td><a href="techniques/T0148.005.md">T0148.005 Subscription Processing Capability</a></td>
|
||||
<td><a href="techniques/T0155.001.md">T0155.001 Password Gated</a></td>
|
||||
<td><a href="techniques/T0155.001.md">T0155.001 Password Gated Asset</a></td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
@ -961,7 +961,7 @@
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td><a href="techniques/T0148.006.md">T0148.006 Crowdfunding Platform</a></td>
|
||||
<td><a href="techniques/T0155.002.md">T0155.002 Invite Gated</a></td>
|
||||
<td><a href="techniques/T0155.002.md">T0155.002 Invite Gated Asset</a></td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
@ -979,7 +979,7 @@
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td><a href="techniques/T0148.007.md">T0148.007 eCommerce Platform</a></td>
|
||||
<td><a href="techniques/T0155.003.md">T0155.003 Approval Gated</a></td>
|
||||
<td><a href="techniques/T0155.003.md">T0155.003 Approval Gated Asset</a></td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
@ -997,7 +997,7 @@
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td><a href="techniques/T0148.008.md">T0148.008 Cryptocurrency Exchange Platform</a></td>
|
||||
<td><a href="techniques/T0155.004.md">T0155.004 Geoblocked</a></td>
|
||||
<td><a href="techniques/T0155.004.md">T0155.004 Geoblocked Asset</a></td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
@ -1015,7 +1015,7 @@
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td><a href="techniques/T0148.009.md">T0148.009 Cryptocurrency Wallet</a></td>
|
||||
<td><a href="techniques/T0155.005.md">T0155.005 Paid Access</a></td>
|
||||
<td><a href="techniques/T0155.005.md">T0155.005 Paid Access Asset</a></td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
@ -1033,7 +1033,7 @@
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td><a href="techniques/T0149.md">T0149 Online Infrastructure</a></td>
|
||||
<td><a href="techniques/T0155.006.md">T0155.006 Subscription Access</a></td>
|
||||
<td><a href="techniques/T0155.006.md">T0155.006 Subscription Access Asset</a></td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
@ -1050,7 +1050,7 @@
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td><a href="techniques/T0149.001.md">T0149.001 Domain</a></td>
|
||||
<td><a href="techniques/T0149.001.md">T0149.001 Domain Asset</a></td>
|
||||
<td><a href="techniques/T0155.007.md">T0155.007 Encrypted Communication Channel</a></td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
@ -1068,7 +1068,7 @@
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td><a href="techniques/T0149.002.md">T0149.002 Email Domain</a></td>
|
||||
<td><a href="techniques/T0149.002.md">T0149.002 Email Domain Asset</a></td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
@ -1104,7 +1104,7 @@
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td><a href="techniques/T0149.004.md">T0149.004 Redirecting Domain</a></td>
|
||||
<td><a href="techniques/T0149.004.md">T0149.004 Redirecting Domain Asset</a></td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
@ -1122,7 +1122,7 @@
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td><a href="techniques/T0149.005.md">T0149.005 Server</a></td>
|
||||
<td><a href="techniques/T0149.005.md">T0149.005 Server Asset</a></td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
@ -1140,7 +1140,7 @@
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td><a href="techniques/T0149.006.md">T0149.006 IP Address</a></td>
|
||||
<td><a href="techniques/T0149.006.md">T0149.006 IP Address Asset</a></td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
@ -1158,7 +1158,7 @@
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td><a href="techniques/T0149.007.md">T0149.007 VPN</a></td>
|
||||
<td><a href="techniques/T0149.007.md">T0149.007 VPN Asset</a></td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
@ -1176,7 +1176,7 @@
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td><a href="techniques/T0149.008.md">T0149.008 Proxy IP Address</a></td>
|
||||
<td><a href="techniques/T0149.008.md">T0149.008 Proxy IP Address Asset</a></td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
@ -1230,7 +1230,7 @@
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td><a href="techniques/T0150.001.md">T0150.001 Newly Created</a></td>
|
||||
<td><a href="techniques/T0150.001.md">T0150.001 Newly Created Asset</a></td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
@ -1248,7 +1248,7 @@
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td><a href="techniques/T0150.002.md">T0150.002 Dormant</a></td>
|
||||
<td><a href="techniques/T0150.002.md">T0150.002 Dormant Asset</a></td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
@ -1266,7 +1266,7 @@
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td><a href="techniques/T0150.003.md">T0150.003 Pre-Existing</a></td>
|
||||
<td><a href="techniques/T0150.003.md">T0150.003 Pre-Existing Asset</a></td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
@ -1284,7 +1284,7 @@
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td><a href="techniques/T0150.004.md">T0150.004 Repurposed</a></td>
|
||||
<td><a href="techniques/T0150.004.md">T0150.004 Repurposed Asset</a></td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
@ -1302,7 +1302,7 @@
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td><a href="techniques/T0150.005.md">T0150.005 Compromised</a></td>
|
||||
<td><a href="techniques/T0150.005.md">T0150.005 Compromised Asset</a></td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
@ -1320,7 +1320,7 @@
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td><a href="techniques/T0150.006.md">T0150.006 Purchased</a></td>
|
||||
<td><a href="techniques/T0150.006.md">T0150.006 Purchased Asset</a></td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
@ -1338,7 +1338,7 @@
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td><a href="techniques/T0150.007.md">T0150.007 Rented</a></td>
|
||||
<td><a href="techniques/T0150.007.md">T0150.007 Rented Asset</a></td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
@ -1356,7 +1356,7 @@
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td><a href="techniques/T0150.008.md">T0150.008 Bulk Created</a></td>
|
||||
<td><a href="techniques/T0150.008.md">T0150.008 Bulk Created Asset</a></td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
|
@ -20,9 +20,6 @@
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0017 Conduct Fundraising](../../generated_pages/techniques/T0017.md) | IT00000002 Promote "funding" campaign |
|
||||
| [T0018 Purchase Targeted Advertisements](../../generated_pages/techniques/T0018.md) | IT00000001 buy FB targeted ads |
|
||||
| [T0046 Use Search Engine Optimisation](../../generated_pages/techniques/T0046.md) | IT00000005 SEO optimisation/manipulation ("key words") |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -29,11 +29,6 @@ The report adds that although officially the Russian government asserted its neu
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0010 Cultivate Ignorant Agents](../../generated_pages/techniques/T0010.md) | IT00000016 cultivate, manipulate, exploit useful idiots |
|
||||
| [T0018 Purchase Targeted Advertisements](../../generated_pages/techniques/T0018.md) | IT00000010 Targeted FB paid ads |
|
||||
| [T0029 Online Polls](../../generated_pages/techniques/T0029.md) | IT00000013 manipulate social media "online polls"? |
|
||||
| [T0046 Use Search Engine Optimisation](../../generated_pages/techniques/T0046.md) | IT00000022 SEO optimisation/manipulation ("key words") |
|
||||
| [T0057 Organise Events](../../generated_pages/techniques/T0057.md) | IT00000012 Digital to physical "organize+promote" rallies & events? |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -20,9 +20,6 @@
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0015 Create Hashtags and Search Artefacts](../../generated_pages/techniques/T0015.md) | IT00000027 Create and use hashtag |
|
||||
| [T0039 Bait Influencer](../../generated_pages/techniques/T0039.md) | IT00000030 bait journalists/media/politicians |
|
||||
| [T0151.004 Chat Platform](../../generated_pages/techniques/T0151.004.md) | IT00000025 Use SMS/text messages |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -20,7 +20,6 @@
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0010 Cultivate Ignorant Agents](../../generated_pages/techniques/T0010.md) | IT00000033 cultivate, manipulate, exploit useful idiots (in the case Paul Manafort) |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -20,7 +20,6 @@
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0045 Use Fake Experts](../../generated_pages/techniques/T0045.md) | IT00000036 Using "expert" |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -20,10 +20,6 @@
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0010 Cultivate Ignorant Agents](../../generated_pages/techniques/T0010.md) | IT00000044 cultivate, manipulate, exploit useful idiots (Alex Jones... drives conspiracy theories; false flags, crisis actors) |
|
||||
| [T0020 Trial Content](../../generated_pages/techniques/T0020.md) | IT00000048 4Chan/8Chan - trial content |
|
||||
| [T0039 Bait Influencer](../../generated_pages/techniques/T0039.md) | IT00000049 journalist/media baiting |
|
||||
| [T0046 Use Search Engine Optimisation](../../generated_pages/techniques/T0046.md) | IT00000043 SEO optimisation/manipulation ("key words") |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -20,8 +20,6 @@
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0039 Bait Influencer](../../generated_pages/techniques/T0039.md) | IT00000053 journalist/media baiting |
|
||||
| [T0044 Seed Distortions](../../generated_pages/techniques/T0044.md) | IT00000052 Circulate to media via DM, then release publicly |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -20,13 +20,6 @@
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0010 Cultivate Ignorant Agents](../../generated_pages/techniques/T0010.md) | IT00000063 cultivate, manipulate, exploit useful idiots |
|
||||
| [T0016 Create Clickbait](../../generated_pages/techniques/T0016.md) | IT00000073 Click-bait (economic actors) fake news sites (ie: Denver Guardian; Macedonian teens) |
|
||||
| [T0018 Purchase Targeted Advertisements](../../generated_pages/techniques/T0018.md) | IT00000057 Targeted FB paid ads |
|
||||
| [T0020 Trial Content](../../generated_pages/techniques/T0020.md) | IT00000070 4Chan/8Chan - trial content |
|
||||
| [T0029 Online Polls](../../generated_pages/techniques/T0029.md) | IT00000060 manipulate social media "online polls"? |
|
||||
| [T0046 Use Search Engine Optimisation](../../generated_pages/techniques/T0046.md) | IT00000071 SEO optimisation/manipulation ("key words") |
|
||||
| [T0057 Organise Events](../../generated_pages/techniques/T0057.md) | IT00000059 Digital to physical "organize+promote" rallies & events |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -20,9 +20,6 @@
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0010 Cultivate Ignorant Agents](../../generated_pages/techniques/T0010.md) | IT00000084 cultivate, manipulate, exploit useful idiots |
|
||||
| [T0040 Demand Insurmountable Proof](../../generated_pages/techniques/T0040.md) | IT00000089 Demand insurmountable proof |
|
||||
| [T0046 Use Search Engine Optimisation](../../generated_pages/techniques/T0046.md) | IT00000085 SEO optimisation/manipulation ("key words") |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -20,10 +20,6 @@
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0010 Cultivate Ignorant Agents](../../generated_pages/techniques/T0010.md) | IT00000104 cultivate, manipulate, exploit useful idiots (Alex Jones... drives conspiracy theories) |
|
||||
| [T0020 Trial Content](../../generated_pages/techniques/T0020.md) | IT00000102 4Chan/8Chan - trial content |
|
||||
| [T0046 Use Search Engine Optimisation](../../generated_pages/techniques/T0046.md) | IT00000103 SEO optimisation/manipulation ("key words") |
|
||||
| [T0057 Organise Events](../../generated_pages/techniques/T0057.md) | IT00000093 Digital to physical "organize+promote" rallies & events? |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -20,10 +20,6 @@
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0002 Facilitate State Propaganda](../../generated_pages/techniques/T0002.md) | IT00000110 facilitate state propaganda and defuse crises |
|
||||
| [T0047 Censor Social Media as a Political Force](../../generated_pages/techniques/T0047.md) | IT00000108 cow online opinion leaders into submission, muzzling social media as a political force |
|
||||
| [T0048 Harass](../../generated_pages/techniques/T0048.md) | IT00000109 cow online opinion leaders into submission, muzzling social media as a political force |
|
||||
| [T0049 Flood Information Space](../../generated_pages/techniques/T0049.md) | IT00000105 2,000,000 people (est.) part of state run/sponsored astroturfing |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -22,9 +22,6 @@ Unique for taking place outside of the Chinese Internet system, both transgressi
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0002 Facilitate State Propaganda](../../generated_pages/techniques/T0002.md) | IT00000111 Netizens from one of the largest discussion forums in China, known as Diba, coordinated to overcome China’s Great Firewall |
|
||||
| [T0049 Flood Information Space](../../generated_pages/techniques/T0049.md) | IT00000112 flood the Facebook pages of Taiwanese politicians and news agencies with a pro-PRC message |
|
||||
| [T0049 Flood Information Space](../../generated_pages/techniques/T0049.md) | IT00000113 Democratic Progressive Party (DPP), attracted nearly 40,000 Facebook comments in just eight hours. |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -20,9 +20,6 @@
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0010 Cultivate Ignorant Agents](../../generated_pages/techniques/T0010.md) | IT00000126 cultivate, manipulate, exploit useful idiots (Alex Jones... drives conspiracy theories) |
|
||||
| [T0020 Trial Content](../../generated_pages/techniques/T0020.md) | IT00000124 4Chan/8Chan - trial content |
|
||||
| [T0046 Use Search Engine Optimisation](../../generated_pages/techniques/T0046.md) | IT00000125 SEO optimisation/manipulation ("key words") |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -21,8 +21,6 @@ The Russian Federal Security Service (FSB), which is also responsible for border
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0023 Distort Facts](../../generated_pages/techniques/T0023.md) | IT00000130 (Distort) Kremlin-controlled RT cited Russian Minister of Foreign Affairs Sergei Lavrov suggesting that Ukraine deliberately provoked Russia in hopes of gaining additional support from the United States and Europe. |
|
||||
| [T0040 Demand Insurmountable Proof](../../generated_pages/techniques/T0040.md) | IT00000133 Demand insurmountable proof |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -20,8 +20,6 @@
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0010 Cultivate Ignorant Agents](../../generated_pages/techniques/T0010.md) | IT00000139 cultivate, manipulate, exploit useful idiots (Roger Waters; Venessa Beeley...) |
|
||||
| [T0046 Use Search Engine Optimisation](../../generated_pages/techniques/T0046.md) | IT00000140 SEO optimisation/manipulation ("key words") |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -23,8 +23,6 @@ Maduro has remained defiant in the face of domestic and international pressure,
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0010 Cultivate Ignorant Agents](../../generated_pages/techniques/T0010.md) | IT00000148 cultivate, manipulate, exploit useful idiots (Roger Waters) |
|
||||
| [T0046 Use Search Engine Optimisation](../../generated_pages/techniques/T0046.md) | IT00000149 SEO optimisation/manipulation ("key words") |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -21,8 +21,6 @@ The FCO comments on the IfS were issued after a news report said the group had r
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0010 Cultivate Ignorant Agents](../../generated_pages/techniques/T0010.md) | IT00000158 cultivate, manipulate, exploit useful idiots |
|
||||
| [T0046 Use Search Engine Optimisation](../../generated_pages/techniques/T0046.md) | IT00000161 SEO optimisation/manipulation ("key words") |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -21,9 +21,6 @@ Geopolitically complex issue combines US/China trade; Security concerns/issues r
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0023 Distort Facts](../../generated_pages/techniques/T0023.md) | IT00000163 Distorted, saccharine “news” about the Chinese State and Party |
|
||||
| [T0057 Organise Events](../../generated_pages/techniques/T0057.md) | IT00000164 Events coordinated and promoted across media platforms |
|
||||
| [T0057 Organise Events](../../generated_pages/techniques/T0057.md) | IT00000166 Extend digital the physical space… gatherings ie: support for Meng outside courthouse |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -21,8 +21,6 @@ While there is history to Iran’s information/influence operations, starting wi
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0022 Leverage Conspiracy Theory Narratives](../../generated_pages/techniques/T0022.md) | IT00000174 Memes... anti-Isreal/USA/West, conspiracy narratives |
|
||||
| [T0046 Use Search Engine Optimisation](../../generated_pages/techniques/T0046.md) | IT00000172 SEO optimisation/manipulation ("key words") |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -20,8 +20,6 @@
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0010 Cultivate Ignorant Agents](../../generated_pages/techniques/T0010.md) | IT00000182 cultivate, manipulate, exploit useful idiots |
|
||||
| [T0046 Use Search Engine Optimisation](../../generated_pages/techniques/T0046.md) | IT00000183 SEO optimisation/manipulation ("key words") |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -21,8 +21,9 @@
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0143.003 Impersonated Persona](../../generated_pages/techniques/T0143.003.md) | IT00000242 <I>“In the days leading up to the UK’s [2019] general election, youths looking for love online encountered a whole new kind of Tinder nightmare. A group of young activists built a Tinder chatbot to co-opt profiles and persuade swing voters to support Labour. The bot accounts sent 30,000-40,000 messages to targeted 18-25 year olds in battleground constituencies like Dudley North, which Labour ended up winning by only 22 votes. [...]<br><br> “The activists maintain that the project was meant to foster democratic engagement. But screenshots of the bots’ activity expose a harsher reality. Images of conversations between real users and these bots, posted on i-D, Mashable, as well as on Fowler and Goodman’s public Twitter accounts, show that the bots did not identify themselves as automated accounts, instead posing as the user whose profile they had taken over. While conducting research for this story, it turned out that a number of [the reporters’ friends] living in Oxford had interacted with the bot in the lead up to the election and had no idea that it was not a real person.”</i><br><br> In this example people offered up their real accounts for the automation of political messaging; the actors convinced the users to give up access to their accounts to use in the operation. The actors maintained the accounts’ existing persona, and presented themselves as potential romantic suitors for legitimate platform users (T0097:109 Romantic Suitor Persona, T0143.003: Impersonated Persona, T0146: Account, T0150.007: Rented, T0151.017: Dating Platform). |
|
||||
| [T0150.007 Rented](../../generated_pages/techniques/T0150.007.md) | IT00000240 <I>“In the days leading up to the UK’s [2019] general election, youths looking for love online encountered a whole new kind of Tinder nightmare. A group of young activists built a Tinder chatbot to co-opt profiles and persuade swing voters to support Labour. The bot accounts sent 30,000-40,000 messages to targeted 18-25 year olds in battleground constituencies like Dudley North, which Labour ended up winning by only 22 votes. [...]<br><br> “The activists maintain that the project was meant to foster democratic engagement. But screenshots of the bots’ activity expose a harsher reality. Images of conversations between real users and these bots, posted on i-D, Mashable, as well as on Fowler and Goodman’s public Twitter accounts, show that the bots did not identify themselves as automated accounts, instead posing as the user whose profile they had taken over. While conducting research for this story, it turned out that a number of [the reporters’ friends] living in Oxford had interacted with the bot in the lead up to the election and had no idea that it was not a real person.”</i><br><br> In this example people offered up their real accounts for the automation of political messaging; the actors convinced the users to give up access to their accounts to use in the operation. The actors maintained the accounts’ existing persona, and presented themselves as potential romantic suitors for legitimate platform users (T0097:109 Romantic Suitor Persona, T0143.003: Impersonated Persona, T0146: Account, T0150.007: Rented, T0151.017: Dating Platform). |
|
||||
| [T0097.109 Romantic Suitor Persona](../../generated_pages/techniques/T0097.109.md) | IT00000241 <I>“In the days leading up to the UK’s [2019] general election, youths looking for love online encountered a whole new kind of Tinder nightmare. A group of young activists built a Tinder chatbot to co-opt profiles and persuade swing voters to support Labour. The bot accounts sent 30,000-40,000 messages to targeted 18-25 year olds in battleground constituencies like Dudley North, which Labour ended up winning by only 22 votes. [...]<br><br> “The activists maintain that the project was meant to foster democratic engagement. But screenshots of the bots’ activity expose a harsher reality. Images of conversations between real users and these bots, posted on i-D, Mashable, as well as on Fowler and Goodman’s public Twitter accounts, show that the bots did not identify themselves as automated accounts, instead posing as the user whose profile they had taken over. While conducting research for this story, it turned out that a number of [the reporters’ friends] living in Oxford had interacted with the bot in the lead up to the election and had no idea that it was not a real person.”</i><br><br> In this example people offered up their real accounts for the automation of political messaging; the actors convinced the users to give up access to their accounts to use in the operation. The actors maintained the accounts’ existing persona, and presented themselves as potential romantic suitors for legitimate platform users (T0097:109 Romantic Suitor Persona, T0143.003: Impersonated Persona, T0146: Account Asset, T0150.007: Rented Asset, T0151.017: Dating Platform). |
|
||||
| [T0143.003 Impersonated Persona](../../generated_pages/techniques/T0143.003.md) | IT00000242 <I>“In the days leading up to the UK’s [2019] general election, youths looking for love online encountered a whole new kind of Tinder nightmare. A group of young activists built a Tinder chatbot to co-opt profiles and persuade swing voters to support Labour. The bot accounts sent 30,000-40,000 messages to targeted 18-25 year olds in battleground constituencies like Dudley North, which Labour ended up winning by only 22 votes. [...]<br><br> “The activists maintain that the project was meant to foster democratic engagement. But screenshots of the bots’ activity expose a harsher reality. Images of conversations between real users and these bots, posted on i-D, Mashable, as well as on Fowler and Goodman’s public Twitter accounts, show that the bots did not identify themselves as automated accounts, instead posing as the user whose profile they had taken over. While conducting research for this story, it turned out that a number of [the reporters’ friends] living in Oxford had interacted with the bot in the lead up to the election and had no idea that it was not a real person.”</i><br><br> In this example people offered up their real accounts for the automation of political messaging; the actors convinced the users to give up access to their accounts to use in the operation. The actors maintained the accounts’ existing persona, and presented themselves as potential romantic suitors for legitimate platform users (T0097.109: Romantic Suitor Persona, T0143.003: Impersonated Persona, T0146: Account Asset, T0150.007: Rented Asset, T0151.017: Dating Platform). |
|
||||
| [T0150.007 Rented Asset](../../generated_pages/techniques/T0150.007.md) | IT00000240 <I>“In the days leading up to the UK’s [2019] general election, youths looking for love online encountered a whole new kind of Tinder nightmare. A group of young activists built a Tinder chatbot to co-opt profiles and persuade swing voters to support Labour. The bot accounts sent 30,000-40,000 messages to targeted 18-25 year olds in battleground constituencies like Dudley North, which Labour ended up winning by only 22 votes. [...]<br><br> “The activists maintain that the project was meant to foster democratic engagement. But screenshots of the bots’ activity expose a harsher reality. Images of conversations between real users and these bots, posted on i-D, Mashable, as well as on Fowler and Goodman’s public Twitter accounts, show that the bots did not identify themselves as automated accounts, instead posing as the user whose profile they had taken over. While conducting research for this story, it turned out that a number of [the reporters’ friends] living in Oxford had interacted with the bot in the lead up to the election and had no idea that it was not a real person.”</i><br><br> In this example people offered up their real accounts for the automation of political messaging; the actors convinced the users to give up access to their accounts to use in the operation. The actors maintained the accounts’ existing persona, and presented themselves as potential romantic suitors for legitimate platform users (T0097.109: Romantic Suitor Persona, T0143.003: Impersonated Persona, T0146: Account Asset, T0150.007: Rented Asset, T0151.017: Dating Platform). |
|
||||
| [T0151.017 Dating Platform](../../generated_pages/techniques/T0151.017.md) | IT00000214 _"In the days leading up to the UK’s [2017] general election, youths looking for love online encountered a whole new kind of Tinder nightmare. A group of young activists built a Tinder chatbot to co-opt profiles and persuade swing voters to support Labour. The bot accounts sent 30,000-40,000 messages to targeted 18-25 year olds in battleground constituencies like Dudley North, which Labour ended up winning by only 22 votes._<br /> <br />_"Tinder is a dating app where users swipe right to indicate attraction and interest in a potential partner. If both people swipe right on each other’s profile, a dialogue box becomes available for them to privately chat. After meeting their crowdfunding goal of only £500, the team built a tool which took over and operated the accounts of recruited Tinder-users. By upgrading the profiles to Tinder Premium, the team was able to place bots in any contested constituency across the UK. Once planted, the bots swiped right on all users in the attempt to get the largest number of matches and inquire into their voting intentions."_ <br /> <br />This incident matches T0151.017: Dating Platform, as users of Tinder were targeted in an attempt to persuade users to vote for a particular party in the upcoming election, rather than for the purpose of connecting those who were authentically interested in dating each other. |
|
||||
|
||||
|
||||
|
@ -21,7 +21,7 @@
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0097.110 Party Official Persona](../../generated_pages/techniques/T0097.110.md) | IT00000215 _”Overall, narratives promoted in the five operations appear to represent a concerted effort to discredit the ruling political coalition, widen existing domestic political divisions and project an image of coalition disunity in Poland. In each incident, content was primarily disseminated via Twitter, Facebook, and/ or Instagram accounts belonging to Polish politicians, all of whom have publicly claimed their accounts were compromised at the times the posts were made."_ <br /> <br />This example demonstrates how threat actors can use compromised accounts to distribute inauthentic content while exploiting the legitimate account holder’s persona (T0097.110: Party Official Persona, T0143.003: Impersonated Persona, T0146: Account, T0150.005: Compromised). |
|
||||
| [T0097.110 Party Official Persona](../../generated_pages/techniques/T0097.110.md) | IT00000215 _”Overall, narratives promoted in the five operations appear to represent a concerted effort to discredit the ruling political coalition, widen existing domestic political divisions and project an image of coalition disunity in Poland. In each incident, content was primarily disseminated via Twitter, Facebook, and/ or Instagram accounts belonging to Polish politicians, all of whom have publicly claimed their accounts were compromised at the times the posts were made."_ <br /> <br />This example demonstrates how threat actors can use compromised accounts to distribute inauthentic content while exploiting the legitimate account holder’s persona (T0097.110: Party Official Persona, T0143.003: Impersonated Persona, T0146: Account Asset, T0150.005: Compromised Asset). |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -21,7 +21,7 @@
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0150.005 Compromised](../../generated_pages/techniques/T0150.005.md) | IT00000216 _"In the early hours of 24 May 2017, a news story appeared on the website of Qatar's official news agency, QNA, reporting that the country's emir, Sheikh Tamim bin Hamad al-Thani, had made an astonishing speech."_ <br /> <br />_"[…]_ <br /> <br />_"Qatar claimed that the QNA had been hacked. And they said the hack was designed to deliberately spread fake news about the country's leader and its foreign policies. The Qataris specifically blamed UAE, an allegation later repeated by a Washington Post report which cited US intelligence sources. The UAE categorically denied those reports._ <br /> <br />_"But the story of the emir's speech unleashed a media free-for-all. Within minutes, Saudi and UAE-owned TV networks - Al Arabiya and Sky News Arabia - picked up on the comments attributed to al-Thani. Both networks accused Qatar of funding extremist groups and of destabilising the region."_ <br /> <br />This incident demonstrates how threat actors used a compromised website to allow for an inauthentic narrative to be given a level of credibility which caused significant political fallout (T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0152.004: Website, T0150.005: Compromised). |
|
||||
| [T0150.005 Compromised Asset](../../generated_pages/techniques/T0150.005.md) | IT00000216 _"In the early hours of 24 May 2017, a news story appeared on the website of Qatar's official news agency, QNA, reporting that the country's emir, Sheikh Tamim bin Hamad al-Thani, had made an astonishing speech."_ <br /> <br />_"[…]_ <br /> <br />_"Qatar claimed that the QNA had been hacked. And they said the hack was designed to deliberately spread fake news about the country's leader and its foreign policies. The Qataris specifically blamed UAE, an allegation later repeated by a Washington Post report which cited US intelligence sources. The UAE categorically denied those reports._ <br /> <br />_"But the story of the emir's speech unleashed a media free-for-all. Within minutes, Saudi and UAE-owned TV networks - Al Arabiya and Sky News Arabia - picked up on the comments attributed to al-Thani. Both networks accused Qatar of funding extremist groups and of destabilising the region."_ <br /> <br />This incident demonstrates how threat actors used a compromised website to allow for an inauthentic narrative to be given a level of credibility which caused significant political fallout (T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0152.004: Website Asset, T0150.005: Compromised Asset). |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -23,8 +23,9 @@
|
||||
| --------- | ------------------------- |
|
||||
| [T0088.001 Develop AI-Generated Audio (Deepfakes)](../../generated_pages/techniques/T0088.001.md) | IT00000220 <i>“While reports of [...] deepfake calls targeting private companies are luckily still rare, LastPass itself experienced a deepfake attempt earlier today that we are sharing with the larger community to help raise awareness that this tactic is spreading and all companies should be on the alert. In our case, an employee received a series of calls, texts, and at least one voicemail featuring an audio deepfake from a threat actor impersonating our CEO via WhatsApp. As the attempted communication was outside of normal business communication channels and due to the employee’s suspicion regarding the presence of many of the hallmarks of a social engineering attempt (such as forced urgency), our employee rightly ignored the messages and reported the incident to our internal security team so that we could take steps to both mitigate the threat and raise awareness of the tactic both internally and externally.”</i><br><br> In this example attackers impersonated the CEO of LastPass (T0097.100: Individual Persona, T0143.003: Impersonated Persona), targeting one of its employees over WhatsApp (T0151.004: Chat Platform, T0155.007: Encrypted Communication Channel) using deepfaked audio (T0088.001: Develop AI-Generated Audio (Deepfakes)). |
|
||||
| [T0097.100 Individual Persona](../../generated_pages/techniques/T0097.100.md) | IT00000221 <i>“While reports of [...] deepfake calls targeting private companies are luckily still rare, LastPass itself experienced a deepfake attempt earlier today that we are sharing with the larger community to help raise awareness that this tactic is spreading and all companies should be on the alert. In our case, an employee received a series of calls, texts, and at least one voicemail featuring an audio deepfake from a threat actor impersonating our CEO via WhatsApp. As the attempted communication was outside of normal business communication channels and due to the employee’s suspicion regarding the presence of many of the hallmarks of a social engineering attempt (such as forced urgency), our employee rightly ignored the messages and reported the incident to our internal security team so that we could take steps to both mitigate the threat and raise awareness of the tactic both internally and externally.”</i><br><br> In this example attackers impersonated the CEO of LastPass (T0097.100: Individual Persona, T0143.003: Impersonated Persona), targeting one of its employees over WhatsApp (T0151.004: Chat Platform, T0155.007: Encrypted Communication Channel) using deepfaked audio (T0088.001: Develop AI-Generated Audio (Deepfakes)). |
|
||||
| [T0143.003 Impersonated Persona](../../generated_pages/techniques/T0143.003.md) | IT00000222 <i>“While reports of [...] deepfake calls targeting private companies are luckily still rare, LastPass itself experienced a deepfake attempt earlier today that we are sharing with the larger community to help raise awareness that this tactic is spreading and all companies should be on the alert. In our case, an employee received a series of calls, texts, and at least one voicemail featuring an audio deepfake from a threat actor impersonating our CEO via WhatsApp. As the attempted communication was outside of normal business communication channels and due to the employee’s suspicion regarding the presence of many of the hallmarks of a social engineering attempt (such as forced urgency), our employee rightly ignored the messages and reported the incident to our internal security team so that we could take steps to both mitigate the threat and raise awareness of the tactic both internally and externally.”</i><br><br>In this example attackers created an account on WhatsApp which impersonated the CEO of lastpass (T0097.100: Individual Persona, T0143.003: Impersonated Persona, T0146: Account, T0151.004: Chat Platform, T0155.007: Encrypted Communication Channel). They used this asset to target an employee using deepfaked audio (T0088.001: Develop AI-Generated Audio (Deepfakes)). |
|
||||
| [T0151.004 Chat Platform](../../generated_pages/techniques/T0151.004.md) | IT00000219 <i>“While reports of [...] deepfake calls targeting private companies are luckily still rare, LastPass itself experienced a deepfake attempt earlier today that we are sharing with the larger community to help raise awareness that this tactic is spreading and all companies should be on the alert. In our case, an employee received a series of calls, texts, and at least one voicemail featuring an audio deepfake from a threat actor impersonating our CEO via WhatsApp. As the attempted communication was outside of normal business communication channels and due to the employee’s suspicion regarding the presence of many of the hallmarks of a social engineering attempt (such as forced urgency), our employee rightly ignored the messages and reported the incident to our internal security team so that we could take steps to both mitigate the threat and raise awareness of the tactic both internally and externally.”</i><br><br>In this example attackers created an account on WhatsApp which impersonated the CEO of lastpass (T0097.100: Individual Persona, T0143.003: Impersonated Persona, T0146: Account, T0151.004: Chat Platform, T0155.007: Encrypted Communication Channel). They used this asset to target an employee using deepfaked audio (T0088.001: Develop AI-Generated Audio (Deepfakes)). |
|
||||
| [T0143.003 Impersonated Persona](../../generated_pages/techniques/T0143.003.md) | IT00000222 <i>“While reports of [...] deepfake calls targeting private companies are luckily still rare, LastPass itself experienced a deepfake attempt earlier today that we are sharing with the larger community to help raise awareness that this tactic is spreading and all companies should be on the alert. In our case, an employee received a series of calls, texts, and at least one voicemail featuring an audio deepfake from a threat actor impersonating our CEO via WhatsApp. As the attempted communication was outside of normal business communication channels and due to the employee’s suspicion regarding the presence of many of the hallmarks of a social engineering attempt (such as forced urgency), our employee rightly ignored the messages and reported the incident to our internal security team so that we could take steps to both mitigate the threat and raise awareness of the tactic both internally and externally.”</i><br><br>In this example attackers created an account on WhatsApp which impersonated the CEO of LastPass (T0097.100: Individual Persona, T0143.003: Impersonated Persona, T0146: Account Asset, T0151.004: Chat Platform, T0155.007: Encrypted Communication Channel). They used this asset to target an employee using deepfaked audio (T0088.001: Develop AI-Generated Audio (Deepfakes)). |
|
||||
| [T0151.004 Chat Platform](../../generated_pages/techniques/T0151.004.md) | IT00000219 <i>“While reports of [...] deepfake calls targeting private companies are luckily still rare, LastPass itself experienced a deepfake attempt earlier today that we are sharing with the larger community to help raise awareness that this tactic is spreading and all companies should be on the alert. In our case, an employee received a series of calls, texts, and at least one voicemail featuring an audio deepfake from a threat actor impersonating our CEO via WhatsApp. As the attempted communication was outside of normal business communication channels and due to the employee’s suspicion regarding the presence of many of the hallmarks of a social engineering attempt (such as forced urgency), our employee rightly ignored the messages and reported the incident to our internal security team so that we could take steps to both mitigate the threat and raise awareness of the tactic both internally and externally.”</i><br><br>In this example attackers created an account on WhatsApp which impersonated the CEO of LastPass (T0097.100: Individual Persona, T0143.003: Impersonated Persona, T0146: Account Asset, T0151.004: Chat Platform, T0155.007: Encrypted Communication Channel). They used this asset to target an employee using deepfaked audio (T0088.001: Develop AI-Generated Audio (Deepfakes)). |
|
||||
| [T0155.007 Encrypted Communication Channel](../../generated_pages/techniques/T0155.007.md) | IT00000547 <i>“While reports of [...] deepfake calls targeting private companies are luckily still rare, LastPass itself experienced a deepfake attempt earlier today that we are sharing with the larger community to help raise awareness that this tactic is spreading and all companies should be on the alert. In our case, an employee received a series of calls, texts, and at least one voicemail featuring an audio deepfake from a threat actor impersonating our CEO via WhatsApp. As the attempted communication was outside of normal business communication channels and due to the employee’s suspicion regarding the presence of many of the hallmarks of a social engineering attempt (such as forced urgency), our employee rightly ignored the messages and reported the incident to our internal security team so that we could take steps to both mitigate the threat and raise awareness of the tactic both internally and externally.”</i><br><br>In this example attackers created an account on WhatsApp which impersonated the CEO of LastPass (T0097.100: Individual Persona, T0143.003: Impersonated Persona, T0146: Account Asset, T0151.004: Chat Platform, T0155.007: Encrypted Communication Channel). They used this asset to target an employee using deepfaked audio (T0088.001: Develop AI-Generated Audio (Deepfakes)). |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -22,12 +22,12 @@
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0085.004 Develop Document](../../generated_pages/techniques/T0085.004.md) | IT00000324 <i>“On August 16, 2022, pro-Kremlin Telegram channel Joker DPR (Джокер ДНР) published a forged letter allegedly written by Ukrainian Foreign Minister Dmytro Kuleba. In the letter, Kuleba supposedly asked relevant Polish authorities to rename Belwederska Street in Warsaw — the location of the Russian embassy building — as Stepan Bandera Street, in honor of the far-right nationalist who led the Ukrainian Insurgent Army during WWII.<br><br> [...]<br><br> The letter is not dated, and Dmytro Kuleba’s signature seems to be copied from a publicly available letter signed by him in 2021.”</i><br><br> In this example the Telegram channel Joker DPR published a forged letter (T0085.004: Develop Document) in which they impersonated the Ukrainian Minister of Foreign Affairs (T0097.111: Government Official Persona, T0143.003: Impersonated Persona), using Ministry letterhead (T0097.206: Government Institution Persona, T0143.003: Impersonated Persona). |
|
||||
| [T0097.101 Local Persona](../../generated_pages/techniques/T0097.101.md) | IT00000238 <i>“The August 17 [2022] Telegram post [which contained a falsified letter from the Ukrainian Minister of Foreign Affairs asking Poland to rename Belwederska Street in Warsaw — the location of the Russian embassy building — as Stepan Bandera Street, in honor of the far-right nationalist who led the Ukrainian Insurgent Army during WWII] also contained screenshots of Facebook posts that appeared on two Facebook accounts belonging to Polish nationals Piotr Górka, an expert in the history of the Polish Air Force, and Dariusz Walusiak, a Polish historian and documentary maker. The Górka post suggested that he fully supported the Polish government’s decision to change Belwederska Street to Stepan Bandera Street.<br><br> “In a statement to the DFRLab, Górka said his account was accessed without his consent. “This is not my post loaded to my Facebook page,” he explained. “My site was hacked, some days ago.” At the time of publishing, Piotr Górka’s post and his Facebook account were no longer accessible.<br><br> “The post on Górka’s Facebook page was shared by Dariusz Walusiak’s Facebook account; the account also reposted it on the Facebook walls of more than twenty other Facebook users, including Adam Kalita, currently working at Krakow branch of the Institute of National Remembrance; Jan Kasprzyk, head of the Office for War Veterans and Victims of Oppression; and Alicja Kondraciuk, a Polish public figure living in Krakow.<br><br> “Walusiak’s Facebook account is also no longer accessible. 
Given his work on Polish history and identity, it seems highly unlikely he would support the Bandera measure; the DFRLab has also reached out to him for comment.<br><br> “The fact that Joker DPR’s Telegram post included screenshots of their Facebook posts raises the strong possibility that both Facebook accounts were compromised, and that hackers planted false statements on their pages that would seem out of character for them in order to gain further attention to the forged documents.”</I><br><br> In this example, threat actors used compromised accounts of Polish historians who have enough relevant knowledge to plausibly weigh in on the forged letter’s narrative (T0143.003: Impersonated Persona, T0097.101: Local Persona, T0097.108: Expert Persona, T0146: Account, T0150.005: Compromised, T0151.001: Social Media Platform). <br><br> This matches T0097.108: Expert Persona because the impersonation exploited Górka and Walusiak’s existing personas as experts in Polish history. |
|
||||
| [T0097.108 Expert Persona](../../generated_pages/techniques/T0097.108.md) | IT00000239 <i>“The August 17 [2022] Telegram post [which contained a falsified letter from the Ukrainian Minister of Foreign Affairs asking Poland to rename Belwederska Street in Warsaw — the location of the Russian embassy building — as Stepan Bandera Street, in honor of the far-right nationalist who led the Ukrainian Insurgent Army during WWII] also contained screenshots of Facebook posts that appeared on two Facebook accounts belonging to Polish nationals Piotr Górka, an expert in the history of the Polish Air Force, and Dariusz Walusiak, a Polish historian and documentary maker. The Górka post suggested that he fully supported the Polish government’s decision to change Belwederska Street to Stepan Bandera Street.<br><br> “In a statement to the DFRLab, Górka said his account was accessed without his consent. “This is not my post loaded to my Facebook page,” he explained. “My site was hacked, some days ago.” At the time of publishing, Piotr Górka’s post and his Facebook account were no longer accessible.<br><br> “The post on Górka’s Facebook page was shared by Dariusz Walusiak’s Facebook account; the account also reposted it on the Facebook walls of more than twenty other Facebook users, including Adam Kalita, currently working at Krakow branch of the Institute of National Remembrance; Jan Kasprzyk, head of the Office for War Veterans and Victims of Oppression; and Alicja Kondraciuk, a Polish public figure living in Krakow.<br><br> “Walusiak’s Facebook account is also no longer accessible. 
Given his work on Polish history and identity, it seems highly unlikely he would support the Bandera measure; the DFRLab has also reached out to him for comment.<br><br> “The fact that Joker DPR’s Telegram post included screenshots of their Facebook posts raises the strong possibility that both Facebook accounts were compromised, and that hackers planted false statements on their pages that would seem out of character for them in order to gain further attention to the forged documents.”</I><br><br> In this example, threat actors used compromised accounts of Polish historians who have enough relevant knowledge to plausibly weigh in on the forged letter’s narrative (T0143.003: Impersonated Persona, T0097.101: Local Persona, T0097.108: Expert Persona, T0146: Account, T0150.005: Compromised, T0151.001: Social Media Platform). <br><br> This matches T0097.108: Expert Persona because the impersonation exploited Górka and Walusiak’s existing personas as experts in Polish history. |
|
||||
| [T0097.101 Local Persona](../../generated_pages/techniques/T0097.101.md) | IT00000238 <i>“The August 17 [2022] Telegram post [which contained a falsified letter from the Ukrainian Minister of Foreign Affairs asking Poland to rename Belwederska Street in Warsaw — the location of the Russian embassy building — as Stepan Bandera Street, in honor of the far-right nationalist who led the Ukrainian Insurgent Army during WWII] also contained screenshots of Facebook posts that appeared on two Facebook accounts belonging to Polish nationals Piotr Górka, an expert in the history of the Polish Air Force, and Dariusz Walusiak, a Polish historian and documentary maker. The Górka post suggested that he fully supported the Polish government’s decision to change Belwederska Street to Stepan Bandera Street.<br><br> “In a statement to the DFRLab, Górka said his account was accessed without his consent. “This is not my post loaded to my Facebook page,” he explained. “My site was hacked, some days ago.” At the time of publishing, Piotr Górka’s post and his Facebook account were no longer accessible.<br><br> “The post on Górka’s Facebook page was shared by Dariusz Walusiak’s Facebook account; the account also reposted it on the Facebook walls of more than twenty other Facebook users, including Adam Kalita, currently working at Krakow branch of the Institute of National Remembrance; Jan Kasprzyk, head of the Office for War Veterans and Victims of Oppression; and Alicja Kondraciuk, a Polish public figure living in Krakow.<br><br> “Walusiak’s Facebook account is also no longer accessible. 
Given his work on Polish history and identity, it seems highly unlikely he would support the Bandera measure; the DFRLab has also reached out to him for comment.<br><br> “The fact that Joker DPR’s Telegram post included screenshots of their Facebook posts raises the strong possibility that both Facebook accounts were compromised, and that hackers planted false statements on their pages that would seem out of character for them in order to gain further attention to the forged documents.”</I><br><br> In this example, threat actors used compromised accounts of Polish historians who have enough relevant knowledge to plausibly weigh in on the forged letter’s narrative (T0143.003: Impersonated Persona, T0097.101: Local Persona, T0097.108: Expert Persona, T0146: Account Asset, T0150.005: Compromised Asset, T0151.001: Social Media Platform). <br><br> This matches T0097.108: Expert Persona because the impersonation exploited Górka and Walusiak’s existing personas as experts in Polish history. |
|
||||
| [T0097.108 Expert Persona](../../generated_pages/techniques/T0097.108.md) | IT00000239 <i>“The August 17 [2022] Telegram post [which contained a falsified letter from the Ukrainian Minister of Foreign Affairs asking Poland to rename Belwederska Street in Warsaw — the location of the Russian embassy building — as Stepan Bandera Street, in honor of the far-right nationalist who led the Ukrainian Insurgent Army during WWII] also contained screenshots of Facebook posts that appeared on two Facebook accounts belonging to Polish nationals Piotr Górka, an expert in the history of the Polish Air Force, and Dariusz Walusiak, a Polish historian and documentary maker. The Górka post suggested that he fully supported the Polish government’s decision to change Belwederska Street to Stepan Bandera Street.<br><br> “In a statement to the DFRLab, Górka said his account was accessed without his consent. “This is not my post loaded to my Facebook page,” he explained. “My site was hacked, some days ago.” At the time of publishing, Piotr Górka’s post and his Facebook account were no longer accessible.<br><br> “The post on Górka’s Facebook page was shared by Dariusz Walusiak’s Facebook account; the account also reposted it on the Facebook walls of more than twenty other Facebook users, including Adam Kalita, currently working at Krakow branch of the Institute of National Remembrance; Jan Kasprzyk, head of the Office for War Veterans and Victims of Oppression; and Alicja Kondraciuk, a Polish public figure living in Krakow.<br><br> “Walusiak’s Facebook account is also no longer accessible. 
Given his work on Polish history and identity, it seems highly unlikely he would support the Bandera measure; the DFRLab has also reached out to him for comment.<br><br> “The fact that Joker DPR’s Telegram post included screenshots of their Facebook posts raises the strong possibility that both Facebook accounts were compromised, and that hackers planted false statements on their pages that would seem out of character for them in order to gain further attention to the forged documents.”</I><br><br> In this example, threat actors used compromised accounts of Polish historians who have enough relevant knowledge to plausibly weigh in on the forged letter’s narrative (T0143.003: Impersonated Persona, T0097.101: Local Persona, T0097.108: Expert Persona, T0146: Account Asset, T0150.005: Compromised Asset, T0151.001: Social Media Platform). <br><br> This matches T0097.108: Expert Persona because the impersonation exploited Górka and Walusiak’s existing personas as experts in Polish history. |
|
||||
| [T0097.111 Government Official Persona](../../generated_pages/techniques/T0097.111.md) | IT00000327 <i>“On August 16, 2022, pro-Kremlin Telegram channel Joker DPR (Джокер ДНР) published a forged letter allegedly written by Ukrainian Foreign Minister Dmytro Kuleba. In the letter, Kuleba supposedly asked relevant Polish authorities to rename Belwederska Street in Warsaw — the location of the Russian embassy building — as Stepan Bandera Street, in honor of the far-right nationalist who led the Ukrainian Insurgent Army during WWII.<br><br> [...]<br><br> The letter is not dated, and Dmytro Kuleba’s signature seems to be copied from a publicly available letter signed by him in 2021.”</i><br><br> In this example the Telegram channel Joker DPR published a forged letter (T0085.004: Develop Document) in which they impersonated the Ukrainian Minister of Foreign Affairs (T0097.111: Government Official Persona, T0143.003: Impersonated Persona), using Ministry letterhead (T0097.206: Government Institution Persona, T0143.003: Impersonated Persona). |
|
||||
| [T0097.206 Government Institution Persona](../../generated_pages/techniques/T0097.206.md) | IT00000326 <i>“On August 16, 2022, pro-Kremlin Telegram channel Joker DPR (Джокер ДНР) published a forged letter allegedly written by Ukrainian Foreign Minister Dmytro Kuleba. In the letter, Kuleba supposedly asked relevant Polish authorities to rename Belwederska Street in Warsaw — the location of the Russian embassy building — as Stepan Bandera Street, in honor of the far-right nationalist who led the Ukrainian Insurgent Army during WWII.<br><br> [...]<br><br> The letter is not dated, and Dmytro Kuleba’s signature seems to be copied from a publicly available letter signed by him in 2021.”</i><br><br> In this example the Telegram channel Joker DPR published a forged letter (T0085.004: Develop Document) in which they impersonated the Ukrainian Minister of Foreign Affairs (T0097.111: Government Official Persona, T0143.003: Impersonated Persona), using Ministry letterhead (T0097.206: Government Institution Persona, T0143.003: Impersonated Persona). |
|
||||
| [T0143.003 Impersonated Persona](../../generated_pages/techniques/T0143.003.md) | IT00000237 <i>“The August 17 [2022] Telegram post [which contained a falsified letter from the Ukrainian Minister of Foreign Affairs asking Poland to rename Belwederska Street in Warsaw — the location of the Russian embassy building — as Stepan Bandera Street, in honor of the far-right nationalist who led the Ukrainian Insurgent Army during WWII] also contained screenshots of Facebook posts that appeared on two Facebook accounts belonging to Polish nationals Piotr Górka, an expert in the history of the Polish Air Force, and Dariusz Walusiak, a Polish historian and documentary maker. The Górka post suggested that he fully supported the Polish government’s decision to change Belwederska Street to Stepan Bandera Street.<br><br> “In a statement to the DFRLab, Górka said his account was accessed without his consent. “This is not my post loaded to my Facebook page,” he explained. “My site was hacked, some days ago.” At the time of publishing, Piotr Górka’s post and his Facebook account were no longer accessible.<br><br> “The post on Górka’s Facebook page was shared by Dariusz Walusiak’s Facebook account; the account also reposted it on the Facebook walls of more than twenty other Facebook users, including Adam Kalita, currently working at Krakow branch of the Institute of National Remembrance; Jan Kasprzyk, head of the Office for War Veterans and Victims of Oppression; and Alicja Kondraciuk, a Polish public figure living in Krakow.<br><br> “Walusiak’s Facebook account is also no longer accessible. 
Given his work on Polish history and identity, it seems highly unlikely he would support the Bandera measure; the DFRLab has also reached out to him for comment.<br><br> “The fact that Joker DPR’s Telegram post included screenshots of their Facebook posts raises the strong possibility that both Facebook accounts were compromised, and that hackers planted false statements on their pages that would seem out of character for them in order to gain further attention to the forged documents.”</I><br><br> In this example, threat actors used compromised accounts of Polish historians who have enough relevant knowledge to plausibly weigh in on the forged letter’s narrative (T0143.003: Impersonated Persona, T0097.101: Local Persona, T0097.108: Expert Persona, T0146: Account Asset, T0150.005: Compromised Asset, T0151.001: Social Media Platform). <br><br> This matches T0097.108: Expert Persona because the impersonation exploited Górka and Walusiak’s existing personas as experts in Polish history. |
|
||||
| [T0150.005 Compromised Asset](../../generated_pages/techniques/T0150.005.md) | IT00000236 <i>“The August 17 [2022] Telegram post [which contained a falsified letter from the Ukrainian Minister of Foreign Affairs asking Poland to rename Belwederska Street in Warsaw — the location of the Russian embassy building — as Stepan Bandera Street, in honor of the far-right nationalist who led the Ukrainian Insurgent Army during WWII] also contained screenshots of Facebook posts that appeared on two Facebook accounts belonging to Polish nationals Piotr Górka, an expert in the history of the Polish Air Force, and Dariusz Walusiak, a Polish historian and documentary maker. The Górka post suggested that he fully supported the Polish government’s decision to change Belwederska Street to Stepan Bandera Street.<br><br> “In a statement to the DFRLab, Górka said his account was accessed without his consent. “This is not my post loaded to my Facebook page,” he explained. “My site was hacked, some days ago.” At the time of publishing, Piotr Górka’s post and his Facebook account were no longer accessible.<br><br> “The post on Górka’s Facebook page was shared by Dariusz Walusiak’s Facebook account; the account also reposted it on the Facebook walls of more than twenty other Facebook users, including Adam Kalita, currently working at Krakow branch of the Institute of National Remembrance; Jan Kasprzyk, head of the Office for War Veterans and Victims of Oppression; and Alicja Kondraciuk, a Polish public figure living in Krakow.<br><br> “Walusiak’s Facebook account is also no longer accessible. 
Given his work on Polish history and identity, it seems highly unlikely he would support the Bandera measure; the DFRLab has also reached out to him for comment.<br><br> “The fact that Joker DPR’s Telegram post included screenshots of their Facebook posts raises the strong possibility that both Facebook accounts were compromised, and that hackers planted false statements on their pages that would seem out of character for them in order to gain further attention to the forged documents.”</I><br><br> In this example, threat actors used compromised accounts of Polish historians who have enough relevant knowledge to plausibly weigh in on the forged letter’s narrative (T0143.003: Impersonated Persona, T0097.101: Local Persona, T0097.108: Expert Persona, T0146: Account Asset, T0150.005: Compromised Asset, T0151.001: Social Media Platform). <br><br> This matches T0097.108: Expert Persona because the impersonation exploited Górka and Walusiak’s existing personas as experts in Polish history. |
|
||||
| [T0143.003 Impersonated Persona](../../generated_pages/techniques/T0143.003.md) | IT00000237 <i>“The August 17 [2022] Telegram post [which contained a falsified letter from the Ukrainian Minister of Foreign Affairs asking Poland to rename Belwederska Street in Warsaw — the location of the Russian embassy building — as Stepan Bandera Street, in honor of the far-right nationalist who led the Ukrainian Insurgent Army during WWII] also contained screenshots of Facebook posts that appeared on two Facebook accounts belonging to Polish nationals Piotr Górka, an expert in the history of the Polish Air Force, and Dariusz Walusiak, a Polish historian and documentary maker. The Górka post suggested that he fully supported the Polish government’s decision to change Belwederska Street to Stepan Bandera Street.<br><br> “In a statement to the DFRLab, Górka said his account was accessed without his consent. “This is not my post loaded to my Facebook page,” he explained. “My site was hacked, some days ago.” At the time of publishing, Piotr Górka’s post and his Facebook account were no longer accessible.<br><br> “The post on Górka’s Facebook page was shared by Dariusz Walusiak’s Facebook account; the account also reposted it on the Facebook walls of more than twenty other Facebook users, including Adam Kalita, currently working at Krakow branch of the Institute of National Remembrance; Jan Kasprzyk, head of the Office for War Veterans and Victims of Oppression; and Alicja Kondraciuk, a Polish public figure living in Krakow.<br><br> “Walusiak’s Facebook account is also no longer accessible. 
Given his work on Polish history and identity, it seems highly unlikely he would support the Bandera measure; the DFRLab has also reached out to him for comment.<br><br> “The fact that Joker DPR’s Telegram post included screenshots of their Facebook posts raises the strong possibility that both Facebook accounts were compromised, and that hackers planted false statements on their pages that would seem out of character for them in order to gain further attention to the forged documents.”</I><br><br> In this example, threat actors used compromised accounts of Polish historians who have enough relevant knowledge to plausibly weigh in on the forged letter’s narrative (T0143.003: Impersonated Persona, T0097.101: Local Persona, T0097.108: Expert Persona, T0146: Account Asset, T0150.005: Compromised Asset, T0151.001: Social Media Platform). <br><br> This matches T0097.108: Expert Persona because the impersonation exploited Górka and Walusiak’s existing personas as experts in Polish history. |
|
||||
| [T0150.005 Compromised Asset](../../generated_pages/techniques/T0150.005.md) | IT00000236 <i>“The August 17 [2022] Telegram post [which contained a falsified letter from the Ukrainian Minister of Foreign Affairs asking Poland to rename Belwederska Street in Warsaw — the location of the Russian embassy building — as Stepan Bandera Street, in honor of the far-right nationalist who led the Ukrainian Insurgent Army during WWII] also contained screenshots of Facebook posts that appeared on two Facebook accounts belonging to Polish nationals Piotr Górka, an expert in the history of the Polish Air Force, and Dariusz Walusiak, a Polish historian and documentary maker. The Górka post suggested that he fully supported the Polish government’s decision to change Belwederska Street to Stepan Bandera Street.<br><br> “In a statement to the DFRLab, Górka said his account was accessed without his consent. “This is not my post loaded to my Facebook page,” he explained. “My site was hacked, some days ago.” At the time of publishing, Piotr Górka’s post and his Facebook account were no longer accessible.<br><br> “The post on Górka’s Facebook page was shared by Dariusz Walusiak’s Facebook account; the account also reposted it on the Facebook walls of more than twenty other Facebook users, including Adam Kalita, currently working at Krakow branch of the Institute of National Remembrance; Jan Kasprzyk, head of the Office for War Veterans and Victims of Oppression; and Alicja Kondraciuk, a Polish public figure living in Krakow.<br><br> “Walusiak’s Facebook account is also no longer accessible. 
Given his work on Polish history and identity, it seems highly unlikely he would support the Bandera measure; the DFRLab has also reached out to him for comment.<br><br> “The fact that Joker DPR’s Telegram post included screenshots of their Facebook posts raises the strong possibility that both Facebook accounts were compromised, and that hackers planted false statements on their pages that would seem out of character for them in order to gain further attention to the forged documents.”</I><br><br> In this example, threat actors used compromised accounts of Polish historians who have enough relevant knowledge to plausibly weigh in on the forged letter’s narrative (T0143.003: Impersonated Persona, T0097.101: Local Persona, T0097.108: Expert Persona, T0146: Account Asset, T0150.005: Compromised Asset, T0151.001: Social Media Platform). <br><br> This matches T0097.108: Expert Persona because the impersonation exploited Górka and Walusiak’s existing personas as experts in Polish history. |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -21,10 +21,10 @@
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0097.204 Think Tank Persona](../../generated_pages/techniques/T0097.204.md) | IT00000244 <I>“The creator of Geopolitika[.]ru is Aleksandr Dugin, who was sanctioned by the United States Department of Treasury in 2015 for his role in the Eurasian Youth Union “for being responsible for or complicit in actions or policies that threaten the peace, security, stability, or sovereignty or territorial integrity of Ukraine.”<br><br> [...]<br><br> “Currently, the website geopolika[.]ru redirects directly to another partner website, Katehon.<br><br> “Katehon poses itself as a think tank focused on geopolitics in an English edition of its website. In contrast, in Russian, it states its aim to develop “ideological, political, diplomatic, economic and military strategy for Russia of the future” with a special role of religion. The president of Katehon’s supervisory board is Konstantin Malofeev, a Russian millionaire with connections to the Russian orthodox church and presidential administration, who founded Tsargrad TV, a known source of disinformation. Malofeev was sanctioned by the U.S. Department of Treasury and the European Union in 2014 for material support and financial backing of Russian-backed separatists in eastern Ukraine. Another known figure from the board is Sergei Glaziev, former advisor to Putin in 2012–2019. Dugin is also on the board in the Russian edition of the website, whereas he is omitted in English.”</i><br><br>In this example a domain managed by an actor previously sanctioned by the US department of treasury has been reconfigured to redirect to another website; Katehon (T0149.004: Redirecting Domain Asset, T0150.004: Repurposed Asset).<br><br> Katehon presents itself as a geopolitical think tank in English, but does not maintain this persona when presenting itself to a Russian speaking audience (T0097.204: Think Tank Persona, T0152.004: Website Asset, T0155.004: Geoblocked Asset). |
|
||||
| [T0149.004 Redirecting Domain Asset](../../generated_pages/techniques/T0149.004.md) | IT00000243 <I>“The creator of Geopolitika[.]ru is Aleksandr Dugin, who was sanctioned by the United States Department of Treasury in 2015 for his role in the Eurasian Youth Union “for being responsible for or complicit in actions or policies that threaten the peace, security, stability, or sovereignty or territorial integrity of Ukraine.”<br><br> [...]<br><br> “Currently, the website geopolika[.]ru redirects directly to another partner website, Katehon.<br><br> “Katehon poses itself as a think tank focused on geopolitics in an English edition of its website. In contrast, in Russian, it states its aim to develop “ideological, political, diplomatic, economic and military strategy for Russia of the future” with a special role of religion. The president of Katehon’s supervisory board is Konstantin Malofeev, a Russian millionaire with connections to the Russian orthodox church and presidential administration, who founded Tsargrad TV, a known source of disinformation. Malofeev was sanctioned by the U.S. Department of Treasury and the European Union in 2014 for material support and financial backing of Russian-backed separatists in eastern Ukraine. Another known figure from the board is Sergei Glaziev, former advisor to Putin in 2012–2019. Dugin is also on the board in the Russian edition of the website, whereas he is omitted in English.”</i><br><br>In this example a domain managed by an actor previously sanctioned by the US department of treasury has been reconfigured to redirect to another website; Katehon (T0149.004: Redirecting Domain Asset, T0150.004: Repurposed Asset).<br><br> Katehon presents itself as a geopolitical think tank in English, but does not maintain this persona when presenting itself to a Russian speaking audience (T0097.204: Think Tank Persona, T0152.004: Website Asset, T0155.004: Geoblocked Asset). |
|
||||
| [T0150.004 Repurposed Asset](../../generated_pages/techniques/T0150.004.md) | IT00000345 <I>“The creator of Geopolitika[.]ru is Aleksandr Dugin, who was sanctioned by the United States Department of Treasury in 2015 for his role in the Eurasian Youth Union “for being responsible for or complicit in actions or policies that threaten the peace, security, stability, or sovereignty or territorial integrity of Ukraine.”<br><br> [...]<br><br> “Currently, the website geopolika[.]ru redirects directly to another partner website, Katehon.<br><br> “Katehon poses itself as a think tank focused on geopolitics in an English edition of its website. In contrast, in Russian, it states its aim to develop “ideological, political, diplomatic, economic and military strategy for Russia of the future” with a special role of religion. The president of Katehon’s supervisory board is Konstantin Malofeev, a Russian millionaire with connections to the Russian orthodox church and presidential administration, who founded Tsargrad TV, a known source of disinformation. Malofeev was sanctioned by the U.S. Department of Treasury and the European Union in 2014 for material support and financial backing of Russian-backed separatists in eastern Ukraine. Another known figure from the board is Sergei Glaziev, former advisor to Putin in 2012–2019. Dugin is also on the board in the Russian edition of the website, whereas he is omitted in English.”</i><br><br>In this example a domain managed by an actor previously sanctioned by the US department of treasury has been reconfigured to redirect to another website; Katehon (T0149.004: Redirecting Domain Asset, T0150.004: Repurposed Asset).<br><br> Katehon presents itself as a geopolitical think tank in English, but does not maintain this persona when presenting itself to a Russian speaking audience (T0097.204: Think Tank Persona, T0152.004: Website Asset, T0155.004: Geoblocked Asset). |
|
||||
| [T0155.004 Geoblocked Asset](../../generated_pages/techniques/T0155.004.md) | IT00000346 <I>“The creator of Geopolitika[.]ru is Aleksandr Dugin, who was sanctioned by the United States Department of Treasury in 2015 for his role in the Eurasian Youth Union “for being responsible for or complicit in actions or policies that threaten the peace, security, stability, or sovereignty or territorial integrity of Ukraine.”<br><br> [...]<br><br> “Currently, the website geopolika[.]ru redirects directly to another partner website, Katehon.<br><br> “Katehon poses itself as a think tank focused on geopolitics in an English edition of its website. In contrast, in Russian, it states its aim to develop “ideological, political, diplomatic, economic and military strategy for Russia of the future” with a special role of religion. The president of Katehon’s supervisory board is Konstantin Malofeev, a Russian millionaire with connections to the Russian orthodox church and presidential administration, who founded Tsargrad TV, a known source of disinformation. Malofeev was sanctioned by the U.S. Department of Treasury and the European Union in 2014 for material support and financial backing of Russian-backed separatists in eastern Ukraine. Another known figure from the board is Sergei Glaziev, former advisor to Putin in 2012–2019. Dugin is also on the board in the Russian edition of the website, whereas he is omitted in English.”</i><br><br>In this example a domain managed by an actor previously sanctioned by the US department of treasury has been reconfigured to redirect to another website; Katehon (T0149.004: Redirecting Domain Asset, T0150.004: Repurposed Asset).<br><br> Katehon presents itself as a geopolitical think tank in English, but does not maintain this persona when presenting itself to a Russian speaking audience (T0097.204: Think Tank Persona, T0152.004: Website Asset, T0155.004: Geoblocked Asset). |
|
||||
| [T0097.204 Think Tank Persona](../../generated_pages/techniques/T0097.204.md) | IT00000244 <I>“The creator of Geopolitika[.]ru is Aleksandr Dugin, who was sanctioned by the United States Department of Treasury in 2015 for his role in the Eurasian Youth Union “for being responsible for or complicit in actions or policies that threaten the peace, security, stability, or sovereignty or territorial integrity of Ukraine.”<br><br> [...]<br><br> “Currently, the website geopolika[.]ru redirects directly to another partner website, Katehon.<br><br> “Katehon poses itself as a think tank focused on geopolitics in an English edition of its website. In contrast, in Russian, it states its aim to develop “ideological, political, diplomatic, economic and military strategy for Russia of the future” with a special role of religion. The president of Katehon’s supervisory board is Konstantin Malofeev, a Russian millionaire with connections to the Russian orthodox church and presidential administration, who founded Tsargrad TV, a known source of disinformation. Malofeev was sanctioned by the U.S. Department of Treasury and the European Union in 2014 for material support and financial backing of Russian-backed separatists in eastern Ukraine. Another known figure from the board is Sergei Glaziev, former advisor to Putin in 2012–2019. Dugin is also on the board in the Russian edition of the website, whereas he is omitted in English.”</i><br><br>In this example a domain managed by an actor previously sanctioned by the US department of treasury has been reconfigured to redirect to another website; Katehon (T0149.004: Redirecting Domain Asset, T0150.004: Repurposed Asset).<br><br> Katehon presents itself as a geopolitical think tank in English, but does not maintain this persona when presenting itself to a Russian speaking audience (T0097.204: Think Tank Persona, T0152.004: Website Asset, T0155.004: Geoblocked Asset). |
|
||||
| [T0149.004 Redirecting Domain Asset](../../generated_pages/techniques/T0149.004.md) | IT00000243 <I>“The creator of Geopolitika[.]ru is Aleksandr Dugin, who was sanctioned by the United States Department of Treasury in 2015 for his role in the Eurasian Youth Union “for being responsible for or complicit in actions or policies that threaten the peace, security, stability, or sovereignty or territorial integrity of Ukraine.”<br><br> [...]<br><br> “Currently, the website geopolika[.]ru redirects directly to another partner website, Katehon.<br><br> “Katehon poses itself as a think tank focused on geopolitics in an English edition of its website. In contrast, in Russian, it states its aim to develop “ideological, political, diplomatic, economic and military strategy for Russia of the future” with a special role of religion. The president of Katehon’s supervisory board is Konstantin Malofeev, a Russian millionaire with connections to the Russian orthodox church and presidential administration, who founded Tsargrad TV, a known source of disinformation. Malofeev was sanctioned by the U.S. Department of Treasury and the European Union in 2014 for material support and financial backing of Russian-backed separatists in eastern Ukraine. Another known figure from the board is Sergei Glaziev, former advisor to Putin in 2012–2019. Dugin is also on the board in the Russian edition of the website, whereas he is omitted in English.”</i><br><br>In this example a domain managed by an actor previously sanctioned by the US department of treasury has been reconfigured to redirect to another website; Katehon (T0149.004: Redirecting Domain Asset, T0150.004: Repurposed Asset).<br><br> Katehon presents itself as a geopolitical think tank in English, but does not maintain this persona when presenting itself to a Russian speaking audience (T0097.204: Think Tank Persona, T0152.004: Website Asset, T0155.004: Geoblocked Asset). |
|
||||
| [T0150.004 Repurposed Asset](../../generated_pages/techniques/T0150.004.md) | IT00000345 <I>“The creator of Geopolitika[.]ru is Aleksandr Dugin, who was sanctioned by the United States Department of Treasury in 2015 for his role in the Eurasian Youth Union “for being responsible for or complicit in actions or policies that threaten the peace, security, stability, or sovereignty or territorial integrity of Ukraine.”<br><br> [...]<br><br> “Currently, the website geopolika[.]ru redirects directly to another partner website, Katehon.<br><br> “Katehon poses itself as a think tank focused on geopolitics in an English edition of its website. In contrast, in Russian, it states its aim to develop “ideological, political, diplomatic, economic and military strategy for Russia of the future” with a special role of religion. The president of Katehon’s supervisory board is Konstantin Malofeev, a Russian millionaire with connections to the Russian orthodox church and presidential administration, who founded Tsargrad TV, a known source of disinformation. Malofeev was sanctioned by the U.S. Department of Treasury and the European Union in 2014 for material support and financial backing of Russian-backed separatists in eastern Ukraine. Another known figure from the board is Sergei Glaziev, former advisor to Putin in 2012–2019. Dugin is also on the board in the Russian edition of the website, whereas he is omitted in English.”</i><br><br>In this example a domain managed by an actor previously sanctioned by the US department of treasury has been reconfigured to redirect to another website; Katehon (T0149.004: Redirecting Domain Asset, T0150.004: Repurposed Asset).<br><br> Katehon presents itself as a geopolitical think tank in English, but does not maintain this persona when presenting itself to a Russian speaking audience (T0097.204: Think Tank Persona, T0152.004: Website Asset, T0155.004: Geoblocked Asset). |
|
||||
| [T0155.004 Geoblocked Asset](../../generated_pages/techniques/T0155.004.md) | IT00000346 <I>“The creator of Geopolitika[.]ru is Aleksandr Dugin, who was sanctioned by the United States Department of Treasury in 2015 for his role in the Eurasian Youth Union “for being responsible for or complicit in actions or policies that threaten the peace, security, stability, or sovereignty or territorial integrity of Ukraine.”<br><br> [...]<br><br> “Currently, the website geopolika[.]ru redirects directly to another partner website, Katehon.<br><br> “Katehon poses itself as a think tank focused on geopolitics in an English edition of its website. In contrast, in Russian, it states its aim to develop “ideological, political, diplomatic, economic and military strategy for Russia of the future” with a special role of religion. The president of Katehon’s supervisory board is Konstantin Malofeev, a Russian millionaire with connections to the Russian orthodox church and presidential administration, who founded Tsargrad TV, a known source of disinformation. Malofeev was sanctioned by the U.S. Department of Treasury and the European Union in 2014 for material support and financial backing of Russian-backed separatists in eastern Ukraine. Another known figure from the board is Sergei Glaziev, former advisor to Putin in 2012–2019. Dugin is also on the board in the Russian edition of the website, whereas he is omitted in English.”</i><br><br>In this example a domain managed by an actor previously sanctioned by the US department of treasury has been reconfigured to redirect to another website; Katehon (T0149.004: Redirecting Domain Asset, T0150.004: Repurposed Asset).<br><br> Katehon presents itself as a geopolitical think tank in English, but does not maintain this persona when presenting itself to a Russian speaking audience (T0097.204: Think Tank Persona, T0152.004: Website Asset, T0155.004: Geoblocked Asset). |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -24,7 +24,7 @@
|
||||
| [T0097.106 Recruiter Persona](../../generated_pages/techniques/T0097.106.md) | IT00000248 <i>“A few press investigations have alluded to the [Russia’s Internet Research Agency]’s job ads. The extent of the human asset recruitment strategy is revealed in the organic data set. It is expansive, and was clearly a priority. Posts encouraging Americans to perform various types of tasks for IRA handlers appeared in Black, Left, and Right-targeted groups, though they were most numerous in the Black community. They included:<br> <br>- Requests for contact with preachers from Black churches (Black_Baptist_Church) <br>- Offers of free counselling to people with sexual addiction (Army of Jesus) <br>- Soliciting volunteers to hand out fliers <br>- Soliciting volunteers to teach self-defense classes <br>- Offering free self-defense classes (Black Fist/Fit Black) <br>- Requests for followers to attend political rallies <br>- Requests for photographers to document protests <br>- Requests for speakers at protests <br>- Requests to protest the Westborough Baptist Church (LGBT United) <br>- Job offers for designers to help design fliers, sites, Facebook sticker packs <br>- Requests for female followers to send photos for a calendar <br>- Requests for followers to send photos to be shared to the Page (Back the Badge) <br>- Soliciting videos for a YouTube contest called “Pee on Hillary” <br>- Encouraging people to apply to be part of a Black reality TV show <br>- Posting a wide variety of job ads (write for BlackMattersUS and others) <br>- Requests for lawyers to volunteer to assist with immigration cases”</i> <br><br> This behaviour matches T0097.106: Recruiter Persona because the threat actors are presenting tasks for their target audience to complete in the style of a job posting (even though some of the tasks were presented as voluntary / unpaid efforts), including calls for people to attend political rallies (T0126.001: Call to Action to Attend). |
|
||||
| [T0097.202 News Outlet Persona](../../generated_pages/techniques/T0097.202.md) | IT00000250 <i>“The Black Matters Facebook Page [operated by Russia’s Internet Research Agency] explored several visual brand identities, moving from a plain logo to a gothic typeface on Jan 19th, 2016. On February 4th, 2016, the person who ran the Facebook Page announced the launch of the website, blackmattersus[.]com, emphasizing media distrust and a desire to build Black independent media; [“I DIDN’T BELIEVE THE MEDIA / SO I BECAME ONE”]”</i><br><br> In this example an asset controlled by Russia’s Internet Research Agency began to present itself as a source of “Black independent media”, claiming that the media could not be trusted (T0097.208: Social Cause Persona, T0097.202: News Outlet Persona, T0143.002: Fabricated Persona). |
|
||||
| [T0097.204 Think Tank Persona](../../generated_pages/techniques/T0097.204.md) | IT00000245 <I>“[Russia’s Internet Research Agency, the IRA] pushed narratives with longform blog content. They created media properties, websites designed to produce stories that would resonate with those targeted. It appears, based on the data set provided by Alphabet, that the IRA may have also expanded into think tank-style communiques. One such page, previously unattributed to the IRA but included in the Alphabet data, was GI Analytics, a geopolitics blog with an international masthead that included American authors. This page was promoted via AdWords and YouTube videos; it has strong ties to more traditional Russian propaganda networks, which will be discussed later in this analysis. GI Analytics wrote articles articulating nuanced academic positions on a variety of sophisticated topics. From the site’s About page:<br><br> ““Our purpose and mission are to provide high-quality analysis at a time when we are faced with a multitude of crises, a collapsing global economy, imperialist wars, environmental disasters, corporate greed, terrorism, deceit, GMO food, a migration crisis and a crackdown on small farmers and ranchers.””</i><br><br> In this example Alphabet’s technical indicators allowed them to assert that GI Analytics, which presented itself as a think tank, was a fabricated institution associated with Russia’s Internet Research Agency (T0097.204: Think Tank Persona, T0143.002: Fabricated Persona). |
|
||||
| [T0097.208 Social Cause Persona](../../generated_pages/techniques/T0097.208.md) | IT00000251 <i>“The Black Matters Facebook Page [operated by Russia’s Internet Research Agency] explored several visual brand identities, moving from a plain logo to a gothic typeface on Jan 19th, 2016. On February 4th, 2016, the person who ran the Facebook Page announced the launch of the website, blackmattersus[.]com, emphasizing media distrust and a desire to build Black independent media; [“I DIDN’T BELIEVE THE MEDIA / SO I BECAME ONE”]”</i><br><br> In this example an asset controlled by Russia’s Internet Research Agency began to present itself as a source of “Black independent media”, claiming that the media could not be trusted (T0097.208: Social Cause Persona, T0097.202: News Outlet Persona, T0143.002: Fabricated Persona). |
|
||||
| [T0097.208 Social Cause Persona](../../generated_pages/techniques/T0097.208.md) | IT00000251 <i>“The Black Matters Facebook Page [operated by Russia’s Internet Research Agency] explored several visual brand identities, moving from a plain logo to a gothic typeface on Jan 19th, 2016. On February 4th, 2016, the person who ran the Facebook Page announced the launch of the website, blackmattersus[.]com, emphasizing media distrust and a desire to build Black independent media; [“I DIDN’T BELIEVE THE MEDIA / SO I BECAME ONE”]”</i><br><br> In this example an asset controlled by Russia’s Internet Research Agency began to present itself as a source of “Black independent media”, claiming that the media could not be trusted (T0097.208: Social Cause Persona, T0097.202: News Outlet Persona, T0143.002: Fabricated Persona). |
|
||||
| [T0126.001 Call to Action to Attend](../../generated_pages/techniques/T0126.001.md) | IT00000247 <i>“A few press investigations have alluded to the [Russia’s Internet Research Agency]’s job ads. The extent of the human asset recruitment strategy is revealed in the organic data set. It is expansive, and was clearly a priority. Posts encouraging Americans to perform various types of tasks for IRA handlers appeared in Black, Left, and Right-targeted groups, though they were most numerous in the Black community. They included:<br> <br>- Requests for contact with preachers from Black churches (Black_Baptist_Church) <br>- Offers of free counselling to people with sexual addiction (Army of Jesus) <br>- Soliciting volunteers to hand out fliers <br>- Soliciting volunteers to teach self-defense classes <br>- Offering free self-defense classes (Black Fist/Fit Black) <br>- Requests for followers to attend political rallies <br>- Requests for photographers to document protests <br>- Requests for speakers at protests <br>- Requests to protest the Westborough Baptist Church (LGBT United) <br>- Job offers for designers to help design fliers, sites, Facebook sticker packs <br>- Requests for female followers to send photos for a calendar <br>- Requests for followers to send photos to be shared to the Page (Back the Badge) <br>- Soliciting videos for a YouTube contest called “Pee on Hillary” <br>- Encouraging people to apply to be part of a Black reality TV show <br>- Posting a wide variety of job ads (write for BlackMattersUS and others) <br>- Requests for lawyers to volunteer to assist with immigration cases”</i> <br><br> This behaviour matches T0097.106: Recruiter Persona because the threat actors are presenting tasks for their target audience to complete in the style of a job posting (even though some of the tasks were presented as voluntary / unpaid efforts), including calls for people to attend political rallies (T0126.001: Call to Action to Attend). |
|
||||
| [T0143.002 Fabricated Persona](../../generated_pages/techniques/T0143.002.md) | IT00000249 <i>“The Black Matters Facebook Page [operated by Russia’s Internet Research Agency] explored several visual brand identities, moving from a plain logo to a gothic typeface on Jan 19th, 2016. On February 4th, 2016, the person who ran the Facebook Page announced the launch of the website, blackmattersus[.]com, emphasizing media distrust and a desire to build Black independent media; [“I DIDN’T BELIEVE THE MEDIA / SO I BECAME ONE”]”</i><br><br> In this example an asset controlled by Russia’s Internet Research Agency began to present itself as a source of “Black independent media”, claiming that the media could not be trusted (T0097.208: Social Cause Persona, T0097.202: News Outlet Persona, T0143.002: Fabricated Persona). |
|
||||
|
||||
|
@ -26,6 +26,7 @@
|
||||
| [T0131 Exploit TOS/Content Moderation](../../generated_pages/techniques/T0131.md) | IT00000338 <i>“After the European Union banned Kremlin-backed media outlets and social media giants demoted their posts for peddling falsehoods about the war in Ukraine, Moscow has turned to its cadre of diplomats, government spokespeople and ministers — many of whom have extensive followings on social media — to promote disinformation about the conflict in Eastern Europe, according to four EU and United States officials.”</i><br><br>In this example authentic Russian government officials used their own accounts to promote false narratives (T0143.001: Authentic Persona, T0097.111: Government Official Persona).<br><br>The use of accounts managed by authentic Government / Diplomats to spread false narratives makes it harder for platforms to enforce content moderation, because of the political ramifications they may face for censoring elected officials (T0131: Exploit TOS/Content Moderation). For example, Twitter previously argued that official channels of world leaders are not removed due to the high public interest associated with their activities. |
|
||||
| [T0143.001 Authentic Persona](../../generated_pages/techniques/T0143.001.md) | IT00000336 <i>“After the European Union banned Kremlin-backed media outlets and social media giants demoted their posts for peddling falsehoods about the war in Ukraine, Moscow has turned to its cadre of diplomats, government spokespeople and ministers — many of whom have extensive followings on social media — to promote disinformation about the conflict in Eastern Europe, according to four EU and United States officials.”</i><br><br>In this example authentic Russian government officials used their own accounts to promote false narratives (T0143.001: Authentic Persona, T0097.111: Government Official Persona).<br><br>The use of accounts managed by authentic Government / Diplomats to spread false narratives makes it harder for platforms to enforce content moderation, because of the political ramifications they may face for censoring elected officials (T0131: Exploit TOS/Content Moderation). For example, Twitter previously argued that official channels of world leaders are not removed due to the high public interest associated with their activities. |
|
||||
| [T0151.004 Chat Platform](../../generated_pages/techniques/T0151.004.md) | IT00000294 <I>“[Russia’s social media] reach isn't the same as Russian state media, but they are trying to recreate what RT and Sputnik had done," said one EU official involved in tracking Russian disinformation. "It's a coordinated effort that goes beyond social media and involves specific websites."<br><br> “Central to that wider online playbook is a Telegram channel called Warfakes and an affiliated website. Since the beginning of the conflict, that social media channel has garnered more than 725,000 members and repeatedly shares alleged fact-checks aimed at debunking Ukrainian narratives, using language similar to Western-style fact-checking outlets.”</i><br><br> In this example a Telegram channel (T0151.004: Chat Platform, T0155.007: Encrypted Communication Channel) was established which presented itself as a source of fact checks (T0097.203: Fact Checking Organisation Persona). |
|
||||
| [T0151.004 Chat Platform](../../generated_pages/techniques/T0151.004.md) | IT00000548 <I>“[Russia’s social media] reach isn't the same as Russian state media, but they are trying to recreate what RT and Sputnik had done," said one EU official involved in tracking Russian disinformation. "It's a coordinated effort that goes beyond social media and involves specific websites."<br><br> “Central to that wider online playbook is a Telegram channel called Warfakes and an affiliated website. Since the beginning of the conflict, that social media channel has garnered more than 725,000 members and repeatedly shares alleged fact-checks aimed at debunking Ukrainian narratives, using language similar to Western-style fact-checking outlets.”</i><br><br> In this example a Telegram channel (T0151.004: Chat Platform, T0155.007: Encrypted Communication Channel) was established which presented itself as a source of fact checks (T0097.203: Fact Checking Organisation Persona). |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -21,6 +21,7 @@
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0097.109 Romantic Suitor Persona](../../generated_pages/techniques/T0097.109.md) | IT00000313 <I>“On Facebook, Rita, Alona and Christina appeared to be just like the millions of other U.S citizens sharing their lives with the world. They discussed family outings, shared emojis and commented on each other's photographs.<br><br> “In reality, the three accounts were part of a highly-targeted cybercrime operation, used to spread malware that was able to steal passwords and spy on victims.<br><br> “Hackers with links to Lebanon likely ran the covert scheme using a strain of malware dubbed "Tempting Cedar Spyware," according to researchers from Prague-based anti-virus company Avast, which detailed its findings in a report released on Wednesday.<br><br> “In a honey trap tactic as old as time, the culprits' targets were mostly male, and lured by fake attractive women. <br><br> “In the attack, hackers would send flirtatious messages using Facebook to the chosen victims, encouraging them to download a second , booby-trapped, chat application known as Kik Messenger to have "more secure" conversations. Upon analysis, Avast experts found that "many fell for the trap.””</i><br><br> In this example threat actors took on the persona of a romantic suitor on Facebook, directing their targets to another platform (T0097.109: Romantic Suitor Persona, T0145.006: Attractive Person Account Imagery, T0143.002: Fabricated Persona). |
|
||||
| [T0143.002 Fabricated Persona](../../generated_pages/techniques/T0143.002.md) | IT00000314 <I>“On Facebook, Rita, Alona and Christina appeared to be just like the millions of other U.S citizens sharing their lives with the world. They discussed family outings, shared emojis and commented on each other's photographs.<br><br> “In reality, the three accounts were part of a highly-targeted cybercrime operation, used to spread malware that was able to steal passwords and spy on victims.<br><br> “Hackers with links to Lebanon likely ran the covert scheme using a strain of malware dubbed "Tempting Cedar Spyware," according to researchers from Prague-based anti-virus company Avast, which detailed its findings in a report released on Wednesday.<br><br> “In a honey trap tactic as old as time, the culprits' targets were mostly male, and lured by fake attractive women. <br><br> “In the attack, hackers would send flirtatious messages using Facebook to the chosen victims, encouraging them to download a second , booby-trapped, chat application known as Kik Messenger to have "more secure" conversations. Upon analysis, Avast experts found that "many fell for the trap.””</i><br><br> In this example threat actors took on the persona of a romantic suitor on Facebook, directing their targets to another platform (T0097.109: Romantic Suitor Persona, T0145.006: Attractive Person Account Imagery, T0143.002: Fabricated Persona). |
|
||||
| [T0145.006 Attractive Person Account Imagery](../../generated_pages/techniques/T0145.006.md) | IT00000312 <I>“On Facebook, Rita, Alona and Christina appeared to be just like the millions of other U.S citizens sharing their lives with the world. They discussed family outings, shared emojis and commented on each other's photographs.<br><br> “In reality, the three accounts were part of a highly-targeted cybercrime operation, used to spread malware that was able to steal passwords and spy on victims.<br><br> “Hackers with links to Lebanon likely ran the covert scheme using a strain of malware dubbed "Tempting Cedar Spyware," according to researchers from Prague-based anti-virus company Avast, which detailed its findings in a report released on Wednesday.<br><br> “In a honey trap tactic as old as time, the culprits' targets were mostly male, and lured by fake attractive women. <br><br> “In the attack, hackers would send flirtatious messages using Facebook to the chosen victims, encouraging them to download a second , booby-trapped, chat application known as Kik Messenger to have "more secure" conversations. Upon analysis, Avast experts found that "many fell for the trap.””</i><br><br> In this example threat actors took on the persona of a romantic suitor on Facebook, directing their targets to another platform (T0097.109: Romantic Suitor Persona, T0145.006: Attractive Person Account Imagery, T0143.002: Fabricated Persona). |
|
||||
|
||||
|
@ -21,9 +21,9 @@
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0097.111 Government Official Persona](../../generated_pages/techniques/T0097.111.md) | IT00000343 <i>“On October 23, Canada’s Foreign Ministry said it had discovered a disinformation campaign, likely tied to China, aimed at discrediting dozens of Canadian politicians, including Prime Minister Justin Trudeau.<br><br> “The ministry said the campaign took place in August and September. It used new and hijacked social media accounts to bulk-post messages targeting Canadian politicians (T0146: Account, T0150.001: Newly Created, T0150.005: Compromised).<br><br> “A Chinese Embassy in Canada spokesperson dismissed Canada’s accusation as baseless.<br><br> ““Canada was a downright liar and disseminator of false information… Beijing has never meddled in another nation’s domestic affairs.”<br><br> “A Chinese Embassy in Canada spokesperson dismissed Canada’s accusation as baseless.<br><br> “That is false.<br><br> “The Canadian government's report is based on an investigation conducted by its Rapid Response Mechanism cyber intelligence unit in cooperation with the social media platforms.<br><br> “The investigation exposed China’s disinformation campaign dubbed “Spamouflage” -- for its tactic of using “a network of new or hijacked social media accounts that posts and increases the number of propaganda messages across multiple social media platforms – including Facebook, X/Twitter, Instagram, YouTube, Medium, Reddit, TikTok, and LinkedIn.””</i><br><br> In this case a network of accounts attributed to China were identified operating on multiple platforms. The report was dismissed as false information by an official in the Chinese Embassy in Canada (T0143.001: Authentic Persona, T0097.111: Government Official Persona, T0129.006: Deny Involvement). |
|
||||
| [T0129.006 Deny Involvement](../../generated_pages/techniques/T0129.006.md) | IT00000344 <i>“On October 23, Canada’s Foreign Ministry said it had discovered a disinformation campaign, likely tied to China, aimed at discrediting dozens of Canadian politicians, including Prime Minister Justin Trudeau.<br><br> “The ministry said the campaign took place in August and September. It used new and hijacked social media accounts to bulk-post messages targeting Canadian politicians (T0146: Account, T0150.001: Newly Created, T0150.005: Compromised).<br><br> “A Chinese Embassy in Canada spokesperson dismissed Canada’s accusation as baseless.<br><br> ““Canada was a downright liar and disseminator of false information… Beijing has never meddled in another nation’s domestic affairs.”<br><br> “A Chinese Embassy in Canada spokesperson dismissed Canada’s accusation as baseless.<br><br> “That is false.<br><br> “The Canadian government's report is based on an investigation conducted by its Rapid Response Mechanism cyber intelligence unit in cooperation with the social media platforms.<br><br> “The investigation exposed China’s disinformation campaign dubbed “Spamouflage” -- for its tactic of using “a network of new or hijacked social media accounts that posts and increases the number of propaganda messages across multiple social media platforms – including Facebook, X/Twitter, Instagram, YouTube, Medium, Reddit, TikTok, and LinkedIn.””</i><br><br> In this case a network of accounts attributed to China were identified operating on multiple platforms. The report was dismissed as false information by an official in the Chinese Embassy in Canada (T0143.001: Authentic Persona, T0097.111: Government Official Persona, T0129.006: Deny Involvement). |
|
||||
| [T0143.001 Authentic Persona](../../generated_pages/techniques/T0143.001.md) | IT00000342 <i>“On October 23, Canada’s Foreign Ministry said it had discovered a disinformation campaign, likely tied to China, aimed at discrediting dozens of Canadian politicians, including Prime Minister Justin Trudeau.<br><br> “The ministry said the campaign took place in August and September. It used new and hijacked social media accounts to bulk-post messages targeting Canadian politicians (T0146: Account, T0150.001: Newly Created, T0150.005: Compromised).<br><br> “A Chinese Embassy in Canada spokesperson dismissed Canada’s accusation as baseless.<br><br> ““Canada was a downright liar and disseminator of false information… Beijing has never meddled in another nation’s domestic affairs.”<br><br> “A Chinese Embassy in Canada spokesperson dismissed Canada’s accusation as baseless.<br><br> “That is false.<br><br> “The Canadian government's report is based on an investigation conducted by its Rapid Response Mechanism cyber intelligence unit in cooperation with the social media platforms.<br><br> “The investigation exposed China’s disinformation campaign dubbed “Spamouflage” -- for its tactic of using “a network of new or hijacked social media accounts that posts and increases the number of propaganda messages across multiple social media platforms – including Facebook, X/Twitter, Instagram, YouTube, Medium, Reddit, TikTok, and LinkedIn.””</i><br><br> In this case a network of accounts attributed to China were identified operating on multiple platforms. The report was dismissed as false information by an official in the Chinese Embassy in Canada (T0143.001: Authentic Persona, T0097.111: Government Official Persona, T0129.006: Deny Involvement). |
|
||||
| [T0097.111 Government Official Persona](../../generated_pages/techniques/T0097.111.md) | IT00000343 <i>“On October 23, Canada’s Foreign Ministry said it had discovered a disinformation campaign, likely tied to China, aimed at discrediting dozens of Canadian politicians, including Prime Minister Justin Trudeau.<br><br> “The ministry said the campaign took place in August and September. It used new and hijacked social media accounts to bulk-post messages targeting Canadian politicians (T0146: Account Asset, T0150.001: Newly Created Asset, T0150.005: Compromised Asset).<br><br> “A Chinese Embassy in Canada spokesperson dismissed Canada’s accusation as baseless.<br><br> ““Canada was a downright liar and disseminator of false information… Beijing has never meddled in another nation’s domestic affairs.”<br><br> “A Chinese Embassy in Canada spokesperson dismissed Canada’s accusation as baseless.<br><br> “That is false.<br><br> “The Canadian government's report is based on an investigation conducted by its Rapid Response Mechanism cyber intelligence unit in cooperation with the social media platforms.<br><br> “The investigation exposed China’s disinformation campaign dubbed “Spamouflage” -- for its tactic of using “a network of new or hijacked social media accounts that posts and increases the number of propaganda messages across multiple social media platforms – including Facebook, X/Twitter, Instagram, YouTube, Medium, Reddit, TikTok, and LinkedIn.””</i><br><br> In this case a network of accounts attributed to China were identified operating on multiple platforms. The report was dismissed as false information by an official in the Chinese Embassy in Canada (T0143.001: Authentic Persona, T0097.111: Government Official Persona, T0129.006: Deny Involvement). |
|
||||
| [T0129.006 Deny Involvement](../../generated_pages/techniques/T0129.006.md) | IT00000344 <i>“On October 23, Canada’s Foreign Ministry said it had discovered a disinformation campaign, likely tied to China, aimed at discrediting dozens of Canadian politicians, including Prime Minister Justin Trudeau.<br><br> “The ministry said the campaign took place in August and September. It used new and hijacked social media accounts to bulk-post messages targeting Canadian politicians (T0146: Account Asset, T0150.001: Newly Created Asset, T0150.005: Compromised Asset).<br><br> “A Chinese Embassy in Canada spokesperson dismissed Canada’s accusation as baseless.<br><br> ““Canada was a downright liar and disseminator of false information… Beijing has never meddled in another nation’s domestic affairs.”<br><br> “A Chinese Embassy in Canada spokesperson dismissed Canada’s accusation as baseless.<br><br> “That is false.<br><br> “The Canadian government's report is based on an investigation conducted by its Rapid Response Mechanism cyber intelligence unit in cooperation with the social media platforms.<br><br> “The investigation exposed China’s disinformation campaign dubbed “Spamouflage” -- for its tactic of using “a network of new or hijacked social media accounts that posts and increases the number of propaganda messages across multiple social media platforms – including Facebook, X/Twitter, Instagram, YouTube, Medium, Reddit, TikTok, and LinkedIn.””</i><br><br> In this case a network of accounts attributed to China were identified operating on multiple platforms. The report was dismissed as false information by an official in the Chinese Embassy in Canada (T0143.001: Authentic Persona, T0097.111: Government Official Persona, T0129.006: Deny Involvement). |
|
||||
| [T0143.001 Authentic Persona](../../generated_pages/techniques/T0143.001.md) | IT00000342 <i>“On October 23, Canada’s Foreign Ministry said it had discovered a disinformation campaign, likely tied to China, aimed at discrediting dozens of Canadian politicians, including Prime Minister Justin Trudeau.<br><br> “The ministry said the campaign took place in August and September. It used new and hijacked social media accounts to bulk-post messages targeting Canadian politicians (T0146: Account Asset, T0150.001: Newly Created Asset, T0150.005: Compromised Asset).<br><br> “A Chinese Embassy in Canada spokesperson dismissed Canada’s accusation as baseless.<br><br> ““Canada was a downright liar and disseminator of false information… Beijing has never meddled in another nation’s domestic affairs.”<br><br> “A Chinese Embassy in Canada spokesperson dismissed Canada’s accusation as baseless.<br><br> “That is false.<br><br> “The Canadian government's report is based on an investigation conducted by its Rapid Response Mechanism cyber intelligence unit in cooperation with the social media platforms.<br><br> “The investigation exposed China’s disinformation campaign dubbed “Spamouflage” -- for its tactic of using “a network of new or hijacked social media accounts that posts and increases the number of propaganda messages across multiple social media platforms – including Facebook, X/Twitter, Instagram, YouTube, Medium, Reddit, TikTok, and LinkedIn.””</i><br><br> In this case a network of accounts attributed to China were identified operating on multiple platforms. The report was dismissed as false information by an official in the Chinese Embassy in Canada (T0143.001: Authentic Persona, T0097.111: Government Official Persona, T0129.006: Deny Involvement). |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -15,19 +15,21 @@
|
||||
|
||||
| Reference | Pub Date | Authors | Org | Archive |
|
||||
| --------- | -------- | ------- | --- | ------- |
|
||||
| [https://www.computerweekly.com/news/366579820/China-ramps-up-use-of-AI-misinformation](https://www.computerweekly.com/news/366579820/China-ramps-up-use-of-AI-misinformation) | 2024/04/05 |
|
||||
Alex Scroxton | ComputerWeekly | [https://web.archive.org/web/20240405154259/https://www.computerweekly.com/news/366579820/China-ramps-up-use-of-AI-misinformation](https://web.archive.org/web/20240405154259/https://www.computerweekly.com/news/366579820/China-ramps-up-use-of-AI-misinformation) |
|
||||
|
||||
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0087.001 Develop AI-Generated Videos (Deepfakes)](../../generated_pages/techniques/T0087.001.md) | IT00000352 The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:<br><br><i>On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.</i><br><br>Here Spamoflage used an account on YouTube to post AI Generated audio impersonating an electoral candidate (T0146: Account, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).<br><br><i>Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.</i><br><br>Spamoflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Video (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). |
|
||||
| [T0088.001 Develop AI-Generated Audio (Deepfakes)](../../generated_pages/techniques/T0088.001.md) | IT00000347 The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:<br><br><i>On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.</i><br><br>Here Spamoflage used an account on YouTube to post AI Generated audio impersonating an electoral candidate (T0146: Account, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).<br><br><i>Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.</i><br><br>Spamoflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Video (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). |
|
||||
| [T0097.102 Journalist Persona](../../generated_pages/techniques/T0097.102.md) | IT00000354 The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:<br><br><i>On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.</i><br><br>Here Spamoflage used an account on YouTube to post AI Generated audio impersonating an electoral candidate (T0146: Account, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).<br><br><i>Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.</i><br><br>Spamoflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Video (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). |
|
||||
| [T0097.110 Party Official Persona](../../generated_pages/techniques/T0097.110.md) | IT00000348 The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:<br><br><i>On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.</i><br><br>Here Spamoflage used an account on YouTube to post AI Generated audio impersonating an electoral candidate (T0146: Account, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).<br><br><i>Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.</i><br><br>Spamoflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Video (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). |
|
||||
| [T0115 Post Content](../../generated_pages/techniques/T0115.md) | IT00000350 The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:<br><br><i>On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.</i><br><br>Here Spamoflage used an account on YouTube to post AI Generated audio impersonating an electoral candidate (T0146: Account, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).<br><br><i>Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.</i><br><br>Spamoflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Video (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). |
|
||||
| [T0143.002 Fabricated Persona](../../generated_pages/techniques/T0143.002.md) | IT00000353 The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:<br><br><i>On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.</i><br><br>Here Spamoflage used an account on YouTube to post AI Generated audio impersonating an electoral candidate (T0146: Account, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).<br><br><i>Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.</i><br><br>Spamoflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Video (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). |
|
||||
| [T0152.006 Video Platform](../../generated_pages/techniques/T0152.006.md) | IT00000349 The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:<br><br><i>On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.</i><br><br>Here Spamoflage used an account on YouTube to post AI Generated audio impersonating an electoral candidate (T0146: Account, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).<br><br><i>Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.</i><br><br>Spamoflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Video (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). |
|
||||
| [T0154.002 AI Media Platform](../../generated_pages/techniques/T0154.002.md) | IT00000351 The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:<br><br><i>On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.</i><br><br>Here Spamoflage used an account on YouTube to post AI Generated audio impersonating an electoral candidate (T0146: Account, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).<br><br><i>Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.</i><br><br>Spamoflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Video (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). |
|
||||
| [T0087.001 Develop AI-Generated Videos (Deepfakes)](../../generated_pages/techniques/T0087.001.md) | IT00000352 The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:<br><br><i>On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.</i><br><br>Here Spamouflage used an account on YouTube to post AI Generated audio impersonating an electoral candidate (T0146: Account Asset, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).<br><br><i>Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.</i><br><br>Spamouflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account Asset, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Video (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). |
|
||||
| [T0088.001 Develop AI-Generated Audio (Deepfakes)](../../generated_pages/techniques/T0088.001.md) | IT00000347 The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:<br><br><i>On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.</i><br><br>Here Spamouflage used an account on YouTube to post AI Generated audio impersonating an electoral candidate (T0146: Account Asset, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).<br><br><i>Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.</i><br><br>Spamouflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account Asset, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Video (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). |
|
||||
| [T0097.102 Journalist Persona](../../generated_pages/techniques/T0097.102.md) | IT00000354 The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:<br><br><i>On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.</i><br><br>Here Spamouflage used an account on YouTube to post AI Generated audio impersonating an electoral candidate (T0146: Account Asset, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).<br><br><i>Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.</i><br><br>Spamouflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account Asset, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Video (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). |
|
||||
| [T0097.110 Party Official Persona](../../generated_pages/techniques/T0097.110.md) | IT00000348 The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:<br><br><i>On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.</i><br><br>Here Spamouflage used an account on YouTube to post AI Generated audio impersonating an electoral candidate (T0146: Account Asset, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).<br><br><i>Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.</i><br><br>Spamouflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account Asset, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Video (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). |
|
||||
| [T0115 Post Content](../../generated_pages/techniques/T0115.md) | IT00000350 The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:<br><br><i>On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.</i><br><br>Here Spamouflage used an account on YouTube to post AI Generated audio impersonating an electoral candidate (T0146: Account Asset, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).<br><br><i>Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.</i><br><br>Spamouflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account Asset, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Video (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). |
|
||||
| [T0143.002 Fabricated Persona](../../generated_pages/techniques/T0143.002.md) | IT00000353 The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:<br><br><i>On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.</i><br><br>Here Spamouflage used an account on YouTube to post AI Generated audio impersonating an electoral candidate (T0146: Account Asset, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).<br><br><i>Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.</i><br><br>Spamouflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account Asset, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Video (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). |
|
||||
| [T0152.006 Video Platform](../../generated_pages/techniques/T0152.006.md) | IT00000349 The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:<br><br><i>On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.</i><br><br>Here Spamouflage used an account on YouTube to post AI Generated audio impersonating an electoral candidate (T0146: Account Asset, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).<br><br><i>Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.</i><br><br>Spamouflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account Asset, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Video (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). |
|
||||
| [T0154.002 AI Media Platform](../../generated_pages/techniques/T0154.002.md) | IT00000351 The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:<br><br><i>On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.</i><br><br>Here Spamouflage used an account on YouTube to post AI Generated audio impersonating an electoral candidate (T0146: Account Asset, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).<br><br><i>Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.</i><br><br>Spamouflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account Asset, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Video (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -15,6 +15,7 @@
|
||||
|
||||
| Reference | Pub Date | Authors | Org | Archive |
|
||||
| --------- | -------- | ------- | --- | ------- |
|
||||
| [https://au.reset.tech/news/report-not-just-algorithms/](https://au.reset.tech/news/report-not-just-algorithms/) | 2024/03/24 | - | Reset Australia | [https://web.archive.org/web/20240527135516/https://au.reset.tech/news/report-not-just-algorithms/](https://web.archive.org/web/20240527135516/https://au.reset.tech/news/report-not-just-algorithms/) |
|
||||
|
||||
|
||||
|
||||
|
@ -15,14 +15,15 @@
|
||||
|
||||
| Reference | Pub Date | Authors | Org | Archive |
|
||||
| --------- | -------- | ------- | --- | ------- |
|
||||
| [https://bhr.stern.nyu.edu/wp-content/uploads/2024/01/NYUCBHRGaming_ONLINEUPDATEDMay16.pdf](https://bhr.stern.nyu.edu/wp-content/uploads/2024/01/NYUCBHRGaming_ONLINEUPDATEDMay16.pdf) | 2023/05/01 | Mariana Olaizola Rosenblat, Paul M. Barrett | NYU Stern | [https://web.archive.org/web/20240912223613/https://bhr.stern.nyu.edu/wp-content/uploads/2024/01/NYUCBHRGaming_ONLINEUPDATEDMay16.pdf](https://web.archive.org/web/20240912223613/https://bhr.stern.nyu.edu/wp-content/uploads/2024/01/NYUCBHRGaming_ONLINEUPDATEDMay16.pdf) |
|
||||
|
||||
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0147.001 Game](../../generated_pages/techniques/T0147.001.md) | IT00000360 This report looks at how extremists exploit games and gaming adjacent platforms:<br><br><i>Ethnic Cleansing, a first-person shooter game created by the U.S. white supremacist organization National Alliance in 2002, was advertised as follows: “[T]he Race War has already begun. Your character, Will, runs through a ghetto blasting away at various blacks and spics to attempt to gain entrance to the subway system...where the jews have hidden to avoid the carnage. Then you get to blow away jews as they scream ‘Oy Vey!’ on your way to the command center.”<br><br>While games like Ethnic Cleansing —and others of a similar genocidal variety, including titles like Shoot the Blacks, Hatred, and Muslim Massacre —generate some press attention and controversy, they tend to be ignored by the large majority of gamers, who consider them “crude” and “badly designed.” Nevertheless, these games are still available in some corners of the Internet and have been downloaded by hundreds of thousands of users. <br><br>A second strategy involves the modification (“modding”) of existing video games to twist the narrative into an extremist message or fantasy. One example is the Islamic State terrorist group’s modding of the first-person shooter video game, ARMA III, to make Islamic fighters the heroic protagonists rather than the villains. With their powerful immersive quality, these video games have, in some instances, been effective at inspiring and allegedly training extremists to perpetrate real-world attacks.<br><br>Third, extremist actors have used in-game chat functions to open lines of communication with ideological sympathizers and potential recruits. Although the lack of in-game communication data has made it impossible for academic researchers to track the presence of extremists in a systematic way, there is enough anecdotal evidence—including evidence obtained from police investigation files—to infer that in-game chatrooms can and do function as “radicalization funnels” in at least some cases.</i><br><br>White supremacists created a game aligned with their ideology (T0147.001: Game). The Islamic State terrorist group created a mod of the game ARMA 3 (T0151.015: Online Game Platform, T0147.002: Game Mod). Extremists also use communication features available in online games to recruit new members. |
|
||||
| [T0147.002 Game Mod](../../generated_pages/techniques/T0147.002.md) | IT00000362 This report looks at how extremists exploit games and gaming adjacent platforms:<br><br><i>Ethnic Cleansing, a first-person shooter game created by the U.S. white supremacist organization National Alliance in 2002, was advertised as follows: “[T]he Race War has already begun. Your character, Will, runs through a ghetto blasting away at various blacks and spics to attempt to gain entrance to the subway system...where the jews have hidden to avoid the carnage. Then you get to blow away jews as they scream ‘Oy Vey!’ on your way to the command center.”<br><br>While games like Ethnic Cleansing —and others of a similar genocidal variety, including titles like Shoot the Blacks, Hatred, and Muslim Massacre —generate some press attention and controversy, they tend to be ignored by the large majority of gamers, who consider them “crude” and “badly designed.” Nevertheless, these games are still available in some corners of the Internet and have been downloaded by hundreds of thousands of users. <br><br>A second strategy involves the modification (“modding”) of existing video games to twist the narrative into an extremist message or fantasy. One example is the Islamic State terrorist group’s modding of the first-person shooter video game, ARMA III, to make Islamic fighters the heroic protagonists rather than the villains. With their powerful immersive quality, these video games have, in some instances, been effective at inspiring and allegedly training extremists to perpetrate real-world attacks.<br><br>Third, extremist actors have used in-game chat functions to open lines of communication with ideological sympathizers and potential recruits. Although the lack of in-game communication data has made it impossible for academic researchers to track the presence of extremists in a systematic way, there is enough anecdotal evidence—including evidence obtained from police investigation files—to infer that in-game chatrooms can and do function as “radicalization funnels” in at least some cases.</i><br><br>White supremacists created a game aligned with their ideology (T0147.001: Game). The Islamic State terrorist group created a mod of the game ARMA 3 (T0151.015: Online Game Platform, T0147.002: Game Mod). Extremists also use communication features available in online games to recruit new members. |
|
||||
| [T0151.015 Online Game Platform](../../generated_pages/techniques/T0151.015.md) | IT00000361 This report looks at how extremists exploit games and gaming adjacent platforms:<br><br><i>Ethnic Cleansing, a first-person shooter game created by the U.S. white supremacist organization National Alliance in 2002, was advertised as follows: “[T]he Race War has already begun. Your character, Will, runs through a ghetto blasting away at various blacks and spics to attempt to gain entrance to the subway system...where the jews have hidden to avoid the carnage. Then you get to blow away jews as they scream ‘Oy Vey!’ on your way to the command center.”<br><br>While games like Ethnic Cleansing —and others of a similar genocidal variety, including titles like Shoot the Blacks, Hatred, and Muslim Massacre —generate some press attention and controversy, they tend to be ignored by the large majority of gamers, who consider them “crude” and “badly designed.” Nevertheless, these games are still available in some corners of the Internet and have been downloaded by hundreds of thousands of users. <br><br>A second strategy involves the modification (“modding”) of existing video games to twist the narrative into an extremist message or fantasy. One example is the Islamic State terrorist group’s modding of the first-person shooter video game, ARMA III, to make Islamic fighters the heroic protagonists rather than the villains. With their powerful immersive quality, these video games have, in some instances, been effective at inspiring and allegedly training extremists to perpetrate realworld attacks.<br><br>Third, extremist actors have used in-game chat functions to open lines of communication with ideological sympathizers and potential recruits. 
Although the lack of in-game communication data has made it impossible for academic researchers to track the presence of extremists in a systematic way, there is enough anecdotal evidence—including evidence obtained from police investigation files—to infer that in-game chatrooms can and do function as “radicalization funnels” in at least some cases.</i><br><br>White supremacists created a game aligned with their ideology (T0147.001: Game Asset). The Islamic State terrorist group created a mod of the game ARMA 3 (T0151.015: Online Game Platform, T0147.002: Game Mod Asset). Extremists also use communication features available in online games to recruit new members. |
|
||||
| [T0147.001 Game Asset](../../generated_pages/techniques/T0147.001.md) | IT00000360 This report looks at how extremists exploit games and gaming adjacent platforms:<br><br><i>Ethnic Cleansing, a first-person shooter game created by the U.S. white supremacist organization National Alliance in 2002, was advertised as follows: “[T]he Race War has already begun. Your character, Will, runs through a ghetto blasting away at various blacks and spics to attempt to gain entrance to the subway system...where the jews have hidden to avoid the carnage. Then you get to blow away jews as they scream ‘Oy Vey!’ on your way to the command center.”<br><br>While games like Ethnic Cleansing —and others of a similar genocidal variety, including titles like Shoot the Blacks, Hatred, and Muslim Massacre —generate some press attention and controversy, they tend to be ignored by the large majority of gamers, who consider them “crude” and “badly designed.” Nevertheless, these games are still available in some corners of the Internet and have been downloaded by hundreds of thousands of users. <br><br>A second strategy involves the modification (“modding”) of existing video games to twist the narrative into an extremist message or fantasy. One example is the Islamic State terrorist group’s modding of the first-person shooter video game, ARMA III, to make Islamic fighters the heroic protagonists rather than the villains. With their powerful immersive quality, these video games have, in some instances, been effective at inspiring and allegedly training extremists to perpetrate realworld attacks.<br><br>Third, extremist actors have used in-game chat functions to open lines of communication with ideological sympathizers and potential recruits. 
Although the lack of in-game communication data has made it impossible for academic researchers to track the presence of extremists in a systematic way, there is enough anecdotal evidence—including evidence obtained from police investigation files—to infer that in-game chatrooms can and do function as “radicalization funnels” in at least some cases.</i><br><br>White supremacists created a game aligned with their ideology (T0147.001: Game Asset). The Islamic State terrorist group created a mod of the game ARMA 3 (T0151.015: Online Game Platform, T0147.002: Game Mod Asset). Extremists also use communication features available in online games to recruit new members. |
|
||||
| [T0147.002 Game Mod Asset](../../generated_pages/techniques/T0147.002.md) | IT00000362 This report looks at how extremists exploit games and gaming adjacent platforms:<br><br><i>Ethnic Cleansing, a first-person shooter game created by the U.S. white supremacist organization National Alliance in 2002, was advertised as follows: “[T]he Race War has already begun. Your character, Will, runs through a ghetto blasting away at various blacks and spics to attempt to gain entrance to the subway system...where the jews have hidden to avoid the carnage. Then you get to blow away jews as they scream ‘Oy Vey!’ on your way to the command center.”<br><br>While games like Ethnic Cleansing —and others of a similar genocidal variety, including titles like Shoot the Blacks, Hatred, and Muslim Massacre —generate some press attention and controversy, they tend to be ignored by the large majority of gamers, who consider them “crude” and “badly designed.” Nevertheless, these games are still available in some corners of the Internet and have been downloaded by hundreds of thousands of users. <br><br>A second strategy involves the modification (“modding”) of existing video games to twist the narrative into an extremist message or fantasy. One example is the Islamic State terrorist group’s modding of the first-person shooter video game, ARMA III, to make Islamic fighters the heroic protagonists rather than the villains. With their powerful immersive quality, these video games have, in some instances, been effective at inspiring and allegedly training extremists to perpetrate realworld attacks.<br><br>Third, extremist actors have used in-game chat functions to open lines of communication with ideological sympathizers and potential recruits. 
Although the lack of in-game communication data has made it impossible for academic researchers to track the presence of extremists in a systematic way, there is enough anecdotal evidence—including evidence obtained from police investigation files—to infer that in-game chatrooms can and do function as “radicalization funnels” in at least some cases.</i><br><br>White supremacists created a game aligned with their ideology (T0147.001: Game Asset). The Islamic State terrorist group created a mod of the game ARMA 3 (T0151.015: Online Game Platform, T0147.002: Game Mod Asset). Extremists also use communication features available in online games to recruit new members. |
|
||||
| [T0151.015 Online Game Platform](../../generated_pages/techniques/T0151.015.md) | IT00000361 This report looks at how extremists exploit games and gaming adjacent platforms:<br><br><i>Ethnic Cleansing, a first-person shooter game created by the U.S. white supremacist organization National Alliance in 2002, was advertised as follows: “[T]he Race War has already begun. Your character, Will, runs through a ghetto blasting away at various blacks and spics to attempt to gain entrance to the subway system...where the jews have hidden to avoid the carnage. Then you get to blow away jews as they scream ‘Oy Vey!’ on your way to the command center.”<br><br>While games like Ethnic Cleansing —and others of a similar genocidal variety, including titles like Shoot the Blacks, Hatred, and Muslim Massacre —generate some press attention and controversy, they tend to be ignored by the large majority of gamers, who consider them “crude” and “badly designed.” Nevertheless, these games are still available in some corners of the Internet and have been downloaded by hundreds of thousands of users. <br><br>A second strategy involves the modification (“modding”) of existing video games to twist the narrative into an extremist message or fantasy. One example is the Islamic State terrorist group’s modding of the first-person shooter video game, ARMA III, to make Islamic fighters the heroic protagonists rather than the villains. With their powerful immersive quality, these video games have, in some instances, been effective at inspiring and allegedly training extremists to perpetrate realworld attacks.<br><br>Third, extremist actors have used in-game chat functions to open lines of communication with ideological sympathizers and potential recruits. 
Although the lack of in-game communication data has made it impossible for academic researchers to track the presence of extremists in a systematic way, there is enough anecdotal evidence—including evidence obtained from police investigation files—to infer that in-game chatrooms can and do function as “radicalization funnels” in at least some cases.</i><br><br>White supremacists created a game aligned with their ideology (T0147.001: Game Asset). The Islamic State terrorist group created a mod of the game ARMA 3 (T0151.015: Online Game Platform, T0147.002: Game Mod Asset). Extremists also use communication features available in online games to recruit new members. |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -15,15 +15,16 @@
|
||||
|
||||
| Reference | Pub Date | Authors | Org | Archive |
|
||||
| --------- | -------- | ------- | --- | ------- |
|
||||
| [https://www.vogue.co.uk/news/article/stop-deepfakes-campaign](https://www.vogue.co.uk/news/article/stop-deepfakes-campaign) | 2021/03/15 | Sophie Compton | Vogue | [https://web.archive.org/web/20230515171651/https://www.vogue.co.uk/news/article/stop-deepfakes-campaign](https://web.archive.org/web/20230515171651/https://www.vogue.co.uk/news/article/stop-deepfakes-campaign) |
|
||||
|
||||
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0086.002 Develop AI-Generated Images (Deepfakes)](../../generated_pages/techniques/T0086.002.md) | IT00000364 <i>Last October, British writer Helen was alerted to a series of deepfakes on a porn site that appear to show her engaging in extreme acts of sexual violence. That night, the images replayed themselves over and over in horrific nightmares and she was gripped by an all-consuming feeling of dread. “It’s like you’re in a tunnel, going further and further into this enclosed space, where there’s no light,” she tells Vogue. This feeling pervaded Helen’s life. Whenever she left the house, she felt exposed. On runs, she experienced panic attacks. Helen still has no idea who did this to her.<br><br>[...]<br><br>Amnesty International has been investigating the effects of abuse against women on Twitter, specifically in relation to how they act online thereafter. According to the charity, abuse creates what they’ve called “the silencing effect” whereby women feel discouraged from participating online. The same can be said for victims of deepfakes.<br><br>Helen has never been afraid to use her voice, writing deeply personal accounts of postnatal depression. But the deepfakes created a feeling of shame so strong she thought she’d be carrying this “dirty secret” forever, and so she stopped writing.<br><br>[...]<br><br>Meanwhile, deepfake ‘communities’ are thriving. There are now dedicated sites, user-friendly apps and organised ‘request’ procedures. Some sites allow you to commission custom deepfakes for £25, while on others you can upload a woman’s image and a bot will strip her naked.<br><br>“This violation is not something that should be normalised,” says Gibi, an ASMR artist with 3.13 million YouTube subscribers. Gibi has given up trying to keep tabs on the deepfakes of her. 
For Gibi, the most egregious part of all of this is the fact that people are “profiting off my face, doing something that I didn’t consent to, like my suffering is your livelihood.” She’s even been approached by a company offering to remove the deepfakes — for £500 a video. This has to end. But how?</i><br><br>A website hosting pornographic content provided users the ability to create deepfake content (T0154.002: AI Media Platform, T0086.002: Develop AI-Generated Images (Deepfakes)). <br><br>Another website enabled users to commission custom deepfakes (T0152.004: Website Asset, T0148.004: Payment Processing Capability, T0086.002: Develop AI-Generated Images (Deepfakes), T0155.005: Paid Access Asset). |
|
||||
| [T0152.004 Website](../../generated_pages/techniques/T0152.004.md) | IT00000365 <i>Last October, British writer Helen was alerted to a series of deepfakes on a porn site that appear to show her engaging in extreme acts of sexual violence. That night, the images replayed themselves over and over in horrific nightmares and she was gripped by an all-consuming feeling of dread. “It’s like you’re in a tunnel, going further and further into this enclosed space, where there’s no light,” she tells Vogue. This feeling pervaded Helen’s life. Whenever she left the house, she felt exposed. On runs, she experienced panic attacks. Helen still has no idea who did this to her.<br><br>[...]<br><br>Amnesty International has been investigating the effects of abuse against women on Twitter, specifically in relation to how they act online thereafter. According to the charity, abuse creates what they’ve called “the silencing effect” whereby women feel discouraged from participating online. The same can be said for victims of deepfakes.<br><br>Helen has never been afraid to use her voice, writing deeply personal accounts of postnatal depression. But the deepfakes created a feeling of shame so strong she thought she’d be carrying this “dirty secret” forever, and so she stopped writing.<br><br>[...]<br><br>Meanwhile, deepfake ‘communities’ are thriving. There are now dedicated sites, user-friendly apps and organised ‘request’ procedures. Some sites allow you to commission custom deepfakes for £25, while on others you can upload a woman’s image and a bot will strip her naked.<br><br>“This violation is not something that should be normalised,” says Gibi, an ASMR artist with 3.13 million YouTube subscribers. Gibi has given up trying to keep tabs on the deepfakes of her. 
For Gibi, the most egregious part of all of this is the fact that people are “profiting off my face, doing something that I didn’t consent to, like my suffering is your livelihood.” She’s even been approached by a company offering to remove the deepfakes — for £500 a video. This has to end. But how?</i><br><br>A website hosting pornographic content provided users the ability to create deepfake content (T0154.002: AI Media Platform, T0086.002: Develop AI-Generated Images (Deepfakes)). <br><br>Another website enabled users to commission custom deepfakes (T0152.004: Website Asset, T0148.004: Payment Processing Capability, T0086.002: Develop AI-Generated Images (Deepfakes), T0155.005: Paid Access Asset). |
|
||||
| [T0154.002 AI Media Platform](../../generated_pages/techniques/T0154.002.md) | IT00000363 <i>Last October, British writer Helen was alerted to a series of deepfakes on a porn site that appear to show her engaging in extreme acts of sexual violence. That night, the images replayed themselves over and over in horrific nightmares and she was gripped by an all-consuming feeling of dread. “It’s like you’re in a tunnel, going further and further into this enclosed space, where there’s no light,” she tells Vogue. This feeling pervaded Helen’s life. Whenever she left the house, she felt exposed. On runs, she experienced panic attacks. Helen still has no idea who did this to her.<br><br>[...]<br><br>Amnesty International has been investigating the effects of abuse against women on Twitter, specifically in relation to how they act online thereafter. According to the charity, abuse creates what they’ve called “the silencing effect” whereby women feel discouraged from participating online. The same can be said for victims of deepfakes.<br><br>Helen has never been afraid to use her voice, writing deeply personal accounts of postnatal depression. But the deepfakes created a feeling of shame so strong she thought she’d be carrying this “dirty secret” forever, and so she stopped writing.<br><br>[...]<br><br>Meanwhile, deepfake ‘communities’ are thriving. There are now dedicated sites, user-friendly apps and organised ‘request’ procedures. Some sites allow you to commission custom deepfakes for £25, while on others you can upload a woman’s image and a bot will strip her naked.<br><br>“This violation is not something that should be normalised,” says Gibi, an ASMR artist with 3.13 million YouTube subscribers. Gibi has given up trying to keep tabs on the deepfakes of her. 
For Gibi, the most egregious part of all of this is the fact that people are “profiting off my face, doing something that I didn’t consent to, like my suffering is your livelihood.” She’s even been approached by a company offering to remove the deepfakes — for £500 a video. This has to end. But how?</i><br><br>A website hosting pornographic content provided users the ability to create deepfake content (T0154.002: AI Media Platform, T0086.002: Develop AI-Generated Images (Deepfakes)). <br><br>Another website enabled users to commission custom deepfakes (T0152.004: Website Asset, T0148.004: Payment Processing Capability, T0086.002: Develop AI-Generated Images (Deepfakes), T0155.005: Paid Access Asset). |
|
||||
| [T0155.005 Paid Access](../../generated_pages/techniques/T0155.005.md) | IT00000366 <i>Last October, British writer Helen was alerted to a series of deepfakes on a porn site that appear to show her engaging in extreme acts of sexual violence. That night, the images replayed themselves over and over in horrific nightmares and she was gripped by an all-consuming feeling of dread. “It’s like you’re in a tunnel, going further and further into this enclosed space, where there’s no light,” she tells Vogue. This feeling pervaded Helen’s life. Whenever she left the house, she felt exposed. On runs, she experienced panic attacks. Helen still has no idea who did this to her.<br><br>[...]<br><br>Amnesty International has been investigating the effects of abuse against women on Twitter, specifically in relation to how they act online thereafter. According to the charity, abuse creates what they’ve called “the silencing effect” whereby women feel discouraged from participating online. The same can be said for victims of deepfakes.<br><br>Helen has never been afraid to use her voice, writing deeply personal accounts of postnatal depression. But the deepfakes created a feeling of shame so strong she thought she’d be carrying this “dirty secret” forever, and so she stopped writing.<br><br>[...]<br><br>Meanwhile, deepfake ‘communities’ are thriving. There are now dedicated sites, user-friendly apps and organised ‘request’ procedures. Some sites allow you to commission custom deepfakes for £25, while on others you can upload a woman’s image and a bot will strip her naked.<br><br>“This violation is not something that should be normalised,” says Gibi, an ASMR artist with 3.13 million YouTube subscribers. Gibi has given up trying to keep tabs on the deepfakes of her. 
For Gibi, the most egregious part of all of this is the fact that people are “profiting off my face, doing something that I didn’t consent to, like my suffering is your livelihood.” She’s even been approached by a company offering to remove the deepfakes — for £500 a video. This has to end. But how?</i><br><br>A website hosting pornographic content provided users the ability to create deepfake content (T0154.002: AI Media Platform, T0086.002: Develop AI-Generated Images (Deepfakes)). <br><br>Another website enabled users to commission custom deepfakes (T0152.004: Website Asset, T0148.004: Payment Processing Capability, T0086.002: Develop AI-Generated Images (Deepfakes), T0155.005: Paid Access Asset). |
|
||||
| [T0086.002 Develop AI-Generated Images (Deepfakes)](../../generated_pages/techniques/T0086.002.md) | IT00000364 <i>Last October, British writer Helen was alerted to a series of deepfakes on a porn site that appear to show her engaging in extreme acts of sexual violence. That night, the images replayed themselves over and over in horrific nightmares and she was gripped by an all-consuming feeling of dread. “It’s like you’re in a tunnel, going further and further into this enclosed space, where there’s no light,” she tells Vogue. This feeling pervaded Helen’s life. Whenever she left the house, she felt exposed. On runs, she experienced panic attacks. Helen still has no idea who did this to her.<br><br>[...]<br><br>Amnesty International has been investigating the effects of abuse against women on Twitter, specifically in relation to how they act online thereafter. According to the charity, abuse creates what they’ve called “the silencing effect” whereby women feel discouraged from participating online. The same can be said for victims of deepfakes.<br><br>Helen has never been afraid to use her voice, writing deeply personal accounts of postnatal depression. But the deepfakes created a feeling of shame so strong she thought she’d be carrying this “dirty secret” forever, and so she stopped writing.<br><br>[...]<br><br>Meanwhile, deepfake ‘communities’ are thriving. There are now dedicated sites, user-friendly apps and organised ‘request’ procedures. Some sites allow you to commission custom deepfakes for £25, while on others you can upload a woman’s image and a bot will strip her naked.<br><br>“This violation is not something that should be normalised,” says Gibi, an ASMR artist with 3.13 million YouTube subscribers. Gibi has given up trying to keep tabs on the deepfakes of her. 
For Gibi, the most egregious part of all of this is the fact that people are “profiting off my face, doing something that I didn’t consent to, like my suffering is your livelihood.” She’s even been approached by a company offering to remove the deepfakes — for £500 a video. This has to end. But how?</i><br><br>A website hosting pornographic content provided users the ability to create deepfake content (T0154.002: AI Media Platform, T0086.002: Develop AI-Generated Images (Deepfakes)). <br><br>Another website enabled users to commission custom deepfakes (T0152.004: Website Asset, T0148.004: Payment Processing Capability, T0086.002: Develop AI-Generated Images (Deepfakes), T0155.005: Paid Access Asset). |
|
||||
| [T0152.004 Website Asset](../../generated_pages/techniques/T0152.004.md) | IT00000365 <i>Last October, British writer Helen was alerted to a series of deepfakes on a porn site that appear to show her engaging in extreme acts of sexual violence. That night, the images replayed themselves over and over in horrific nightmares and she was gripped by an all-consuming feeling of dread. “It’s like you’re in a tunnel, going further and further into this enclosed space, where there’s no light,” she tells Vogue. This feeling pervaded Helen’s life. Whenever she left the house, she felt exposed. On runs, she experienced panic attacks. Helen still has no idea who did this to her.<br><br>[...]<br><br>Amnesty International has been investigating the effects of abuse against women on Twitter, specifically in relation to how they act online thereafter. According to the charity, abuse creates what they’ve called “the silencing effect” whereby women feel discouraged from participating online. The same can be said for victims of deepfakes.<br><br>Helen has never been afraid to use her voice, writing deeply personal accounts of postnatal depression. But the deepfakes created a feeling of shame so strong she thought she’d be carrying this “dirty secret” forever, and so she stopped writing.<br><br>[...]<br><br>Meanwhile, deepfake ‘communities’ are thriving. There are now dedicated sites, user-friendly apps and organised ‘request’ procedures. Some sites allow you to commission custom deepfakes for £25, while on others you can upload a woman’s image and a bot will strip her naked.<br><br>“This violation is not something that should be normalised,” says Gibi, an ASMR artist with 3.13 million YouTube subscribers. Gibi has given up trying to keep tabs on the deepfakes of her. 
For Gibi, the most egregious part of all of this is the fact that people are “profiting off my face, doing something that I didn’t consent to, like my suffering is your livelihood.” She’s even been approached by a company offering to remove the deepfakes — for £500 a video. This has to end. But how?</i><br><br>A website hosting pornographic content provided users the ability to create deepfake content (T0154.002: AI Media Platform, T0086.002: Develop AI-Generated Images (Deepfakes)). <br><br>Another website enabled users to commission custom deepfakes (T0152.004: Website Asset, T0148.004: Payment Processing Capability, T0086.002: Develop AI-Generated Images (Deepfakes), T0155.005: Paid Access Asset). |
|
||||
| [T0154.002 AI Media Platform](../../generated_pages/techniques/T0154.002.md) | IT00000363 <i>Last October, British writer Helen was alerted to a series of deepfakes on a porn site that appear to show her engaging in extreme acts of sexual violence. That night, the images replayed themselves over and over in horrific nightmares and she was gripped by an all-consuming feeling of dread. “It’s like you’re in a tunnel, going further and further into this enclosed space, where there’s no light,” she tells Vogue. This feeling pervaded Helen’s life. Whenever she left the house, she felt exposed. On runs, she experienced panic attacks. Helen still has no idea who did this to her.<br><br>[...]<br><br>Amnesty International has been investigating the effects of abuse against women on Twitter, specifically in relation to how they act online thereafter. According to the charity, abuse creates what they’ve called “the silencing effect” whereby women feel discouraged from participating online. The same can be said for victims of deepfakes.<br><br>Helen has never been afraid to use her voice, writing deeply personal accounts of postnatal depression. But the deepfakes created a feeling of shame so strong she thought she’d be carrying this “dirty secret” forever, and so she stopped writing.<br><br>[...]<br><br>Meanwhile, deepfake ‘communities’ are thriving. There are now dedicated sites, user-friendly apps and organised ‘request’ procedures. Some sites allow you to commission custom deepfakes for £25, while on others you can upload a woman’s image and a bot will strip her naked.<br><br>“This violation is not something that should be normalised,” says Gibi, an ASMR artist with 3.13 million YouTube subscribers. Gibi has given up trying to keep tabs on the deepfakes of her. 
For Gibi, the most egregious part of all of this is the fact that people are “profiting off my face, doing something that I didn’t consent to, like my suffering is your livelihood.” She’s even been approached by a company offering to remove the deepfakes — for £500 a video. This has to end. But how?</i><br><br>A website hosting pornographic content provided users the ability to create deepfake content (T0154.002: AI Media Platform, T0086.002: Develop AI-Generated Images (Deepfakes)). <br><br>Another website enabled users to commission custom deepfakes (T0152.004: Website Asset, T0148.004: Payment Processing Capability, T0086.002: Develop AI-Generated Images (Deepfakes), T0155.005: Paid Access Asset). |
|
||||
| [T0155.005 Paid Access Asset](../../generated_pages/techniques/T0155.005.md) | IT00000366 <i>Last October, British writer Helen was alerted to a series of deepfakes on a porn site that appear to show her engaging in extreme acts of sexual violence. That night, the images replayed themselves over and over in horrific nightmares and she was gripped by an all-consuming feeling of dread. “It’s like you’re in a tunnel, going further and further into this enclosed space, where there’s no light,” she tells Vogue. This feeling pervaded Helen’s life. Whenever she left the house, she felt exposed. On runs, she experienced panic attacks. Helen still has no idea who did this to her.<br><br>[...]<br><br>Amnesty International has been investigating the effects of abuse against women on Twitter, specifically in relation to how they act online thereafter. According to the charity, abuse creates what they’ve called “the silencing effect” whereby women feel discouraged from participating online. The same can be said for victims of deepfakes.<br><br>Helen has never been afraid to use her voice, writing deeply personal accounts of postnatal depression. But the deepfakes created a feeling of shame so strong she thought she’d be carrying this “dirty secret” forever, and so she stopped writing.<br><br>[...]<br><br>Meanwhile, deepfake ‘communities’ are thriving. There are now dedicated sites, user-friendly apps and organised ‘request’ procedures. Some sites allow you to commission custom deepfakes for £25, while on others you can upload a woman’s image and a bot will strip her naked.<br><br>“This violation is not something that should be normalised,” says Gibi, an ASMR artist with 3.13 million YouTube subscribers. Gibi has given up trying to keep tabs on the deepfakes of her. 
For Gibi, the most egregious part of all of this is the fact that people are “profiting off my face, doing something that I didn’t consent to, like my suffering is your livelihood.” She’s even been approached by a company offering to remove the deepfakes — for £500 a video. This has to end. But how?</i><br><br>A website hosting pornographic content provided users the ability to create deepfake content (T0154.002: AI Media Platform, T0086.002: Develop AI-Generated Images (Deepfakes)). <br><br>Another website enabled users to commission custom deepfakes (T0152.004: Website Asset, T0148.004: Payment Processing Capability, T0086.002: Develop AI-Generated Images (Deepfakes), T0155.005: Paid Access Asset). |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -15,6 +15,7 @@
|
||||
|
||||
| Reference | Pub Date | Authors | Org | Archive |
|
||||
| --------- | -------- | ------- | --- | ------- |
|
||||
| [https://venturebeat.com/media/why-thispersondoesnotexist-and-its-copycats-need-to-be-restricted/](https://venturebeat.com/media/why-thispersondoesnotexist-and-its-copycats-need-to-be-restricted/) | 2019/03/03 | Adam Ghahramani | VentureBeat | [https://web.archive.org/web/20240822125555/https://venturebeat.com/media/why-thispersondoesnotexist-and-its-copycats-need-to-be-restricted/](https://web.archive.org/web/20240822125555/https://venturebeat.com/media/why-thispersondoesnotexist-and-its-copycats-need-to-be-restricted/) |
|
||||
|
||||
|
||||
|
||||
|
@ -15,15 +15,16 @@
|
||||
|
||||
| Reference | Pub Date | Authors | Org | Archive |
|
||||
| --------- | -------- | ------- | --- | ------- |
|
||||
| [https://cepa.org/article/pro-putin-disinformation-warriors-take-war-of-aggression-to-reddit/](https://cepa.org/article/pro-putin-disinformation-warriors-take-war-of-aggression-to-reddit/) | 2023/12/12 | Aliide Naylor | CEPA | [https://web.archive.org/web/20240813063338/https://cepa.org/article/pro-putin-disinformation-warriors-take-war-of-aggression-to-reddit/](https://web.archive.org/web/20240813063338/https://cepa.org/article/pro-putin-disinformation-warriors-take-war-of-aggression-to-reddit/) |
|
||||
|
||||
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0124 Suppress Opposition](../../generated_pages/techniques/T0124.md) | IT00000373 This report looks at changes content posted to communities on Reddit (called Subreddits) after teams of voluntary moderators are replaced with what appear to be pro-Russian voices:<br><br><i>The r/antiwar subreddit appears to be a very recent takeover target. With 12,900 members it is not the largest community on Reddit, but this does place it squarely within the top 5% of all communities in terms of membership.<br><br>Three months ago a new moderator team was instated by subreddit head u/democracy101. Any posts documenting Russian aggression in Ukraine are now swiftly removed, while the board has been flooded with posts about how Ukraine is losing, or how American “neocons wrecked” the country.<br><br>The pinned post from moderator u/n0ahbody proclaims: “People who call for an end to Russian aggressions but not the Western aggressions Russia is reacting to don’t really want peace.” This user takes the view that any negative opinion about Russia is “shaped by what the fanatically Russophobic MSM wants you to think,” and that the United States is not threatened by its neighbors. Russia is.”<br><br>When u/n0ahbody took over the sub, the user posted a triumphant and vitriolic diatribe in another pro-Russia subreddit with some 33,500 members, r/EndlessWar. “We are making progress. We are purging the sub of all NAFO and NAFO-adjacent elements. Hundreds of them have been banned over the last 24 hours for various rule infractions, for being NAFO or NAFO-adjacent,” the user said, referencing the grassroots, pro-Ukrainian North Atlantic Fella Organization (NAFO) meme movement.<br><br>Several former users have reported they have indeed been banned from r/antiwar since the change in moderators. “If this subreddit cannot be explicitly against the invasion of Ukraine it will never truly be anti-war,” wrote one user Halcyon_Rein, in the antiwar subreddit on September 6. 
They then edited the post to say, “Edit: btw, I got f**king banned for this 💀💀💀”</i><br><br>A community hosted on Reddit was taken over by new moderators (T0151.011: Community Sub-Forum, T0150.005: Compromised). These moderators removed content posted to the community which favoured Ukraine over Russia (T0146.004: Administrator Account, T0151.011: Community Sub-Forum, T0124: Suppress Opposition). |
|
||||
| [T0146.004 Administrator Account](../../generated_pages/techniques/T0146.004.md) | IT00000372 This report looks at changes content posted to communities on Reddit (called Subreddits) after teams of voluntary moderators are replaced with what appear to be pro-Russian voices:<br><br><i>The r/antiwar subreddit appears to be a very recent takeover target. With 12,900 members it is not the largest community on Reddit, but this does place it squarely within the top 5% of all communities in terms of membership.<br><br>Three months ago a new moderator team was instated by subreddit head u/democracy101. Any posts documenting Russian aggression in Ukraine are now swiftly removed, while the board has been flooded with posts about how Ukraine is losing, or how American “neocons wrecked” the country.<br><br>The pinned post from moderator u/n0ahbody proclaims: “People who call for an end to Russian aggressions but not the Western aggressions Russia is reacting to don’t really want peace.” This user takes the view that any negative opinion about Russia is “shaped by what the fanatically Russophobic MSM wants you to think,” and that the United States is not threatened by its neighbors. Russia is.”<br><br>When u/n0ahbody took over the sub, the user posted a triumphant and vitriolic diatribe in another pro-Russia subreddit with some 33,500 members, r/EndlessWar. “We are making progress. We are purging the sub of all NAFO and NAFO-adjacent elements. Hundreds of them have been banned over the last 24 hours for various rule infractions, for being NAFO or NAFO-adjacent,” the user said, referencing the grassroots, pro-Ukrainian North Atlantic Fella Organization (NAFO) meme movement.<br><br>Several former users have reported they have indeed been banned from r/antiwar since the change in moderators. “If this subreddit cannot be explicitly against the invasion of Ukraine it will never truly be anti-war,” wrote one user Halcyon_Rein, in the antiwar subreddit on September 6. 
They then edited the post to say, “Edit: btw, I got f**king banned for this 💀💀💀”</i><br><br>A community hosted on Reddit was taken over by new moderators (T0151.011: Community Sub-Forum, T0150.005: Compromised). These moderators removed content posted to the community which favoured Ukraine over Russia (T0146.004: Administrator Account, T0151.011: Community Sub-Forum, T0124: Suppress Opposition). |
|
||||
| [T0150.005 Compromised](../../generated_pages/techniques/T0150.005.md) | IT00000371 This report looks at changes content posted to communities on Reddit (called Subreddits) after teams of voluntary moderators are replaced with what appear to be pro-Russian voices:<br><br><i>The r/antiwar subreddit appears to be a very recent takeover target. With 12,900 members it is not the largest community on Reddit, but this does place it squarely within the top 5% of all communities in terms of membership.<br><br>Three months ago a new moderator team was instated by subreddit head u/democracy101. Any posts documenting Russian aggression in Ukraine are now swiftly removed, while the board has been flooded with posts about how Ukraine is losing, or how American “neocons wrecked” the country.<br><br>The pinned post from moderator u/n0ahbody proclaims: “People who call for an end to Russian aggressions but not the Western aggressions Russia is reacting to don’t really want peace.” This user takes the view that any negative opinion about Russia is “shaped by what the fanatically Russophobic MSM wants you to think,” and that the United States is not threatened by its neighbors. Russia is.”<br><br>When u/n0ahbody took over the sub, the user posted a triumphant and vitriolic diatribe in another pro-Russia subreddit with some 33,500 members, r/EndlessWar. “We are making progress. We are purging the sub of all NAFO and NAFO-adjacent elements. Hundreds of them have been banned over the last 24 hours for various rule infractions, for being NAFO or NAFO-adjacent,” the user said, referencing the grassroots, pro-Ukrainian North Atlantic Fella Organization (NAFO) meme movement.<br><br>Several former users have reported they have indeed been banned from r/antiwar since the change in moderators. “If this subreddit cannot be explicitly against the invasion of Ukraine it will never truly be anti-war,” wrote one user Halcyon_Rein, in the antiwar subreddit on September 6. 
They then edited the post to say, “Edit: btw, I got f**king banned for this 💀💀💀”</i><br><br>A community hosted on Reddit was taken over by new moderators (T0151.011: Community Sub-Forum, T0150.005: Compromised). These moderators removed content posted to the community which favoured Ukraine over Russia (T0146.004: Administrator Account, T0151.011: Community Sub-Forum, T0124: Suppress Opposition). |
|
||||
| [T0151.011 Community Sub-Forum](../../generated_pages/techniques/T0151.011.md) | IT00000370 This report looks at changes content posted to communities on Reddit (called Subreddits) after teams of voluntary moderators are replaced with what appear to be pro-Russian voices:<br><br><i>The r/antiwar subreddit appears to be a very recent takeover target. With 12,900 members it is not the largest community on Reddit, but this does place it squarely within the top 5% of all communities in terms of membership.<br><br>Three months ago a new moderator team was instated by subreddit head u/democracy101. Any posts documenting Russian aggression in Ukraine are now swiftly removed, while the board has been flooded with posts about how Ukraine is losing, or how American “neocons wrecked” the country.<br><br>The pinned post from moderator u/n0ahbody proclaims: “People who call for an end to Russian aggressions but not the Western aggressions Russia is reacting to don’t really want peace.” This user takes the view that any negative opinion about Russia is “shaped by what the fanatically Russophobic MSM wants you to think,” and that the United States is not threatened by its neighbors. Russia is.”<br><br>When u/n0ahbody took over the sub, the user posted a triumphant and vitriolic diatribe in another pro-Russia subreddit with some 33,500 members, r/EndlessWar. “We are making progress. We are purging the sub of all NAFO and NAFO-adjacent elements. Hundreds of them have been banned over the last 24 hours for various rule infractions, for being NAFO or NAFO-adjacent,” the user said, referencing the grassroots, pro-Ukrainian North Atlantic Fella Organization (NAFO) meme movement.<br><br>Several former users have reported they have indeed been banned from r/antiwar since the change in moderators. “If this subreddit cannot be explicitly against the invasion of Ukraine it will never truly be anti-war,” wrote one user Halcyon_Rein, in the antiwar subreddit on September 6. 
They then edited the post to say, “Edit: btw, I got f**king banned for this 💀💀💀”</i><br><br>A community hosted on Reddit was taken over by new moderators (T0151.011: Community Sub-Forum, T0150.005: Compromised). These moderators removed content posted to the community which favoured Ukraine over Russia (T0146.004: Administrator Account, T0151.011: Community Sub-Forum, T0124: Suppress Opposition). |
|
||||
| [T0124 Suppress Opposition](../../generated_pages/techniques/T0124.md) | IT00000373 This report looks at changes to content posted to communities on Reddit (called Subreddits) after teams of voluntary moderators are replaced with what appear to be pro-Russian voices:<br><br><i>The r/antiwar subreddit appears to be a very recent takeover target. With 12,900 members it is not the largest community on Reddit, but this does place it squarely within the top 5% of all communities in terms of membership.<br><br>Three months ago a new moderator team was instated by subreddit head u/democracy101. Any posts documenting Russian aggression in Ukraine are now swiftly removed, while the board has been flooded with posts about how Ukraine is losing, or how American “neocons wrecked” the country.<br><br>The pinned post from moderator u/n0ahbody proclaims: “People who call for an end to Russian aggressions but not the Western aggressions Russia is reacting to don’t really want peace.” This user takes the view that any negative opinion about Russia is “shaped by what the fanatically Russophobic MSM wants you to think,” and that the United States is not threatened by its neighbors. Russia is.”<br><br>When u/n0ahbody took over the sub, the user posted a triumphant and vitriolic diatribe in another pro-Russia subreddit with some 33,500 members, r/EndlessWar. “We are making progress. We are purging the sub of all NAFO and NAFO-adjacent elements. Hundreds of them have been banned over the last 24 hours for various rule infractions, for being NAFO or NAFO-adjacent,” the user said, referencing the grassroots, pro-Ukrainian North Atlantic Fella Organization (NAFO) meme movement.<br><br>Several former users have reported they have indeed been banned from r/antiwar since the change in moderators. “If this subreddit cannot be explicitly against the invasion of Ukraine it will never truly be anti-war,” wrote one user Halcyon_Rein, in the antiwar subreddit on September 6. 
They then edited the post to say, “Edit: btw, I got f**king banned for this 💀💀💀”</i><br><br>A community hosted on Reddit was taken over by new moderators (T0151.011: Community Sub-Forum, T0150.005: Compromised Asset). These moderators removed content posted to the community which favoured Ukraine over Russia (T0146.004: Administrator Account Asset, T0151.011: Community Sub-Forum, T0124: Suppress Opposition). |
|
||||
| [T0146.004 Administrator Account Asset](../../generated_pages/techniques/T0146.004.md) | IT00000372 This report looks at changes to content posted to communities on Reddit (called Subreddits) after teams of voluntary moderators are replaced with what appear to be pro-Russian voices:<br><br><i>The r/antiwar subreddit appears to be a very recent takeover target. With 12,900 members it is not the largest community on Reddit, but this does place it squarely within the top 5% of all communities in terms of membership.<br><br>Three months ago a new moderator team was instated by subreddit head u/democracy101. Any posts documenting Russian aggression in Ukraine are now swiftly removed, while the board has been flooded with posts about how Ukraine is losing, or how American “neocons wrecked” the country.<br><br>The pinned post from moderator u/n0ahbody proclaims: “People who call for an end to Russian aggressions but not the Western aggressions Russia is reacting to don’t really want peace.” This user takes the view that any negative opinion about Russia is “shaped by what the fanatically Russophobic MSM wants you to think,” and that the United States is not threatened by its neighbors. Russia is.”<br><br>When u/n0ahbody took over the sub, the user posted a triumphant and vitriolic diatribe in another pro-Russia subreddit with some 33,500 members, r/EndlessWar. “We are making progress. We are purging the sub of all NAFO and NAFO-adjacent elements. Hundreds of them have been banned over the last 24 hours for various rule infractions, for being NAFO or NAFO-adjacent,” the user said, referencing the grassroots, pro-Ukrainian North Atlantic Fella Organization (NAFO) meme movement.<br><br>Several former users have reported they have indeed been banned from r/antiwar since the change in moderators. “If this subreddit cannot be explicitly against the invasion of Ukraine it will never truly be anti-war,” wrote one user Halcyon_Rein, in the antiwar subreddit on September 6. 
They then edited the post to say, “Edit: btw, I got f**king banned for this 💀💀💀”</i><br><br>A community hosted on Reddit was taken over by new moderators (T0151.011: Community Sub-Forum, T0150.005: Compromised Asset). These moderators removed content posted to the community which favoured Ukraine over Russia (T0146.004: Administrator Account Asset, T0151.011: Community Sub-Forum, T0124: Suppress Opposition). |
|
||||
| [T0150.005 Compromised Asset](../../generated_pages/techniques/T0150.005.md) | IT00000371 This report looks at changes to content posted to communities on Reddit (called Subreddits) after teams of voluntary moderators are replaced with what appear to be pro-Russian voices:<br><br><i>The r/antiwar subreddit appears to be a very recent takeover target. With 12,900 members it is not the largest community on Reddit, but this does place it squarely within the top 5% of all communities in terms of membership.<br><br>Three months ago a new moderator team was instated by subreddit head u/democracy101. Any posts documenting Russian aggression in Ukraine are now swiftly removed, while the board has been flooded with posts about how Ukraine is losing, or how American “neocons wrecked” the country.<br><br>The pinned post from moderator u/n0ahbody proclaims: “People who call for an end to Russian aggressions but not the Western aggressions Russia is reacting to don’t really want peace.” This user takes the view that any negative opinion about Russia is “shaped by what the fanatically Russophobic MSM wants you to think,” and that the United States is not threatened by its neighbors. Russia is.”<br><br>When u/n0ahbody took over the sub, the user posted a triumphant and vitriolic diatribe in another pro-Russia subreddit with some 33,500 members, r/EndlessWar. “We are making progress. We are purging the sub of all NAFO and NAFO-adjacent elements. Hundreds of them have been banned over the last 24 hours for various rule infractions, for being NAFO or NAFO-adjacent,” the user said, referencing the grassroots, pro-Ukrainian North Atlantic Fella Organization (NAFO) meme movement.<br><br>Several former users have reported they have indeed been banned from r/antiwar since the change in moderators. “If this subreddit cannot be explicitly against the invasion of Ukraine it will never truly be anti-war,” wrote one user Halcyon_Rein, in the antiwar subreddit on September 6. 
They then edited the post to say, “Edit: btw, I got f**king banned for this 💀💀💀”</i><br><br>A community hosted on Reddit was taken over by new moderators (T0151.011: Community Sub-Forum, T0150.005: Compromised Asset). These moderators removed content posted to the community which favoured Ukraine over Russia (T0146.004: Administrator Account Asset, T0151.011: Community Sub-Forum, T0124: Suppress Opposition). |
|
||||
| [T0151.011 Community Sub-Forum](../../generated_pages/techniques/T0151.011.md) | IT00000370 This report looks at changes to content posted to communities on Reddit (called Subreddits) after teams of voluntary moderators are replaced with what appear to be pro-Russian voices:<br><br><i>The r/antiwar subreddit appears to be a very recent takeover target. With 12,900 members it is not the largest community on Reddit, but this does place it squarely within the top 5% of all communities in terms of membership.<br><br>Three months ago a new moderator team was instated by subreddit head u/democracy101. Any posts documenting Russian aggression in Ukraine are now swiftly removed, while the board has been flooded with posts about how Ukraine is losing, or how American “neocons wrecked” the country.<br><br>The pinned post from moderator u/n0ahbody proclaims: “People who call for an end to Russian aggressions but not the Western aggressions Russia is reacting to don’t really want peace.” This user takes the view that any negative opinion about Russia is “shaped by what the fanatically Russophobic MSM wants you to think,” and that the United States is not threatened by its neighbors. Russia is.”<br><br>When u/n0ahbody took over the sub, the user posted a triumphant and vitriolic diatribe in another pro-Russia subreddit with some 33,500 members, r/EndlessWar. “We are making progress. We are purging the sub of all NAFO and NAFO-adjacent elements. Hundreds of them have been banned over the last 24 hours for various rule infractions, for being NAFO or NAFO-adjacent,” the user said, referencing the grassroots, pro-Ukrainian North Atlantic Fella Organization (NAFO) meme movement.<br><br>Several former users have reported they have indeed been banned from r/antiwar since the change in moderators. “If this subreddit cannot be explicitly against the invasion of Ukraine it will never truly be anti-war,” wrote one user Halcyon_Rein, in the antiwar subreddit on September 6. 
They then edited the post to say, “Edit: btw, I got f**king banned for this 💀💀💀”</i><br><br>A community hosted on Reddit was taken over by new moderators (T0151.011: Community Sub-Forum, T0150.005: Compromised Asset). These moderators removed content posted to the community which favoured Ukraine over Russia (T0146.004: Administrator Account Asset, T0151.011: Community Sub-Forum, T0124: Suppress Opposition). |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -15,21 +15,22 @@
|
||||
|
||||
| Reference | Pub Date | Authors | Org | Archive |
|
||||
| --------- | -------- | ------- | --- | ------- |
|
||||
| [https://www.bellingcat.com/news/americas/2019/04/28/ignore-the-poway-synagogue-shooters-manifesto-pay-attention-to-8chans-pol-board/](https://www.bellingcat.com/news/americas/2019/04/28/ignore-the-poway-synagogue-shooters-manifesto-pay-attention-to-8chans-pol-board/) | 2019/04/28 | Robert Evans | bellingcat | [https://web.archive.org/web/20240929180218/https://www.bellingcat.com/news/americas/2019/04/28/ignore-the-poway-synagogue-shooters-manifesto-pay-attention-to-8chans-pol-board/](https://web.archive.org/web/20240929180218/https://www.bellingcat.com/news/americas/2019/04/28/ignore-the-poway-synagogue-shooters-manifesto-pay-attention-to-8chans-pol-board/) |
|
||||
|
||||
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0084.004 Appropriate Content](../../generated_pages/techniques/T0084.004.md) | IT00000377 <i>On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.<br><br>Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.<br><br>This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.<br><br>The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” AKA, kill a huge number of people.</i><br><br>Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. 
The post directed users to a variety of different platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms); a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account, T0151.001: Social Media Platform); and a manifesto they had written hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).<br><br>The report looks deeper into 8chan’s /pol/ board:<br><br><i>8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.<br><br>[...]<br><br>I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.<br><br>This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. 
On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.</i><br><br>Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).<br><br><i>When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shootings had been a “false flag” aimed at making the message board look bad.<br><br>When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto, in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.<br><br>This is a recognizable pattern that occurs in the wake of any crimes committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This actually sparked significant division and debate between the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”<br><br>In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” to merely have been scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”</i><br><br>Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). |
|
||||
| [T0085.004 Develop Document](../../generated_pages/techniques/T0085.004.md) | IT00000378 <i>On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.<br><br>Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.<br><br>This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.<br><br>The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” AKA, kill a huge number of people.</i><br><br>Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. 
The post directed users to a variety of different platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms); a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account, T0151.001: Social Media Platform); and a manifesto they had written hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).<br><br>The report looks deeper into 8chan’s /pol/ board:<br><br><i>8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.<br><br>[...]<br><br>I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.<br><br>This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. 
On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.</i><br><br>Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).<br><br><i>When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shootings had been a “false flag” aimed at making the message board look bad.<br><br>When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto, in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.<br><br>This is a recognizable pattern that occurs in the wake of any crimes committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This actually sparked significant division and debate between the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”<br><br>In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” to merely have been scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”</i><br><br>Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). |
|
||||
| [T0101 Create Localised Content](../../generated_pages/techniques/T0101.md) | IT00000375 <i>On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.<br><br>Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.<br><br>This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.<br><br>The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” AKA, kill a huge number of people.</i><br><br>Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. 
The post directed users to a variety of different platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms); a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account, T0151.001: Social Media Platform); and a manifesto they had written hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).<br><br>The report looks deeper into 8chan’s /pol/ board:<br><br><i>8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.<br><br>[...]<br><br>I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.<br><br>This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. 
On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.</i><br><br>Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).<br><br><i>When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shootings had been a “false flag” aimed at making the message board look bad.<br><br>When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto, in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.<br><br>This is a recognizable pattern that occurs in the wake of any crimes committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This actually sparked significant division and debate between the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”<br><br>In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” to merely have been scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”</i><br><br>Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). |
|
||||
| [T0122 Direct Users to Alternative Platforms](../../generated_pages/techniques/T0122.md) | IT00000382 <i>On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.<br><br>Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.<br><br>This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.<br><br>The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” AKA, kill a huge number of people.</i><br><br>Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. 
The post directed users to a variety of different platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms); a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account, T0151.001: Social Media Platform); and a manifesto they had written hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).<br><br>The report looks deeper into 8chan’s /pol/ board:<br><br><i>8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.<br><br>[...]<br><br>I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.<br><br>This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. 
On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.</i><br><br>Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).<br><br><i>When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shootings had been a “false flag” aimed at making the message board look bad.<br><br>When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto, in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.<br><br>This is a recognizable pattern that occurs in the wake of any crimes committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This actually sparked significant division and debate between the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”<br><br>In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” to merely have been scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”</i><br><br>Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). |
|
||||
| [T0129.006 Deny Involvement](../../generated_pages/techniques/T0129.006.md) | IT00000383 <i>On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.<br><br>Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.<br><br>This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.<br><br>The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” AKA, kill a huge number of people.</i><br><br>Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. 
The post directed users to a variety of different platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms); a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account, T0151.001: Social Media Platform); and a manifesto they had written hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).<br><br>The report looks deeper into 8chan’s /pol/ board:<br><br><i>8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.<br><br>[...]<br><br>I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.<br><br>This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. 
On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.</i><br><br>Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).<br><br><i>When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shootings had been a “false flag” aimed at making the message board look bad.<br><br>When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto, in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.<br><br>This is a recognizable pattern that occurs in the wake of any crimes committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This actually sparked significant division and debate between the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”<br><br>In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” to merely have been scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”</i><br><br>Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). |
|
||||
| [T0146.006 Open Access Platform](../../generated_pages/techniques/T0146.006.md) | IT00000376 <i>On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.<br><br>Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.<br><br>This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.<br><br>The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” AKA, kill a huge number of people.</i><br><br>Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. 
The post directed users to a variety of different platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms); a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account, T0151.001: Social Media Platform); and a manifesto they had written hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).<br><br>The report looks deeper into 8chan’s /pol/ board:<br><br><i>8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.<br><br>[...]<br><br>I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.<br><br>This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. 
On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.</i><br><br>Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).<br><br><i>When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shootings had been a “false flag” aimed at making the message board look bad.<br><br>When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto, in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.<br><br>This is a recognizable pattern that occurs in the wake of any crimes committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This actually sparked significant division and debate between the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”<br><br>In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” to merely have been scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”</i><br><br>Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). |
|
||||
| [T0151.001 Social Media Platform](../../generated_pages/techniques/T0151.001.md) | IT00000381 <i>On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.<br><br>Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.<br><br>This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.<br><br>The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” AKA, kill a huge number of people.</i><br><br>Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. 
The post directed users to a variety of different platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms); a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account, T0151.001: Social Media Platform); and a manifesto they had written hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).<br><br>The report looks deeper into 8chan’s /pol/ board:<br><br><i>8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.<br><br>[...]<br><br>I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.<br><br>This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. 
On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.</i><br><br>Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).<br><br><i>When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shootings had been a “false flag” aimed at making the message board look bad.<br><br>When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto, in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.<br><br>This is a recognizable pattern that occurs in the wake of any crimes committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This actually sparked significant division and debate between the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”<br><br>In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” to merely have been scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”</i><br><br>Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). |
|
||||
| [T0151.012 Image Board Platform](../../generated_pages/techniques/T0151.012.md) | IT00000374 <i>On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.<br><br>Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.<br><br>This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.<br><br>The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” AKA, kill a huge number of people.</i><br><br>Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. 
The post directed users to a variety of different platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms); a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account Asset, T0151.001: Social Media Platform); and a manifesto they had written hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).<br><br>The report looks deeper into 8chan’s /pol/ board:<br><br><i>8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.<br><br>[...]<br><br>I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.<br><br>This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. 
On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.</i><br><br>Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).<br><br><i>When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shootings had been a “false flag” aimed at making the message board look bad.<br><br>When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto, in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.<br><br>This is a recognizable pattern that occurs in the wake of any crimes committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This actually sparked significant division and debate between the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”<br><br>In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” to merely have been scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”</i><br><br>Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). |
|
||||
| [T0152.005 Paste Platform](../../generated_pages/techniques/T0152.005.md) | IT00000380 <i>On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.<br><br>Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.<br><br>This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.<br><br>The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” AKA, kill a huge number of people.</i><br><br>Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. 
The post directed users to a variety of different platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms); a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account Asset, T0151.001: Social Media Platform); and a manifesto they had written hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).<br><br>The report looks deeper into 8chan’s /pol/ board:<br><br><i>8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.<br><br>[...]<br><br>I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.<br><br>This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. 
On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.</i><br><br>Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).<br><br><i>When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shootings had been a “false flag” aimed at making the message board look bad.<br><br>When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto, in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.<br><br>This is a recognizable pattern that occurs in the wake of any crimes committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This actually sparked significant division and debate between the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”<br><br>In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” to merely have been scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”</i><br><br>Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). |
|
||||
| [T0152.010 File Hosting Platform](../../generated_pages/techniques/T0152.010.md) | IT00000379 <i>On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.<br><br>Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.<br><br>This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.<br><br>The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” AKA, kill a huge number of people.</i><br><br>Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. 
The post directed users to a variety of different platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms); a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account Asset, T0151.001: Social Media Platform); and a manifesto they had written hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).<br><br>The report looks deeper into 8chan’s /pol/ board:<br><br><i>8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.<br><br>[...]<br><br>I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.<br><br>This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. 
On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.</i><br><br>Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).<br><br><i>When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shootings had been a “false flag” aimed at making the message board look bad.<br><br>When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto, in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.<br><br>This is a recognizable pattern that occurs in the wake of any crimes committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This actually sparked significant division and debate between the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”<br><br>In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” to merely have been scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”</i><br><br>Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). |
|
||||
| [T0084.004 Appropriate Content](../../generated_pages/techniques/T0084.004.md) | IT00000377 <i>On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.<br><br>Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.<br><br>This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.<br><br>The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” AKA, kill a huge number of people.</i><br><br>Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. 
The post directed users to a variety of different platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms); a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account Asset, T0151.001: Social Media Platform); and a manifesto they had written hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).<br><br>The report looks deeper into 8chan’s /pol/ board:<br><br><i>8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.<br><br>[...]<br><br>I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.<br><br>This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. 
On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.</i><br><br>Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).<br><br><i>When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shootings had been a “false flag” aimed at making the message board look bad.<br><br>When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto, in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.<br><br>This is a recognizable pattern that occurs in the wake of any crimes committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This actually sparked significant division and debate between the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”<br><br>In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” to merely have been scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”</i><br><br>Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). |
|
||||
| [T0085.004 Develop Document](../../generated_pages/techniques/T0085.004.md) | IT00000378 <i>On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.<br><br>Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.<br><br>This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.<br><br>The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” AKA, kill a huge number of people.</i><br><br>Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. 
The post directed users to a variety of different platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms); a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account Asset, T0151.001: Social Media Platform); and a manifesto they had written hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).<br><br>The report looks deeper into 8chan’s /pol/ board:<br><br><i>8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.<br><br>[...]<br><br>I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.<br><br>This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. 
On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.</i><br><br>Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).<br><br><i>When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shootings had been a “false flag” aimed at making the message board look bad.<br><br>When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto, in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.<br><br>This is a recognizable pattern that occurs in the wake of any crimes committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This actually sparked significant division and debate between the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”<br><br>In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” to merely have been scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”</i><br><br>Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). |
|
||||
| [T0101 Create Localised Content](../../generated_pages/techniques/T0101.md) | IT00000375 <i>On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.<br><br>Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.<br><br>This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.<br><br>The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” AKA, kill a huge number of people.</i><br><br>Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. 
The post directed users to a variety of different platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms); a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account Asset, T0151.001: Social Media Platform); and a manifesto they had written hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).<br><br>The report looks deeper into 8chan’s /pol/ board:<br><br><i>8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.<br><br>[...]<br><br>I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.<br><br>This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. 
On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.</i><br><br>Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).<br><br><i>When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shootings had been a “false flag” aimed at making the message board look bad.<br><br>When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto, in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.<br><br>This is a recognizable pattern that occurs in the wake of any crimes committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This actually sparked significant division and debate between the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”<br><br>In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” to merely have been scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”</i><br><br>Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). |
|
||||
| [T0122 Direct Users to Alternative Platforms](../../generated_pages/techniques/T0122.md) | IT00000382 <i>On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.<br><br>Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.<br><br>This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.<br><br>The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” AKA, kill a huge number of people.</i><br><br>Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. 
The post directed users to a variety of different platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms); a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account Asset, T0151.001: Social Media Platform); and a manifesto they had written hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).<br><br>The report looks deeper into 8chan’s /pol/ board:<br><br><i>8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.<br><br>[...]<br><br>I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.<br><br>This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. 
On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.</i><br><br>Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).<br><br><i>When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shootings had been a “false flag” aimed at making the message board look bad.<br><br>When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto, in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.<br><br>This is a recognizable pattern that occurs in the wake of any crimes committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This actually sparked significant division and debate between the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”<br><br>In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” to merely have been scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”</i><br><br>Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). |
|
||||
| [T0129.006 Deny Involvement](../../generated_pages/techniques/T0129.006.md) | IT00000383 <i>On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.<br><br>Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.<br><br>This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.<br><br>The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” AKA, kill a huge number of people.</i><br><br>Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. 
The post directed users to a variety of different platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms); a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account Asset, T0151.001: Social Media Platform); and a manifesto they had written hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).<br><br>The report looks deeper into 8chan’s /pol/ board:<br><br><i>8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.<br><br>[...]<br><br>I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.<br><br>This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. 
On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.</i><br><br>Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).<br><br><i>When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shootings had been a “false flag” aimed at making the message board look bad.<br><br>When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto, in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.<br><br>This is a recognizable pattern that occurs in the wake of any crimes committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This actually sparked significant division and debate between the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”<br><br>In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” to merely have been scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”</i><br><br>Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). |
|
||||
| [T0146.006 Open Access Platform](../../generated_pages/techniques/T0146.006.md) | IT00000376 <i>On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.<br><br>Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.<br><br>This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.<br><br>The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” AKA, kill a huge number of people.</i><br><br>Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. 
The post directed users to a variety of different platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms); a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account Asset, T0151.001: Social Media Platform); and a manifesto they had written hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).<br><br>The report looks deeper into 8chan’s /pol/ board:<br><br><i>8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.<br><br>[...]<br><br>I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.<br><br>This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. 
On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.</i><br><br>Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).<br><br><i>When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shootings had been a “false flag” aimed at making the message board look bad.<br><br>When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto, in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.<br><br>This is a recognizable pattern that occurs in the wake of any crimes committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This actually sparked significant division and debate between the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”<br><br>In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” to merely have been scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”</i><br><br>Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). |
|
||||
| [T0151.001 Social Media Platform](../../generated_pages/techniques/T0151.001.md) | IT00000381 <i>On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.<br><br>Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.<br><br>This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.<br><br>The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” AKA, kill a huge number of people.</i><br><br>Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. 
The post directed users to a variety of different platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms); a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account Asset, T0151.001: Social Media Platform); and a manifesto they had written hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).<br><br>The report looks deeper into 8chan’s /pol/ board:<br><br><i>8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.<br><br>[...]<br><br>I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.<br><br>This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. 
On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.</i><br><br>Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).<br><br><i>When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shootings had been a “false flag” aimed at making the message board look bad.<br><br>When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto, in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.<br><br>This is a recognizable pattern that occurs in the wake of any crimes committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This actually sparked significant division and debate between the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”<br><br>In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” to merely have been scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”</i><br><br>Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). |
|
||||
| [T0151.012 Image Board Platform](../../generated_pages/techniques/T0151.012.md) | IT00000374 <i>On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.<br><br>Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.<br><br>This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.<br><br>The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” AKA, kill a huge number of people.</i><br><br>Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. 
The post directed users to a variety of different platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms); a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account Asset, T0151.001: Social Media Platform); and a manifesto they had written hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).<br><br>The report looks deeper into 8chan’s /pol/ board:<br><br><i>8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.<br><br>[...]<br><br>I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.<br><br>This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. 
On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.</i><br><br>Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).<br><br><i>When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shootings had been a “false flag” aimed at making the message board look bad.<br><br>When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto, in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.<br><br>This is a recognizable pattern that occurs in the wake of any crimes committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This actually sparked significant division and debate between the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”<br><br>In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” to merely have been scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”</i><br><br>Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). |
|
||||
| [T0152.005 Paste Platform](../../generated_pages/techniques/T0152.005.md) | IT00000380 <i>On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.<br><br>Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.<br><br>This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.<br><br>The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” AKA, kill a huge number of people.</i><br><br>Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. 
The post directed users to a variety of different platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms); a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account Asset, T0151.001: Social Media Platform); and a manifesto they had written hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).<br><br>The report looks deeper into 8chan’s /pol/ board:<br><br><i>8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.<br><br>[...]<br><br>I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.<br><br>This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. 
On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.</i><br><br>Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).<br><br><i>When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shootings had been a “false flag” aimed at making the message board look bad.<br><br>When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto, in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.<br><br>This is a recognizable pattern that occurs in the wake of any crimes committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This actually sparked significant division and debate between the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”<br><br>In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” to merely have been scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”</i><br><br>Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). |
|
||||
| [T0152.010 File Hosting Platform](../../generated_pages/techniques/T0152.010.md) | IT00000379 <i>On April 27, 2019, at around 11:30 a.m. local time, a young man with a semi-automatic rifle walked into the Chabad of Poway Synagogue in Poway, California. He opened fire, killing one worshipper and wounding three others. In the hours since the shooting, a manifesto, believed to be written by the shooter, began circulating online. Evidence has also surfaced that, like the Christchurch Mosque shooter, this killer began his rampage with a post on 8chan’s /pol/ board.<br><br>Although both of these attacks may seem different, since they targeted worshippers of different faiths, both shooters were united by the same fascist ideology. They were also both radicalized in the same place: 8chan’s /pol/ board.<br><br>This has been corroborated by posts on the board itself, where “anons,” as the posters call themselves, recirculated the shooter’s since-deleted post. In it, the alleged shooter claims to have been “lurking” on the site for a year and a half. He includes a link to a livestream of his rampage — which thankfully does not appear to have worked — and he also includes a pastebin link to his manifesto.<br><br>The very first response to his announcement was another anon cheering him on and telling him to “get the high score,” AKA, kill a huge number of people.</i><br><br>Before carrying out a mass shooting, the shooter posted a thread to 8chan’s /pol/ board. 
The post directed users to a variety of different platforms (T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms); a Facebook account on which the shooter attempted to livestream the shooting (T0146: Account Asset, T0151.001: Social Media Platform); and a manifesto they had written hosted on pastebin (T0146.006: Open Access Platform, T0152.005: Paste Platform, T0115: Post Content) and uploaded to the file sharing platform Mediafire (T0152.010: File Hosting Platform, T0085.004: Develop Document).<br><br>The report looks deeper into 8chan’s /pol/ board:<br><br><i>8chan is a large website, which includes a number of different discussion groups about everything from anime to left-wing politics. /pol/ is one particularly active board on the website, and it is best described as a gathering place for extremely online neo-Nazis.<br><br>[...]<br><br>I’ve browsed /pol/ on an almost daily basis since the Christchurch shooting. It has not been difficult to find calls for violence. On Monday, March 25 of this year, I ran across evidence of anons translating the Christchurch shooter’s manifesto into other languages in an attempt to inspire more shootings across the globe.<br><br>This tactic can work, and today’s shooting is proof. The Poway Synagogue shooter directly cited the Christchurch shooter as his inspiration, saying he decided to carry out his attack roughly two weeks after that shooting. 
On /pol/, many anons refer to the Christchurch shooter, Brenton Tarrant, as “Saint Tarrant,” complete with medieval-inspired iconography.</i><br><br>Manifestos posted to 8chan are translated and reshared by other platform users (T0101: Create Localised Content, T0146.006: Open Access Platform, T0151.012: Image Board Platform, T0115: Post Content, T0084.004: Appropriate Content).<br><br><i>When I began looking through /pol/ right after the Poway Synagogue shooting, I came across several claims that the shootings had been a “false flag” aimed at making the message board look bad.<br><br>When Bellingcat tweeted out a warning about shitposting and the shooter’s manifesto, in the immediate wake of the attack, probable anons even commented on the Tweet in an attempt to deny that a channer had been behind the attack.<br><br>This is a recognizable pattern that occurs in the wake of any crimes committed by members of the board. While the initial response to the Christchurch shooter’s massacre thread was riotous glee, in the days after the shooting many anons began to claim the attack had been a false flag. This actually sparked significant division and debate between the members of /pol/. In the below image, a user mocks other anons for being unable to “believe something in your favor is real.” Another anon responds, “As the evidence comes out, its [sic] quite clear that this was a false flag.”<br><br>In his manifesto, the Poway Synagogue shooter even weighed in on this debate, accusing other anons who called the Christchurch and Tree of Life Synagogue shootings “false flags” to merely have been scared: “They can’t fathom that there are brave White men alive who have the willpower and courage it takes to say, ‘Fuck my life—I’m willing to sacrifice everything for the benefit of my race.’”</i><br><br>Platform users deny that their platform has been used by mass shooters to publish their manifestos (T0129.006: Deny Involvement). |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -15,15 +15,16 @@
|
||||
|
||||
| Reference | Pub Date | Authors | Org | Archive |
|
||||
| --------- | -------- | ------- | --- | ------- |
|
||||
| [https://www.bbc.co.uk/news/articles/ckg9k5dv1zdo](https://www.bbc.co.uk/news/articles/ckg9k5dv1zdo) | 2024/10/05 | Marianna Spring | BBC News | [https://web.archive.org/web/20241009114904/https://www.bbc.com/news/articles/ckg9k5dv1zdo](https://web.archive.org/web/20241009114904/https://www.bbc.com/news/articles/ckg9k5dv1zdo) |
|
||||
|
||||
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0088.001 Develop AI-Generated Audio (Deepfakes)](../../generated_pages/techniques/T0088.001.md) | IT00000385 <i>“I seriously don't understand why I have to constantly put up with these dumbasses here every day.”<br><br>So began what appeared to be a long tirade from the principal of Pikesville High School, punctuated with racist, antisemitic and offensive tropes. It sounded like it had been secretly recorded.<br><br>The speaker went on to bemoan “ungrateful black kids” and Jewish people in the community.<br><br>The clip, first posted in [January 2024], went viral nationally. But it really struck a nerve in the peaceful, leafy suburb of Pikesville, which has large black and Jewish populations, and in the nearby city of Baltimore, Maryland. Principal Eric Eiswert was put on paid administrative leave pending an investigation.<br><br>[...]<br><br>But what those sharing the clip didn’t realise at the time was that another bombshell was about to drop: the clip was an AI-generated fake.<br><br>[...]<br><br>[In April 2024], Baltimore Police Chief Robert McCullough confirmed they now had “conclusive evidence that the recording was not authentic”.<br><br>And they believed they knew who made the fake.<br><br>Police charged 31-year-old Dazhon Darien, the school’s athletics director, with several counts related to the fake video. Charges included theft, retaliating against a witness and stalking.<br><br>He was arrested at the airport, where police say he was planning to fly to Houston, Texas.<br><br>Police say that Mr Darien had been under investigation by Principal Eiswert over an alleged theft of $1,916 (£1,460) from the school. 
They also allege there had been “work performance challenges” and his contract was likely not to be renewed.<br><br>Their theory was that by creating the deepfake recording, he hoped to discredit the principal before he could be fired.<br><br>Investigators say they traced an email used to send the original video to a server connected to Mr Darien, and allege that he used Baltimore County Public Schools' computer network to access AI tools. He is due to stand trial in December 2024.</i><br><br>By associating Mr Darien to the server used to email the original AI generated audio, investigators link Darien to the fabricated content (T0149.005: Server, T0088.001: AI Generated Audio (Deepfakes)). They also assert that Darien used computers owned by the school to access platforms used to generate the audio (T0146: Account, T0154.002: AI Media Platform). |
|
||||
| [T0146 Account](../../generated_pages/techniques/T0146.md) | IT00000387 <i>“I seriously don't understand why I have to constantly put up with these dumbasses here every day.”<br><br>So began what appeared to be a long tirade from the principal of Pikesville High School, punctuated with racist, antisemitic and offensive tropes. It sounded like it had been secretly recorded.<br><br>The speaker went on to bemoan “ungrateful black kids” and Jewish people in the community.<br><br>The clip, first posted in [January 2024], went viral nationally. But it really struck a nerve in the peaceful, leafy suburb of Pikesville, which has large black and Jewish populations, and in the nearby city of Baltimore, Maryland. Principal Eric Eiswert was put on paid administrative leave pending an investigation.<br><br>[...]<br><br>But what those sharing the clip didn’t realise at the time was that another bombshell was about to drop: the clip was an AI-generated fake.<br><br>[...]<br><br>[In April 2024], Baltimore Police Chief Robert McCullough confirmed they now had “conclusive evidence that the recording was not authentic”.<br><br>And they believed they knew who made the fake.<br><br>Police charged 31-year-old Dazhon Darien, the school’s athletics director, with several counts related to the fake video. Charges included theft, retaliating against a witness and stalking.<br><br>He was arrested at the airport, where police say he was planning to fly to Houston, Texas.<br><br>Police say that Mr Darien had been under investigation by Principal Eiswert over an alleged theft of $1,916 (£1,460) from the school. 
They also allege there had been “work performance challenges” and his contract was likely not to be renewed.<br><br>Their theory was that by creating the deepfake recording, he hoped to discredit the principal before he could be fired.<br><br>Investigators say they traced an email used to send the original video to a server connected to Mr Darien, and allege that he used Baltimore County Public Schools' computer network to access AI tools. He is due to stand trial in December 2024.</i><br><br>By associating Mr Darien to the server used to email the original AI generated audio, investigators link Darien to the fabricated content (T0149.005: Server, T0088.001: AI Generated Audio (Deepfakes)). They also assert that Darien used computers owned by the school to access platforms used to generate the audio (T0146: Account, T0154.002: AI Media Platform). |
|
||||
| [T0149.005 Server](../../generated_pages/techniques/T0149.005.md) | IT00000384 <i>“I seriously don't understand why I have to constantly put up with these dumbasses here every day.”<br><br>So began what appeared to be a long tirade from the principal of Pikesville High School, punctuated with racist, antisemitic and offensive tropes. It sounded like it had been secretly recorded.<br><br>The speaker went on to bemoan “ungrateful black kids” and Jewish people in the community.<br><br>The clip, first posted in [January 2024], went viral nationally. But it really struck a nerve in the peaceful, leafy suburb of Pikesville, which has large black and Jewish populations, and in the nearby city of Baltimore, Maryland. Principal Eric Eiswert was put on paid administrative leave pending an investigation.<br><br>[...]<br><br>But what those sharing the clip didn’t realise at the time was that another bombshell was about to drop: the clip was an AI-generated fake.<br><br>[...]<br><br>[In April 2024], Baltimore Police Chief Robert McCullough confirmed they now had “conclusive evidence that the recording was not authentic”.<br><br>And they believed they knew who made the fake.<br><br>Police charged 31-year-old Dazhon Darien, the school’s athletics director, with several counts related to the fake video. Charges included theft, retaliating against a witness and stalking.<br><br>He was arrested at the airport, where police say he was planning to fly to Houston, Texas.<br><br>Police say that Mr Darien had been under investigation by Principal Eiswert over an alleged theft of $1,916 (£1,460) from the school. 
They also allege there had been “work performance challenges” and his contract was likely not to be renewed.<br><br>Their theory was that by creating the deepfake recording, he hoped to discredit the principal before he could be fired.<br><br>Investigators say they traced an email used to send the original video to a server connected to Mr Darien, and allege that he used Baltimore County Public Schools' computer network to access AI tools. He is due to stand trial in December 2024.</i><br><br>By associating Mr Darien to the server used to email the original AI generated audio, investigators link Darien to the fabricated content (T0149.005: Server, T0088.001: AI Generated Audio (Deepfakes)). They also assert that Darien used computers owned by the school to access platforms used to generate the audio (T0146: Account, T0154.002: AI Media Platform). |
|
||||
| [T0154.002 AI Media Platform](../../generated_pages/techniques/T0154.002.md) | IT00000386 <i>“I seriously don't understand why I have to constantly put up with these dumbasses here every day.”<br><br>So began what appeared to be a long tirade from the principal of Pikesville High School, punctuated with racist, antisemitic and offensive tropes. It sounded like it had been secretly recorded.<br><br>The speaker went on to bemoan “ungrateful black kids” and Jewish people in the community.<br><br>The clip, first posted in [January 2024], went viral nationally. But it really struck a nerve in the peaceful, leafy suburb of Pikesville, which has large black and Jewish populations, and in the nearby city of Baltimore, Maryland. Principal Eric Eiswert was put on paid administrative leave pending an investigation.<br><br>[...]<br><br>But what those sharing the clip didn’t realise at the time was that another bombshell was about to drop: the clip was an AI-generated fake.<br><br>[...]<br><br>[In April 2024], Baltimore Police Chief Robert McCullough confirmed they now had “conclusive evidence that the recording was not authentic”.<br><br>And they believed they knew who made the fake.<br><br>Police charged 31-year-old Dazhon Darien, the school’s athletics director, with several counts related to the fake video. Charges included theft, retaliating against a witness and stalking.<br><br>He was arrested at the airport, where police say he was planning to fly to Houston, Texas.<br><br>Police say that Mr Darien had been under investigation by Principal Eiswert over an alleged theft of $1,916 (£1,460) from the school. 
They also allege there had been “work performance challenges” and his contract was likely not to be renewed.<br><br>Their theory was that by creating the deepfake recording, he hoped to discredit the principal before he could be fired.<br><br>Investigators say they traced an email used to send the original video to a server connected to Mr Darien, and allege that he used Baltimore County Public Schools' computer network to access AI tools. He is due to stand trial in December 2024.</i><br><br>By associating Mr Darien to the server used to email the original AI generated audio, investigators link Darien to the fabricated content (T0149.005: Server, T0088.001: AI Generated Audio (Deepfakes)). They also assert that Darien used computers owned by the school to access platforms used to generate the audio (T0146: Account, T0154.002: AI Media Platform). |
|
||||
| [T0088.001 Develop AI-Generated Audio (Deepfakes)](../../generated_pages/techniques/T0088.001.md) | IT00000385 <i>“I seriously don't understand why I have to constantly put up with these dumbasses here every day.”<br><br>So began what appeared to be a long tirade from the principal of Pikesville High School, punctuated with racist, antisemitic and offensive tropes. It sounded like it had been secretly recorded.<br><br>The speaker went on to bemoan “ungrateful black kids” and Jewish people in the community.<br><br>The clip, first posted in [January 2024], went viral nationally. But it really struck a nerve in the peaceful, leafy suburb of Pikesville, which has large black and Jewish populations, and in the nearby city of Baltimore, Maryland. Principal Eric Eiswert was put on paid administrative leave pending an investigation.<br><br>[...]<br><br>But what those sharing the clip didn’t realise at the time was that another bombshell was about to drop: the clip was an AI-generated fake.<br><br>[...]<br><br>[In April 2024], Baltimore Police Chief Robert McCullough confirmed they now had “conclusive evidence that the recording was not authentic”.<br><br>And they believed they knew who made the fake.<br><br>Police charged 31-year-old Dazhon Darien, the school’s athletics director, with several counts related to the fake video. Charges included theft, retaliating against a witness and stalking.<br><br>He was arrested at the airport, where police say he was planning to fly to Houston, Texas.<br><br>Police say that Mr Darien had been under investigation by Principal Eiswert over an alleged theft of $1,916 (£1,460) from the school. 
They also allege there had been “work performance challenges” and his contract was likely not to be renewed.<br><br>Their theory was that by creating the deepfake recording, he hoped to discredit the principal before he could be fired.<br><br>Investigators say they traced an email used to send the original video to a server connected to Mr Darien, and allege that he used Baltimore County Public Schools' computer network to access AI tools. He is due to stand trial in December 2024.</i><br><br>By associating Mr Darien to the server used to email the original AI generated audio, investigators link Darien to the fabricated content (T0149.005: Server Asset, T0088.001: AI Generated Audio (Deepfakes)). They also assert that Darien used computers owned by the school to access platforms used to generate the audio (T0146: Account Asset, T0154.002: AI Media Platform). |
|
||||
| [T0146 Account Asset](../../generated_pages/techniques/T0146.md) | IT00000387 <i>“I seriously don't understand why I have to constantly put up with these dumbasses here every day.”<br><br>So began what appeared to be a long tirade from the principal of Pikesville High School, punctuated with racist, antisemitic and offensive tropes. It sounded like it had been secretly recorded.<br><br>The speaker went on to bemoan “ungrateful black kids” and Jewish people in the community.<br><br>The clip, first posted in [January 2024], went viral nationally. But it really struck a nerve in the peaceful, leafy suburb of Pikesville, which has large black and Jewish populations, and in the nearby city of Baltimore, Maryland. Principal Eric Eiswert was put on paid administrative leave pending an investigation.<br><br>[...]<br><br>But what those sharing the clip didn’t realise at the time was that another bombshell was about to drop: the clip was an AI-generated fake.<br><br>[...]<br><br>[In April 2024], Baltimore Police Chief Robert McCullough confirmed they now had “conclusive evidence that the recording was not authentic”.<br><br>And they believed they knew who made the fake.<br><br>Police charged 31-year-old Dazhon Darien, the school’s athletics director, with several counts related to the fake video. Charges included theft, retaliating against a witness and stalking.<br><br>He was arrested at the airport, where police say he was planning to fly to Houston, Texas.<br><br>Police say that Mr Darien had been under investigation by Principal Eiswert over an alleged theft of $1,916 (£1,460) from the school. 
They also allege there had been “work performance challenges” and his contract was likely not to be renewed.<br><br>Their theory was that by creating the deepfake recording, he hoped to discredit the principal before he could be fired.<br><br>Investigators say they traced an email used to send the original video to a server connected to Mr Darien, and allege that he used Baltimore County Public Schools' computer network to access AI tools. He is due to stand trial in December 2024.</i><br><br>By associating Mr Darien to the server used to email the original AI generated audio, investigators link Darien to the fabricated content (T0149.005: Server Asset, T0088.001: AI Generated Audio (Deepfakes)). They also assert that Darien used computers owned by the school to access platforms used to generate the audio (T0146: Account Asset, T0154.002: AI Media Platform). |
|
||||
| [T0149.005 Server Asset](../../generated_pages/techniques/T0149.005.md) | IT00000384 <i>“I seriously don't understand why I have to constantly put up with these dumbasses here every day.”<br><br>So began what appeared to be a long tirade from the principal of Pikesville High School, punctuated with racist, antisemitic and offensive tropes. It sounded like it had been secretly recorded.<br><br>The speaker went on to bemoan “ungrateful black kids” and Jewish people in the community.<br><br>The clip, first posted in [January 2024], went viral nationally. But it really struck a nerve in the peaceful, leafy suburb of Pikesville, which has large black and Jewish populations, and in the nearby city of Baltimore, Maryland. Principal Eric Eiswert was put on paid administrative leave pending an investigation.<br><br>[...]<br><br>But what those sharing the clip didn’t realise at the time was that another bombshell was about to drop: the clip was an AI-generated fake.<br><br>[...]<br><br>[In April 2024], Baltimore Police Chief Robert McCullough confirmed they now had “conclusive evidence that the recording was not authentic”.<br><br>And they believed they knew who made the fake.<br><br>Police charged 31-year-old Dazhon Darien, the school’s athletics director, with several counts related to the fake video. Charges included theft, retaliating against a witness and stalking.<br><br>He was arrested at the airport, where police say he was planning to fly to Houston, Texas.<br><br>Police say that Mr Darien had been under investigation by Principal Eiswert over an alleged theft of $1,916 (£1,460) from the school. 
They also allege there had been “work performance challenges” and his contract was likely not to be renewed.<br><br>Their theory was that by creating the deepfake recording, he hoped to discredit the principal before he could be fired.<br><br>Investigators say they traced an email used to send the original video to a server connected to Mr Darien, and allege that he used Baltimore County Public Schools' computer network to access AI tools. He is due to stand trial in December 2024.</i><br><br>By associating Mr Darien to the server used to email the original AI generated audio, investigators link Darien to the fabricated content (T0149.005: Server Asset, T0088.001: AI Generated Audio (Deepfakes)). They also assert that Darien used computers owned by the school to access platforms used to generate the audio (T0146: Account Asset, T0154.002: AI Media Platform). |
|
||||
| [T0154.002 AI Media Platform](../../generated_pages/techniques/T0154.002.md) | IT00000386 <i>“I seriously don't understand why I have to constantly put up with these dumbasses here every day.”<br><br>So began what appeared to be a long tirade from the principal of Pikesville High School, punctuated with racist, antisemitic and offensive tropes. It sounded like it had been secretly recorded.<br><br>The speaker went on to bemoan “ungrateful black kids” and Jewish people in the community.<br><br>The clip, first posted in [January 2024], went viral nationally. But it really struck a nerve in the peaceful, leafy suburb of Pikesville, which has large black and Jewish populations, and in the nearby city of Baltimore, Maryland. Principal Eric Eiswert was put on paid administrative leave pending an investigation.<br><br>[...]<br><br>But what those sharing the clip didn’t realise at the time was that another bombshell was about to drop: the clip was an AI-generated fake.<br><br>[...]<br><br>[In April 2024], Baltimore Police Chief Robert McCullough confirmed they now had “conclusive evidence that the recording was not authentic”.<br><br>And they believed they knew who made the fake.<br><br>Police charged 31-year-old Dazhon Darien, the school’s athletics director, with several counts related to the fake video. Charges included theft, retaliating against a witness and stalking.<br><br>He was arrested at the airport, where police say he was planning to fly to Houston, Texas.<br><br>Police say that Mr Darien had been under investigation by Principal Eiswert over an alleged theft of $1,916 (£1,460) from the school. 
They also allege there had been “work performance challenges” and his contract was likely not to be renewed.<br><br>Their theory was that by creating the deepfake recording, he hoped to discredit the principal before he could be fired.<br><br>Investigators say they traced an email used to send the original video to a server connected to Mr Darien, and allege that he used Baltimore County Public Schools' computer network to access AI tools. He is due to stand trial in December 2024.</i><br><br>By associating Mr Darien to the server used to email the original AI generated audio, investigators link Darien to the fabricated content (T0149.005: Server Asset, T0088.001: AI Generated Audio (Deepfakes)). They also assert that Darien used computers owned by the school to access platforms used to generate the audio (T0146: Account Asset, T0154.002: AI Media Platform). |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -15,6 +15,7 @@
|
||||
|
||||
| Reference | Pub Date | Authors | Org | Archive |
|
||||
| --------- | -------- | ------- | --- | ------- |
|
||||
| [https://www.motherjones.com/politics/2017/05/emmanuel-macron-hacking-email-leak-france/](https://www.motherjones.com/politics/2017/05/emmanuel-macron-hacking-email-leak-france/) | 2017/05/05 | AJ Vicens, Pema Levy, Laura Smith | Mother Jones | [https://web.archive.org/web/20240628030952/https://www.motherjones.com/politics/2017/05/emmanuel-macron-hacking-email-leak-france/](https://web.archive.org/web/20240628030952/https://www.motherjones.com/politics/2017/05/emmanuel-macron-hacking-email-leak-france/) |
|
||||
|
||||
|
||||
|
||||
@ -22,7 +23,7 @@
|
||||
| --------- | ------------------------- |
|
||||
| [T0115 Post Content](../../generated_pages/techniques/T0115.md) | IT00000391 <i>A massive trove of documents purporting to contain thousands of emails and other files from the [2017 presidential] campaign of Emmanuel Macron—the French centrist candidate squaring off against right-wing nationalist Marine Le Pen—was posted on the internet Friday afternoon. The Macron campaign says that at least some of the documents are fake. The document dump came just over a day before voting is set to begin in the final round of the election and mere hours before candidates are legally required to stop campaigning.<br><br>At about 2:35 p.m. ET, a post appeared on the 4chan online message board announcing the leak. The documents appear to include emails, internal memos, and screenshots of purported banking records.<br><br>“In this pastebin are links to torrents of emails between Macron, his team and other officials, politicians as well as original documents and photos,” the anonymous 4chan poster wrote. “This was passed on to me today so now I am giving it to you, the people. The leak is massvie and released in the hopes that the human search engine here will be able to start sifting through the contents and figure out exactly what we have here.”<br><br>The Macron campaign issued a statement Friday night saying it was the victim of a “massive and coordinated” hacking attack. That campaign said the leak included some fake documents that were intended “to sow doubt and misinformation.”</i><br><br>Actors posted to 4chan a link (T0151.012: Image Board Platform, T0146.006: Open Access Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms) to text content hosted on pastebin (T0152.005: Paste Platform, T0146.006: Open Access Platform, T0115: Post Content), which contained links to download stolen and fabricated documents. |
|
||||
| [T0122 Direct Users to Alternative Platforms](../../generated_pages/techniques/T0122.md) | IT00000392 <i>A massive trove of documents purporting to contain thousands of emails and other files from the [2017 presidential] campaign of Emmanuel Macron—the French centrist candidate squaring off against right-wing nationalist Marine Le Pen—was posted on the internet Friday afternoon. The Macron campaign says that at least some of the documents are fake. The document dump came just over a day before voting is set to begin in the final round of the election and mere hours before candidates are legally required to stop campaigning.<br><br>At about 2:35 p.m. ET, a post appeared on the 4chan online message board announcing the leak. The documents appear to include emails, internal memos, and screenshots of purported banking records.<br><br>“In this pastebin are links to torrents of emails between Macron, his team and other officials, politicians as well as original documents and photos,” the anonymous 4chan poster wrote. “This was passed on to me today so now I am giving it to you, the people. The leak is massvie and released in the hopes that the human search engine here will be able to start sifting through the contents and figure out exactly what we have here.”<br><br>The Macron campaign issued a statement Friday night saying it was the victim of a “massive and coordinated” hacking attack. That campaign said the leak included some fake documents that were intended “to sow doubt and misinformation.”</i><br><br>Actors posted to 4chan a link (T0151.012: Image Board Platform, T0146.006: Open Access Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms) to text content hosted on pastebin (T0152.005: Paste Platform, T0146.006: Open Access Platform, T0115: Post Content), which contained links to download stolen and fabricated documents. |
|
||||
| [T0146.007 Automated Account](../../generated_pages/techniques/T0146.007.md) | IT00000389 <i>A massive trove of documents purporting to contain thousands of emails and other files from the [2017 presidential] campaign of Emmanuel Macron—the French centrist candidate squaring off against right-wing nationalist Marine Le Pen—was posted on the internet Friday afternoon. The Macron campaign says that at least some of the documents are fake. The document dump came just over a day before voting is set to begin in the final round of the election and mere hours before candidates are legally required to stop campaigning.<br><br>At about 2:35 p.m. ET, a post appeared on the 4chan online message board announcing the leak. The documents appear to include emails, internal memos, and screenshots of purported banking records.<br><br>“In this pastebin are links to torrents of emails between Macron, his team and other officials, politicians as well as original documents and photos,” the anonymous 4chan poster wrote. “This was passed on to me today so now I am giving it to you, the people. The leak is massvie and released in the hopes that the human search engine here will be able to start sifting through the contents and figure out exactly what we have here.”<br><br>The Macron campaign issued a statement Friday night saying it was the victim of a “massive and coordinated” hacking attack. That campaign said the leak included some fake documents that were intended “to sow doubt and misinformation.”</i><br><br>Actors posted to 4chan a link (T0151.012: Image Board Platform, T0146.006: Open Access Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms) to text content hosted on pastebin (T0152.005: Paste Platform, T0146.006: Open Access Platform, T0115: Post Content), which contained links to download stolen and fabricated documents. |
|
||||
| [T0146.007 Automated Account Asset](../../generated_pages/techniques/T0146.007.md) | IT00000389 <i>A massive trove of documents purporting to contain thousands of emails and other files from the [2017 presidential] campaign of Emmanuel Macron—the French centrist candidate squaring off against right-wing nationalist Marine Le Pen—was posted on the internet Friday afternoon. The Macron campaign says that at least some of the documents are fake. The document dump came just over a day before voting is set to begin in the final round of the election and mere hours before candidates are legally required to stop campaigning.<br><br>At about 2:35 p.m. ET, a post appeared on the 4chan online message board announcing the leak. The documents appear to include emails, internal memos, and screenshots of purported banking records.<br><br>“In this pastebin are links to torrents of emails between Macron, his team and other officials, politicians as well as original documents and photos,” the anonymous 4chan poster wrote. “This was passed on to me today so now I am giving it to you, the people. The leak is massvie and released in the hopes that the human search engine here will be able to start sifting through the contents and figure out exactly what we have here.”<br><br>The Macron campaign issued a statement Friday night saying it was the victim of a “massive and coordinated” hacking attack. That campaign said the leak included some fake documents that were intended “to sow doubt and misinformation.”</i><br><br>Actors posted to 4chan a link (T0151.012: Image Board Platform, T0146.006: Open Access Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms) to text content hosted on pastebin (T0152.005: Paste Platform, T0146.006: Open Access Platform, T0115: Post Content), which contained links to download stolen and fabricated documents. |
|
||||
| [T0151.012 Image Board Platform](../../generated_pages/techniques/T0151.012.md) | IT00000388 <i>A massive trove of documents purporting to contain thousands of emails and other files from the [2017 presidential] campaign of Emmanuel Macron—the French centrist candidate squaring off against right-wing nationalist Marine Le Pen—was posted on the internet Friday afternoon. The Macron campaign says that at least some of the documents are fake. The document dump came just over a day before voting is set to begin in the final round of the election and mere hours before candidates are legally required to stop campaigning.<br><br>At about 2:35 p.m. ET, a post appeared on the 4chan online message board announcing the leak. The documents appear to include emails, internal memos, and screenshots of purported banking records.<br><br>“In this pastebin are links to torrents of emails between Macron, his team and other officials, politicians as well as original documents and photos,” the anonymous 4chan poster wrote. “This was passed on to me today so now I am giving it to you, the people. The leak is massvie and released in the hopes that the human search engine here will be able to start sifting through the contents and figure out exactly what we have here.”<br><br>The Macron campaign issued a statement Friday night saying it was the victim of a “massive and coordinated” hacking attack. That campaign said the leak included some fake documents that were intended “to sow doubt and misinformation.”</i><br><br>Actors posted to 4chan a link (T0151.012: Image Board Platform, T0146.006: Open Access Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms) to text content hosted on pastebin (T0152.005: Paste Platform, T0146.006: Open Access Platform, T0115: Post Content), which contained links to download stolen and fabricated documents. |
|
||||
| [T0152.005 Paste Platform](../../generated_pages/techniques/T0152.005.md) | IT00000390 <i>A massive trove of documents purporting to contain thousands of emails and other files from the [2017 presidential] campaign of Emmanuel Macron—the French centrist candidate squaring off against right-wing nationalist Marine Le Pen—was posted on the internet Friday afternoon. The Macron campaign says that at least some of the documents are fake. The document dump came just over a day before voting is set to begin in the final round of the election and mere hours before candidates are legally required to stop campaigning.<br><br>At about 2:35 p.m. ET, a post appeared on the 4chan online message board announcing the leak. The documents appear to include emails, internal memos, and screenshots of purported banking records.<br><br>“In this pastebin are links to torrents of emails between Macron, his team and other officials, politicians as well as original documents and photos,” the anonymous 4chan poster wrote. “This was passed on to me today so now I am giving it to you, the people. The leak is massvie and released in the hopes that the human search engine here will be able to start sifting through the contents and figure out exactly what we have here.”<br><br>The Macron campaign issued a statement Friday night saying it was the victim of a “massive and coordinated” hacking attack. That campaign said the leak included some fake documents that were intended “to sow doubt and misinformation.”</i><br><br>Actors posted to 4chan a link (T0151.012: Image Board Platform, T0146.006: Open Access Platform, T0115: Post Content, T0122: Direct Users to Alternative Platforms) to text content hosted on pastebin (T0152.005: Paste Platform, T0146.006: Open Access Platform, T0115: Post Content), which contained links to download stolen and fabricated documents. |
|
||||
|
||||
|
@ -15,18 +15,19 @@
|
||||
|
||||
| Reference | Pub Date | Authors | Org | Archive |
|
||||
| --------- | -------- | ------- | --- | ------- |
|
||||
| [https://gnet-research.org/2024/06/10/gaming-the-system-the-use-of-gaming-adjacent-communication-game-and-mod-platforms-by-extremist-actors/](https://gnet-research.org/2024/06/10/gaming-the-system-the-use-of-gaming-adjacent-communication-game-and-mod-platforms-by-extremist-actors/) | 2024/06/10 | Constantin Winkler, Lars Wiegold | Global Network on Extremism & Technology | [https://web.archive.org/web/20241009034732/https://gnet-research.org/2024/06/10/gaming-the-system-the-use-of-gaming-adjacent-communication-game-and-mod-platforms-by-extremist-actors/](https://web.archive.org/web/20241009034732/https://gnet-research.org/2024/06/10/gaming-the-system-the-use-of-gaming-adjacent-communication-game-and-mod-platforms-by-extremist-actors/) |
|
||||
|
||||
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0147.001 Game](../../generated_pages/techniques/T0147.001.md) | IT00000397 <i>In this report, researchers look at online platforms commonly used by people who play videogames, looking at how these platforms can contribute to radicalisation of gamers:<br><br>Indie DB [is a platform that serves] to present indie games, which are titles from independent, small developer teams, which can be discussed and downloaded [Indie DB]. <br><br>[...]<br><br>[On Indie DB we] found antisemitic, Islamist, sexist and other discriminatory content during the exploration. Both games and comments were located that made positive references to National Socialism. Radicalised users seem to use the opportunities to network, create groups, and communicate in forums. In addition, a number of member profiles with graphic propaganda content could be located. We found several games with propagandistic content advertised on the platform, which caused apparently radicalised users to form a fan community around those games. For example, there are titles such as the antisemitic Fursan Al-Aqsa: The Knights of the Al-Aqsa Mosque, which has won the Best Hardcore Game award at the Game Connection America 2024 Game Development Awards and which has received antisemitic praise on its review page. In the game, players need to target members of the Israeli Defence Forces, and attacks by Palestinian terrorist groups such as Hamas or Lions’ Den can be re-enacted. These include bomb attacks, beheadings and the re-enactment of the terrorist attack in Israel on 7 October 2023. We, therefore, deem Indie DB to be relevant for further analyses of extremist activities in digital gaming spaces.</i><br><br>Indie DB is an online software delivery platform on which users can create groups, and participate in discussion forums (T0152.009: Software Delivery Platform, T0151.002: Online Community Group, T0151.009: Legacy Online Forum Platform). 
The platform hosted games which allowed players to reenact terrorist attacks (T0147.001: Game). |
|
||||
| [T0147.002 Game Mod](../../generated_pages/techniques/T0147.002.md) | IT00000398 In this report, researchers look at online platforms commonly used by people who play videogames, looking at how these platforms can contribute to radicalisation of gamers:<br><br><i>Gamebanana and Mod DB are so-called modding platforms that allow users to post their modifications of existing (popular) games. In the process of modding, highly radicalised content can be inserted into games that did not originally contain it. All of these platforms also have communication functions and customisable profiles.<br><br>[...]<br><br>During the explorations, several modifications with hateful themes were located, including right-wing extremist, racist, antisemitic and Islamist content. This includes mods that make it possible to play as terrorists or National Socialists. So-called “skins” (textures that change the appearance of models in the game) for characters from first-person shooters are particularly popular and contain references to National Socialism or Islamist terrorist organisations. Although some of this content could be justified with reference to historical accuracy and realism, the user profiles of the creators and commentators often reveal political motivations. Names with neo-Nazi codes or the use of avatars showing members of the Wehrmacht or the Waffen SS, for example, indicate a certain degree of positive appreciation or fascination with right-wing ideology, as do affirmations in the comment columns.<br><br>Mod DB in particular has attracted public attention in the past. For example, a mod for the game Half-Life 2 made it possible to play a school shooting with the weapons used during the attacks at Columbine High School (1999) and Virginia Polytechnic Institute and State University (2007). Antisemitic memes and jokes are shared in several groups on the platform. It seems as if users partially connect with each other because of shared political views. 
There were also indications that Islamist and right-wing extremist users network on the basis of shared views on women, Jews or homosexuals. In addition to relevant usernames and avatars, we found profiles featuring picture galleries, backgrounds and banners dedicated to the SS. Extremist propaganda and radicalisation processes on modding platforms have not been explored yet, but our exploration suggests these digital spaces to be highly relevant for our field.</i><br><br>Mod DB is a platform which allows users to upload mods for games, which other users can download (T0152.009: Software Delivery Platform, T0147.002: Game Mod). |
|
||||
| [T0151.002 Online Community Group](../../generated_pages/techniques/T0151.002.md) | IT00000395 <i>In this report, researchers look at online platforms commonly used by people who play videogames, looking at how these platforms can contribute to radicalisation of gamers:<br><br>Indie DB [is a platform that serves] to present indie games, which are titles from independent, small developer teams, which can be discussed and downloaded [Indie DB]. <br><br>[...]<br><br>[On Indie DB we] found antisemitic, Islamist, sexist and other discriminatory content during the exploration. Both games and comments were located that made positive references to National Socialism. Radicalised users seem to use the opportunities to network, create groups, and communicate in forums. In addition, a number of member profiles with graphic propaganda content could be located. We found several games with propagandistic content advertised on the platform, which caused apparently radicalised users to form a fan community around those games. For example, there are titles such as the antisemitic Fursan Al-Aqsa: The Knights of the Al-Aqsa Mosque, which has won the Best Hardcore Game award at the Game Connection America 2024 Game Development Awards and which has received antisemitic praise on its review page. In the game, players need to target members of the Israeli Defence Forces, and attacks by Palestinian terrorist groups such as Hamas or Lions’ Den can be re-enacted. These include bomb attacks, beheadings and the re-enactment of the terrorist attack in Israel on 7 October 2023. We, therefore, deem Indie DB to be relevant for further analyses of extremist activities in digital gaming spaces.</i><br><br>Indie DB is an online software delivery platform on which users can create groups, and participate in discussion forums (T0152.009: Software Delivery Platform, T0151.002: Online Community Group, T0151.009: Legacy Online Forum Platform). 
The platform hosted games which allowed players to reenact terrorist attacks (T0147.001: Game). |
|
||||
| [T0151.009 Legacy Online Forum Platform](../../generated_pages/techniques/T0151.009.md) | IT00000394 In this report, researchers look at online platforms commonly used by people who play videogames, looking at how these platforms can contribute to radicalisation of gamers:<br><br><i>Gamer Uprising Forums (GUF) [is an online discussion platform using the classic forum structure] aimed directly at gamers. It is run by US Neo-Nazi Andrew Anglin and explicitly targets politically right-wing gamers. This forum mainly includes antisemitic, sexist, and racist topics, but also posts on related issues such as esotericism, conspiracy narratives, pro-Russian propaganda, alternative medicine, Christian religion, content related to the incel- and manosphere, lists of criminal offences committed by non-white people, links to right-wing news sites, homophobia and trans-hostility, troll guides, anti-leftism, ableism and much more. Most noticeable were the high number of antisemitic references. For example, there is a thread with hundreds of machine-generated images, most of which feature openly antisemitic content and popular antisemitic references. Many users chose explicitly antisemitic avatars. Some of the usernames also provide clues to the users’ ideologies and profiles feature swastikas as a type of progress bar and indicator of the user’s activity in the forum.<br><br>The GUF’s front page contains an overview of the forum, user statistics, and so-called “announcements”. In addition to advice-like references, these feature various expressions of hateful ideologies. At the time of the exploration, the following could be read there: “Jews are the problem!”, “Women should be raped”, “The Jews are going to be required to return stolen property”, “Immigrants will have to be physically removed”, “Console gaming is for n******” and “Anger is a womanly emotion”. New users have to prove themselves in an area for newcomers referred to in imageboard slang as the “Newfag Barn”. 
Only when the newcomers’ posts have received a substantial number of likes from established users, are they allowed to post in other parts of the forum. It can be assumed that this will also lead to competitions to outdo each other in posting extreme content. However, it is always possible to view all posts and content on the site. In any case, it can be assumed that the platform hardly addresses milieus that are not already radicalised or at risk of radicalisation and is therefore deemed relevant for radicalisation research. However, the number of registered users is low (typical for radicalised milieus) and, hence, the platform may only be of interest when studying a small group of highly radicalised individuals.</i><br><br>Gamer Uprising Forum is a legacy online forum, with access gated behind approval of existing platform users (T0155.003: Approval Gated, T0151.009: Legacy Online Forum Platform). |
|
||||
| [T0152.009 Software Delivery Platform](../../generated_pages/techniques/T0152.009.md) | IT00000396 <i>In this report, researchers look at online platforms commonly used by people who play videogames, looking at how these platforms can contribute to radicalisation of gamers:<br><br>Indie DB [is a platform that serves] to present indie games, which are titles from independent, small developer teams, which can be discussed and downloaded [Indie DB]. <br><br>[...]<br><br>[On Indie DB we] found antisemitic, Islamist, sexist and other discriminatory content during the exploration. Both games and comments were located that made positive references to National Socialism. Radicalised users seem to use the opportunities to network, create groups, and communicate in forums. In addition, a number of member profiles with graphic propaganda content could be located. We found several games with propagandistic content advertised on the platform, which caused apparently radicalised users to form a fan community around those games. For example, there are titles such as the antisemitic Fursan Al-Aqsa: The Knights of the Al-Aqsa Mosque, which has won the Best Hardcore Game award at the Game Connection America 2024 Game Development Awards and which has received antisemitic praise on its review page. In the game, players need to target members of the Israeli Defence Forces, and attacks by Palestinian terrorist groups such as Hamas or Lions’ Den can be re-enacted. These include bomb attacks, beheadings and the re-enactment of the terrorist attack in Israel on 7 October 2023. We, therefore, deem Indie DB to be relevant for further analyses of extremist activities in digital gaming spaces.</i><br><br>Indie DB is an online software delivery platform on which users can create groups, and participate in discussion forums (T0152.009: Software Delivery Platform, T0151.002: Online Community Group, T0151.009: Legacy Online Forum Platform). 
The platform hosted games which allowed players to reenact terrorist attacks (T0147.001: Game). |
|
||||
| [T0152.009 Software Delivery Platform](../../generated_pages/techniques/T0152.009.md) | IT00000399 In this report, researchers look at online platforms commonly used by people who play videogames, looking at how these platforms can contribute to radicalisation of gamers:<br><br><i>Gamebanana and Mod DB are so-called modding platforms that allow users to post their modifications of existing (popular) games. In the process of modding, highly radicalised content can be inserted into games that did not originally contain it. All of these platforms also have communication functions and customisable profiles.<br><br>[...]<br><br>During the explorations, several modifications with hateful themes were located, including right-wing extremist, racist, antisemitic and Islamist content. This includes mods that make it possible to play as terrorists or National Socialists. So-called “skins” (textures that change the appearance of models in the game) for characters from first-person shooters are particularly popular and contain references to National Socialism or Islamist terrorist organisations. Although some of this content could be justified with reference to historical accuracy and realism, the user profiles of the creators and commentators often reveal political motivations. Names with neo-Nazi codes or the use of avatars showing members of the Wehrmacht or the Waffen SS, for example, indicate a certain degree of positive appreciation or fascination with right-wing ideology, as do affirmations in the comment columns.<br><br>Mod DB in particular has attracted public attention in the past. For example, a mod for the game Half-Life 2 made it possible to play a school shooting with the weapons used during the attacks at Columbine High School (1999) and Virginia Polytechnic Institute and State University (2007). Antisemitic memes and jokes are shared in several groups on the platform. It seems as if users partially connect with each other because of shared political views. 
There were also indications that Islamist and right-wing extremist users network on the basis of shared views on women, Jews or homosexuals. In addition to relevant usernames and avatars, we found profiles featuring picture galleries, backgrounds and banners dedicated to the SS. Extremist propaganda and radicalisation processes on modding platforms have not been explored yet, but our exploration suggests these digital spaces to be highly relevant for our field.</i><br><br>Mod DB is a platform which allows users to upload mods for games, which other users can download (T0152.009: Software Delivery Platform, T0147.002: Game Mod). |
|
||||
| [T0155.003 Approval Gated](../../generated_pages/techniques/T0155.003.md) | IT00000393 In this report, researchers look at online platforms commonly used by people who play videogames, looking at how these platforms can contribute to radicalisation of gamers:<br><br><i>Gamer Uprising Forums (GUF) [is an online discussion platform using the classic forum structure] aimed directly at gamers. It is run by US Neo-Nazi Andrew Anglin and explicitly targets politically right-wing gamers. This forum mainly includes antisemitic, sexist, and racist topics, but also posts on related issues such as esotericism, conspiracy narratives, pro-Russian propaganda, alternative medicine, Christian religion, content related to the incel- and manosphere, lists of criminal offences committed by non-white people, links to right-wing news sites, homophobia and trans-hostility, troll guides, anti-leftism, ableism and much more. Most noticeable were the high number of antisemitic references. For example, there is a thread with hundreds of machine-generated images, most of which feature openly antisemitic content and popular antisemitic references. Many users chose explicitly antisemitic avatars. Some of the usernames also provide clues to the users’ ideologies and profiles feature swastikas as a type of progress bar and indicator of the user’s activity in the forum.<br><br>The GUF’s front page contains an overview of the forum, user statistics, and so-called “announcements”. In addition to advice-like references, these feature various expressions of hateful ideologies. At the time of the exploration, the following could be read there: “Jews are the problem!”, “Women should be raped”, “The Jews are going to be required to return stolen property”, “Immigrants will have to be physically removed”, “Console gaming is for n******” and “Anger is a womanly emotion”. New users have to prove themselves in an area for newcomers referred to in imageboard slang as the “Newfag Barn”. 
Only when the newcomers’ posts have received a substantial number of likes from established users, are they allowed to post in other parts of the forum. It can be assumed that this will also lead to competitions to outdo each other in posting extreme content. However, it is always possible to view all posts and content on the site. In any case, it can be assumed that the platform hardly addresses milieus that are not already radicalised or at risk of radicalisation and is therefore deemed relevant for radicalisation research. However, the number of registered users is low (typical for radicalised milieus) and, hence, the platform may only be of interest when studying a small group of highly radicalised individuals.</i><br><br>Gamer Uprising Forum is a legacy online forum, with access gated behind approval of existing platform users (T0155.003: Approval Gated, T0151.009: Legacy Online Forum Platform). |
|
||||
| [T0147.001 Game Asset](../../generated_pages/techniques/T0147.001.md) | IT00000397 <i>In this report, researchers look at online platforms commonly used by people who play videogames, looking at how these platforms can contribute to radicalisation of gamers:<br><br>Indie DB [is a platform that serves] to present indie games, which are titles from independent, small developer teams, which can be discussed and downloaded [Indie DB]. <br><br>[...]<br><br>[On Indie DB we] found antisemitic, Islamist, sexist and other discriminatory content during the exploration. Both games and comments were located that made positive references to National Socialism. Radicalised users seem to use the opportunities to network, create groups, and communicate in forums. In addition, a number of member profiles with graphic propaganda content could be located. We found several games with propagandistic content advertised on the platform, which caused apparently radicalised users to form a fan community around those games. For example, there are titles such as the antisemitic Fursan Al-Aqsa: The Knights of the Al-Aqsa Mosque, which has won the Best Hardcore Game award at the Game Connection America 2024 Game Development Awards and which has received antisemitic praise on its review page. In the game, players need to target members of the Israeli Defence Forces, and attacks by Palestinian terrorist groups such as Hamas or Lions’ Den can be re-enacted. These include bomb attacks, beheadings and the re-enactment of the terrorist attack in Israel on 7 October 2023. We, therefore, deem Indie DB to be relevant for further analyses of extremist activities in digital gaming spaces.</i><br><br>Indie DB is an online software delivery platform on which users can create groups, and participate in discussion forums (T0152.009: Software Delivery Platform, T0151.002: Online Community Group, T0151.009: Legacy Online Forum Platform). 
The platform hosted games which allowed players to reenact terrorist attacks (T0147.001: Game Asset). |
|
||||
| [T0147.002 Game Mod Asset](../../generated_pages/techniques/T0147.002.md) | IT00000398 In this report, researchers look at online platforms commonly used by people who play videogames, looking at how these platforms can contribute to radicalisation of gamers:<br><br><i>Gamebanana and Mod DB are so-called modding platforms that allow users to post their modifications of existing (popular) games. In the process of modding, highly radicalised content can be inserted into games that did not originally contain it. All of these platforms also have communication functions and customisable profiles.<br><br>[...]<br><br>During the explorations, several modifications with hateful themes were located, including right-wing extremist, racist, antisemitic and Islamist content. This includes mods that make it possible to play as terrorists or National Socialists. So-called “skins” (textures that change the appearance of models in the game) for characters from first-person shooters are particularly popular and contain references to National Socialism or Islamist terrorist organisations. Although some of this content could be justified with reference to historical accuracy and realism, the user profiles of the creators and commentators often reveal political motivations. Names with neo-Nazi codes or the use of avatars showing members of the Wehrmacht or the Waffen SS, for example, indicate a certain degree of positive appreciation or fascination with right-wing ideology, as do affirmations in the comment columns.<br><br>Mod DB in particular has attracted public attention in the past. For example, a mod for the game Half-Life 2 made it possible to play a school shooting with the weapons used during the attacks at Columbine High School (1999) and Virginia Polytechnic Institute and State University (2007). Antisemitic memes and jokes are shared in several groups on the platform. It seems as if users partially connect with each other because of shared political views. 
There were also indications that Islamist and right-wing extremist users network on the basis of shared views on women, Jews or homosexuals. In addition to relevant usernames and avatars, we found profiles featuring picture galleries, backgrounds and banners dedicated to the SS. Extremist propaganda and radicalisation processes on modding platforms have not been explored yet, but our exploration suggests these digital spaces to be highly relevant for our field.</i><br><br>Mod DB is a platform which allows users to upload mods for games, which other users can download (T0152.009: Software Delivery Platform, T0147.002: Game Mod Asset). |
|
||||
| [T0151.002 Online Community Group](../../generated_pages/techniques/T0151.002.md) | IT00000395 <i>In this report, researchers look at online platforms commonly used by people who play videogames, looking at how these platforms can contribute to radicalisation of gamers:<br><br>Indie DB [is a platform that serves] to present indie games, which are titles from independent, small developer teams, which can be discussed and downloaded [Indie DB]. <br><br>[...]<br><br>[On Indie DB we] found antisemitic, Islamist, sexist and other discriminatory content during the exploration. Both games and comments were located that made positive references to National Socialism. Radicalised users seem to use the opportunities to network, create groups, and communicate in forums. In addition, a number of member profiles with graphic propaganda content could be located. We found several games with propagandistic content advertised on the platform, which caused apparently radicalised users to form a fan community around those games. For example, there are titles such as the antisemitic Fursan Al-Aqsa: The Knights of the Al-Aqsa Mosque, which has won the Best Hardcore Game award at the Game Connection America 2024 Game Development Awards and which has received antisemitic praise on its review page. In the game, players need to target members of the Israeli Defence Forces, and attacks by Palestinian terrorist groups such as Hamas or Lions’ Den can be re-enacted. These include bomb attacks, beheadings and the re-enactment of the terrorist attack in Israel on 7 October 2023. We, therefore, deem Indie DB to be relevant for further analyses of extremist activities in digital gaming spaces.</i><br><br>Indie DB is an online software delivery platform on which users can create groups, and participate in discussion forums (T0152.009: Software Delivery Platform, T0151.002: Online Community Group, T0151.009: Legacy Online Forum Platform). 
The platform hosted games which allowed players to reenact terrorist attacks (T0147.001: Game Asset). |
|
||||
| [T0151.009 Legacy Online Forum Platform](../../generated_pages/techniques/T0151.009.md) | IT00000394 In this report, researchers look at online platforms commonly used by people who play videogames, looking at how these platforms can contribute to radicalisation of gamers:<br><br><i>Gamer Uprising Forums (GUF) [is an online discussion platform using the classic forum structure] aimed directly at gamers. It is run by US Neo-Nazi Andrew Anglin and explicitly targets politically right-wing gamers. This forum mainly includes antisemitic, sexist, and racist topics, but also posts on related issues such as esotericism, conspiracy narratives, pro-Russian propaganda, alternative medicine, Christian religion, content related to the incel- and manosphere, lists of criminal offences committed by non-white people, links to right-wing news sites, homophobia and trans-hostility, troll guides, anti-leftism, ableism and much more. Most noticeable were the high number of antisemitic references. For example, there is a thread with hundreds of machine-generated images, most of which feature openly antisemitic content and popular antisemitic references. Many users chose explicitly antisemitic avatars. Some of the usernames also provide clues to the users’ ideologies and profiles feature swastikas as a type of progress bar and indicator of the user’s activity in the forum.<br><br>The GUF’s front page contains an overview of the forum, user statistics, and so-called “announcements”. In addition to advice-like references, these feature various expressions of hateful ideologies. At the time of the exploration, the following could be read there: “Jews are the problem!”, “Women should be raped”, “The Jews are going to be required to return stolen property”, “Immigrants will have to be physically removed”, “Console gaming is for n******” and “Anger is a womanly emotion”. New users have to prove themselves in an area for newcomers referred to in imageboard slang as the “Newfag Barn”. 
Only when the newcomers’ posts have received a substantial number of likes from established users, are they allowed to post in other parts of the forum. It can be assumed that this will also lead to competitions to outdo each other in posting extreme content. However, it is always possible to view all posts and content on the site. In any case, it can be assumed that the platform hardly addresses milieus that are not already radicalised or at risk of radicalisation and is therefore deemed relevant for radicalisation research. However, the number of registered users is low (typical for radicalised milieus) and, hence, the platform may only be of interest when studying a small group of highly radicalised individuals.</i><br><br>Gamer Uprising Forum is a legacy online forum, with access gated behind approval of existing platform users (T0155.003: Approval Gated Asset, T0151.009: Legacy Online Forum Platform). |
|
||||
| [T0152.009 Software Delivery Platform](../../generated_pages/techniques/T0152.009.md) | IT00000396 <i>In this report, researchers look at online platforms commonly used by people who play videogames, looking at how these platforms can contribute to radicalisation of gamers:<br><br>Indie DB [is a platform that serves] to present indie games, which are titles from independent, small developer teams, which can be discussed and downloaded [Indie DB]. <br><br>[...]<br><br>[On Indie DB we] found antisemitic, Islamist, sexist and other discriminatory content during the exploration. Both games and comments were located that made positive references to National Socialism. Radicalised users seem to use the opportunities to network, create groups, and communicate in forums. In addition, a number of member profiles with graphic propaganda content could be located. We found several games with propagandistic content advertised on the platform, which caused apparently radicalised users to form a fan community around those games. For example, there are titles such as the antisemitic Fursan Al-Aqsa: The Knights of the Al-Aqsa Mosque, which has won the Best Hardcore Game award at the Game Connection America 2024 Game Development Awards and which has received antisemitic praise on its review page. In the game, players need to target members of the Israeli Defence Forces, and attacks by Palestinian terrorist groups such as Hamas or Lions’ Den can be re-enacted. These include bomb attacks, beheadings and the re-enactment of the terrorist attack in Israel on 7 October 2023. We, therefore, deem Indie DB to be relevant for further analyses of extremist activities in digital gaming spaces.</i><br><br>Indie DB is an online software delivery platform on which users can create groups, and participate in discussion forums (T0152.009: Software Delivery Platform, T0151.002: Online Community Group, T0151.009: Legacy Online Forum Platform). 
The platform hosted games which allowed players to reenact terrorist attacks (T0147.001: Game Asset). |
|
||||
| [T0152.009 Software Delivery Platform](../../generated_pages/techniques/T0152.009.md) | IT00000399 In this report, researchers look at online platforms commonly used by people who play videogames, looking at how these platforms can contribute to radicalisation of gamers:<br><br><i>Gamebanana and Mod DB are so-called modding platforms that allow users to post their modifications of existing (popular) games. In the process of modding, highly radicalised content can be inserted into games that did not originally contain it. All of these platforms also have communication functions and customisable profiles.<br><br>[...]<br><br>During the explorations, several modifications with hateful themes were located, including right-wing extremist, racist, antisemitic and Islamist content. This includes mods that make it possible to play as terrorists or National Socialists. So-called “skins” (textures that change the appearance of models in the game) for characters from first-person shooters are particularly popular and contain references to National Socialism or Islamist terrorist organisations. Although some of this content could be justified with reference to historical accuracy and realism, the user profiles of the creators and commentators often reveal political motivations. Names with neo-Nazi codes or the use of avatars showing members of the Wehrmacht or the Waffen SS, for example, indicate a certain degree of positive appreciation or fascination with right-wing ideology, as do affirmations in the comment columns.<br><br>Mod DB in particular has attracted public attention in the past. For example, a mod for the game Half-Life 2 made it possible to play a school shooting with the weapons used during the attacks at Columbine High School (1999) and Virginia Polytechnic Institute and State University (2007). Antisemitic memes and jokes are shared in several groups on the platform. It seems as if users partially connect with each other because of shared political views. 
There were also indications that Islamist and right-wing extremist users network on the basis of shared views on women, Jews or homosexuals. In addition to relevant usernames and avatars, we found profiles featuring picture galleries, backgrounds and banners dedicated to the SS. Extremist propaganda and radicalisation processes on modding platforms have not been explored yet, but our exploration suggests these digital spaces to be highly relevant for our field.</i><br><br>Mod DB is a platform which allows users to upload mods for games, which other users can download (T0152.009: Software Delivery Platform, T0147.002: Game Mod Asset). |
|
||||
| [T0155.003 Approval Gated Asset](../../generated_pages/techniques/T0155.003.md) | IT00000393 In this report, researchers look at online platforms commonly used by people who play videogames, looking at how these platforms can contribute to radicalisation of gamers:<br><br><i>Gamer Uprising Forums (GUF) [is an online discussion platform using the classic forum structure] aimed directly at gamers. It is run by US Neo-Nazi Andrew Anglin and explicitly targets politically right-wing gamers. This forum mainly includes antisemitic, sexist, and racist topics, but also posts on related issues such as esotericism, conspiracy narratives, pro-Russian propaganda, alternative medicine, Christian religion, content related to the incel- and manosphere, lists of criminal offences committed by non-white people, links to right-wing news sites, homophobia and trans-hostility, troll guides, anti-leftism, ableism and much more. Most noticeable were the high number of antisemitic references. For example, there is a thread with hundreds of machine-generated images, most of which feature openly antisemitic content and popular antisemitic references. Many users chose explicitly antisemitic avatars. Some of the usernames also provide clues to the users’ ideologies and profiles feature swastikas as a type of progress bar and indicator of the user’s activity in the forum.<br><br>The GUF’s front page contains an overview of the forum, user statistics, and so-called “announcements”. In addition to advice-like references, these feature various expressions of hateful ideologies. At the time of the exploration, the following could be read there: “Jews are the problem!”, “Women should be raped”, “The Jews are going to be required to return stolen property”, “Immigrants will have to be physically removed”, “Console gaming is for n******” and “Anger is a womanly emotion”. New users have to prove themselves in an area for newcomers referred to in imageboard slang as the “Newfag Barn”. 
Only when the newcomers’ posts have received a substantial number of likes from established users, are they allowed to post in other parts of the forum. It can be assumed that this will also lead to competitions to outdo each other in posting extreme content. However, it is always possible to view all posts and content on the site. In any case, it can be assumed that the platform hardly addresses milieus that are not already radicalised or at risk of radicalisation and is therefore deemed relevant for radicalisation research. However, the number of registered users is low (typical for radicalised milieus) and, hence, the platform may only be of interest when studying a small group of highly radicalised individuals.</i><br><br>Gamer Uprising Forum is a legacy online forum, with access gated behind approval of existing platform users (T0155.003: Approval Gated Asset, T0151.009: Legacy Online Forum Platform). |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -15,17 +15,18 @@
|
||||
|
||||
| Reference | Pub Date | Authors | Org | Archive |
|
||||
| --------- | -------- | ------- | --- | ------- |
|
||||
| [https://futurism.com/hurricane-scam-ai-slop](https://futurism.com/hurricane-scam-ai-slop) | 2024/10/01 | Maggie Harrison Dupré | Futurism | [https://web.archive.org/web/20241007213926/https://futurism.com/hurricane-scam-ai-slop](https://web.archive.org/web/20241007213926/https://futurism.com/hurricane-scam-ai-slop) |
|
||||
|
||||
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0068 Respond to Breaking News Event or Active Crisis](../../generated_pages/techniques/T0068.md) | IT00000403 <i>As families desperately seek to find missing loved ones and communities grapple with immeasurable losses of both life and property in the wake of [2024’s] Hurricane Helene, AI slop scammers appear to be capitalizing on the moment for personal gain.<br><br>A Facebook account called "Coastal Views" usually shares calmer AI imagery of nature-filled beachside scenes. The account's banner image showcases a signpost reading "OBX Live," OBX being shorthand for North Carolina's Outer Banks islands.<br><br>But starting this weekend, the account shifted its approach dramatically, as first flagged by a social media user on X.<br><br>Instead of posting "photos" of leaping dolphins and sandy beaches, the account suddenly started publishing images of flooded mountain neighborhoods, submerged houses, and dogs sitting on top of roofs.<br><br>But instead of spreading vital information to those affected by the natural disaster, or at the very least sharing real photos of the destruction, the account is seemingly trying to use AI to cash in on all the attention the hurricane has been getting.<br><br>The account links to an Etsy page for a business called" OuterBanks2023," where somebody who goes by "Alexandr" sells AI-generated prints of horses touching snouts with sea turtles, Santa running down the shoreline with a reindeer, and sunsets over ocean waves.</i><br><br>A Facebook page which presented itself as being associated with North Carolina which posted AI generated images changed to posting AI generated images of hurricane damage after Hurricane Helene hit North Carolina (T0151.003: Online Community Page, T0151.001: Social Media Platform, T0115: Post Content, T0086.002: Develop AI-Generated Images (Deepfakes), T0068: Respond to Breaking News Event or Active Crisis). 
<br><br>The account included links (T0122: Direct Users to Alternative Platforms) to an account on Etsy, which sold prints of AI generated images (T0146: Account, T0148.007: eCommerce Platform). |
|
||||
| [T0086.002 Develop AI-Generated Images (Deepfakes)](../../generated_pages/techniques/T0086.002.md) | IT00000402 <i>As families desperately seek to find missing loved ones and communities grapple with immeasurable losses of both life and property in the wake of [2024’s] Hurricane Helene, AI slop scammers appear to be capitalizing on the moment for personal gain.<br><br>A Facebook account called "Coastal Views" usually shares calmer AI imagery of nature-filled beachside scenes. The account's banner image showcases a signpost reading "OBX Live," OBX being shorthand for North Carolina's Outer Banks islands.<br><br>But starting this weekend, the account shifted its approach dramatically, as first flagged by a social media user on X.<br><br>Instead of posting "photos" of leaping dolphins and sandy beaches, the account suddenly started publishing images of flooded mountain neighborhoods, submerged houses, and dogs sitting on top of roofs.<br><br>But instead of spreading vital information to those affected by the natural disaster, or at the very least sharing real photos of the destruction, the account is seemingly trying to use AI to cash in on all the attention the hurricane has been getting.<br><br>The account links to an Etsy page for a business called" OuterBanks2023," where somebody who goes by "Alexandr" sells AI-generated prints of horses touching snouts with sea turtles, Santa running down the shoreline with a reindeer, and sunsets over ocean waves.</i><br><br>A Facebook page which presented itself as being associated with North Carolina which posted AI generated images changed to posting AI generated images of hurricane damage after Hurricane Helene hit North Carolina (T0151.003: Online Community Page, T0151.001: Social Media Platform, T0115: Post Content, T0086.002: Develop AI-Generated Images (Deepfakes), T0068: Respond to Breaking News Event or Active Crisis). 
<br><br>The account included links (T0122: Direct Users to Alternative Platforms) to an account on Etsy, which sold prints of AI generated images (T0146: Account, T0148.007: eCommerce Platform). |
|
||||
| [T0122 Direct Users to Alternative Platforms](../../generated_pages/techniques/T0122.md) | IT00000404 <i>As families desperately seek to find missing loved ones and communities grapple with immeasurable losses of both life and property in the wake of [2024’s] Hurricane Helene, AI slop scammers appear to be capitalizing on the moment for personal gain.<br><br>A Facebook account called "Coastal Views" usually shares calmer AI imagery of nature-filled beachside scenes. The account's banner image showcases a signpost reading "OBX Live," OBX being shorthand for North Carolina's Outer Banks islands.<br><br>But starting this weekend, the account shifted its approach dramatically, as first flagged by a social media user on X.<br><br>Instead of posting "photos" of leaping dolphins and sandy beaches, the account suddenly started publishing images of flooded mountain neighborhoods, submerged houses, and dogs sitting on top of roofs.<br><br>But instead of spreading vital information to those affected by the natural disaster, or at the very least sharing real photos of the destruction, the account is seemingly trying to use AI to cash in on all the attention the hurricane has been getting.<br><br>The account links to an Etsy page for a business called" OuterBanks2023," where somebody who goes by "Alexandr" sells AI-generated prints of horses touching snouts with sea turtles, Santa running down the shoreline with a reindeer, and sunsets over ocean waves.</i><br><br>A Facebook page which presented itself as being associated with North Carolina which posted AI generated images changed to posting AI generated images of hurricane damage after Hurricane Helene hit North Carolina (T0151.003: Online Community Page, T0151.001: Social Media Platform, T0115: Post Content, T0086.002: Develop AI-Generated Images (Deepfakes), T0068: Respond to Breaking News Event or Active Crisis). 
<br><br>The account included links (T0122: Direct Users to Alternative Platforms) to an account on Etsy, which sold prints of AI generated images (T0146: Account, T0148.007: eCommerce Platform). |
|
||||
| [T0148.007 eCommerce Platform](../../generated_pages/techniques/T0148.007.md) | IT00000405 <i>As families desperately seek to find missing loved ones and communities grapple with immeasurable losses of both life and property in the wake of [2024’s] Hurricane Helene, AI slop scammers appear to be capitalizing on the moment for personal gain.<br><br>A Facebook account called "Coastal Views" usually shares calmer AI imagery of nature-filled beachside scenes. The account's banner image showcases a signpost reading "OBX Live," OBX being shorthand for North Carolina's Outer Banks islands.<br><br>But starting this weekend, the account shifted its approach dramatically, as first flagged by a social media user on X.<br><br>Instead of posting "photos" of leaping dolphins and sandy beaches, the account suddenly started publishing images of flooded mountain neighborhoods, submerged houses, and dogs sitting on top of roofs.<br><br>But instead of spreading vital information to those affected by the natural disaster, or at the very least sharing real photos of the destruction, the account is seemingly trying to use AI to cash in on all the attention the hurricane has been getting.<br><br>The account links to an Etsy page for a business called" OuterBanks2023," where somebody who goes by "Alexandr" sells AI-generated prints of horses touching snouts with sea turtles, Santa running down the shoreline with a reindeer, and sunsets over ocean waves.</i><br><br>A Facebook page which presented itself as being associated with North Carolina which posted AI generated images changed to posting AI generated images of hurricane damage after Hurricane Helene hit North Carolina (T0151.003: Online Community Page, T0151.001: Social Media Platform, T0115: Post Content, T0086.002: Develop AI-Generated Images (Deepfakes), T0068: Respond to Breaking News Event or Active Crisis). 
<br><br>The account included links (T0122: Direct Users to Alternative Platforms) to an account on Etsy, which sold prints of AI generated images (T0146: Account, T0148.007: eCommerce Platform). |
|
||||
| [T0151.001 Social Media Platform](../../generated_pages/techniques/T0151.001.md) | IT00000400 <i>As families desperately seek to find missing loved ones and communities grapple with immeasurable losses of both life and property in the wake of [2024’s] Hurricane Helene, AI slop scammers appear to be capitalizing on the moment for personal gain.<br><br>A Facebook account called "Coastal Views" usually shares calmer AI imagery of nature-filled beachside scenes. The account's banner image showcases a signpost reading "OBX Live," OBX being shorthand for North Carolina's Outer Banks islands.<br><br>But starting this weekend, the account shifted its approach dramatically, as first flagged by a social media user on X.<br><br>Instead of posting "photos" of leaping dolphins and sandy beaches, the account suddenly started publishing images of flooded mountain neighborhoods, submerged houses, and dogs sitting on top of roofs.<br><br>But instead of spreading vital information to those affected by the natural disaster, or at the very least sharing real photos of the destruction, the account is seemingly trying to use AI to cash in on all the attention the hurricane has been getting.<br><br>The account links to an Etsy page for a business called" OuterBanks2023," where somebody who goes by "Alexandr" sells AI-generated prints of horses touching snouts with sea turtles, Santa running down the shoreline with a reindeer, and sunsets over ocean waves.</i><br><br>A Facebook page which presented itself as being associated with North Carolina which posted AI generated images changed to posting AI generated images of hurricane damage after Hurricane Helene hit North Carolina (T0151.003: Online Community Page, T0151.001: Social Media Platform, T0115: Post Content, T0086.002: Develop AI-Generated Images (Deepfakes), T0068: Respond to Breaking News Event or Active Crisis). 
<br><br>The account included links (T0122: Direct Users to Alternative Platforms) to an account on Etsy, which sold prints of AI generated images (T0146: Account, T0148.007: eCommerce Platform). |
|
||||
| [T0151.003 Online Community Page](../../generated_pages/techniques/T0151.003.md) | IT00000401 <i>As families desperately seek to find missing loved ones and communities grapple with immeasurable losses of both life and property in the wake of [2024’s] Hurricane Helene, AI slop scammers appear to be capitalizing on the moment for personal gain.<br><br>A Facebook account called "Coastal Views" usually shares calmer AI imagery of nature-filled beachside scenes. The account's banner image showcases a signpost reading "OBX Live," OBX being shorthand for North Carolina's Outer Banks islands.<br><br>But starting this weekend, the account shifted its approach dramatically, as first flagged by a social media user on X.<br><br>Instead of posting "photos" of leaping dolphins and sandy beaches, the account suddenly started publishing images of flooded mountain neighborhoods, submerged houses, and dogs sitting on top of roofs.<br><br>But instead of spreading vital information to those affected by the natural disaster, or at the very least sharing real photos of the destruction, the account is seemingly trying to use AI to cash in on all the attention the hurricane has been getting.<br><br>The account links to an Etsy page for a business called" OuterBanks2023," where somebody who goes by "Alexandr" sells AI-generated prints of horses touching snouts with sea turtles, Santa running down the shoreline with a reindeer, and sunsets over ocean waves.</i><br><br>A Facebook page which presented itself as being associated with North Carolina which posted AI generated images changed to posting AI generated images of hurricane damage after Hurricane Helene hit North Carolina (T0151.003: Online Community Page, T0151.001: Social Media Platform, T0115: Post Content, T0086.002: Develop AI-Generated Images (Deepfakes), T0068: Respond to Breaking News Event or Active Crisis). 
<br><br>The account included links (T0122: Direct Users to Alternative Platforms) to an account on Etsy, which sold prints of AI generated images (T0146: Account, T0148.007: eCommerce Platform). |
|
||||
| [T0068 Respond to Breaking News Event or Active Crisis](../../generated_pages/techniques/T0068.md) | IT00000403 <i>As families desperately seek to find missing loved ones and communities grapple with immeasurable losses of both life and property in the wake of [2024’s] Hurricane Helene, AI slop scammers appear to be capitalizing on the moment for personal gain.<br><br>A Facebook account called "Coastal Views" usually shares calmer AI imagery of nature-filled beachside scenes. The account's banner image showcases a signpost reading "OBX Live," OBX being shorthand for North Carolina's Outer Banks islands.<br><br>But starting this weekend, the account shifted its approach dramatically, as first flagged by a social media user on X.<br><br>Instead of posting "photos" of leaping dolphins and sandy beaches, the account suddenly started publishing images of flooded mountain neighborhoods, submerged houses, and dogs sitting on top of roofs.<br><br>But instead of spreading vital information to those affected by the natural disaster, or at the very least sharing real photos of the destruction, the account is seemingly trying to use AI to cash in on all the attention the hurricane has been getting.<br><br>The account links to an Etsy page for a business called" OuterBanks2023," where somebody who goes by "Alexandr" sells AI-generated prints of horses touching snouts with sea turtles, Santa running down the shoreline with a reindeer, and sunsets over ocean waves.</i><br><br>A Facebook page which presented itself as being associated with North Carolina which posted AI generated images changed to posting AI generated images of hurricane damage after Hurricane Helene hit North Carolina (T0151.003: Online Community Page, T0151.001: Social Media Platform, T0115: Post Content, T0086.002: Develop AI-Generated Images (Deepfakes), T0068: Respond to Breaking News Event or Active Crisis). 
<br><br>The account included links (T0122: Direct Users to Alternative Platforms) to an account on Etsy, which sold prints of AI generated images (T0146: Account Asset, T0148.007: eCommerce Platform). |
|
||||
| [T0086.002 Develop AI-Generated Images (Deepfakes)](../../generated_pages/techniques/T0086.002.md) | IT00000402 <i>As families desperately seek to find missing loved ones and communities grapple with immeasurable losses of both life and property in the wake of [2024’s] Hurricane Helene, AI slop scammers appear to be capitalizing on the moment for personal gain.<br><br>A Facebook account called "Coastal Views" usually shares calmer AI imagery of nature-filled beachside scenes. The account's banner image showcases a signpost reading "OBX Live," OBX being shorthand for North Carolina's Outer Banks islands.<br><br>But starting this weekend, the account shifted its approach dramatically, as first flagged by a social media user on X.<br><br>Instead of posting "photos" of leaping dolphins and sandy beaches, the account suddenly started publishing images of flooded mountain neighborhoods, submerged houses, and dogs sitting on top of roofs.<br><br>But instead of spreading vital information to those affected by the natural disaster, or at the very least sharing real photos of the destruction, the account is seemingly trying to use AI to cash in on all the attention the hurricane has been getting.<br><br>The account links to an Etsy page for a business called" OuterBanks2023," where somebody who goes by "Alexandr" sells AI-generated prints of horses touching snouts with sea turtles, Santa running down the shoreline with a reindeer, and sunsets over ocean waves.</i><br><br>A Facebook page which presented itself as being associated with North Carolina which posted AI generated images changed to posting AI generated images of hurricane damage after Hurricane Helene hit North Carolina (T0151.003: Online Community Page, T0151.001: Social Media Platform, T0115: Post Content, T0086.002: Develop AI-Generated Images (Deepfakes), T0068: Respond to Breaking News Event or Active Crisis). 
<br><br>The account included links (T0122: Direct Users to Alternative Platforms) to an account on Etsy, which sold prints of AI generated images (T0146: Account Asset, T0148.007: eCommerce Platform). |
|
||||
| [T0122 Direct Users to Alternative Platforms](../../generated_pages/techniques/T0122.md) | IT00000404 <i>As families desperately seek to find missing loved ones and communities grapple with immeasurable losses of both life and property in the wake of [2024’s] Hurricane Helene, AI slop scammers appear to be capitalizing on the moment for personal gain.<br><br>A Facebook account called "Coastal Views" usually shares calmer AI imagery of nature-filled beachside scenes. The account's banner image showcases a signpost reading "OBX Live," OBX being shorthand for North Carolina's Outer Banks islands.<br><br>But starting this weekend, the account shifted its approach dramatically, as first flagged by a social media user on X.<br><br>Instead of posting "photos" of leaping dolphins and sandy beaches, the account suddenly started publishing images of flooded mountain neighborhoods, submerged houses, and dogs sitting on top of roofs.<br><br>But instead of spreading vital information to those affected by the natural disaster, or at the very least sharing real photos of the destruction, the account is seemingly trying to use AI to cash in on all the attention the hurricane has been getting.<br><br>The account links to an Etsy page for a business called" OuterBanks2023," where somebody who goes by "Alexandr" sells AI-generated prints of horses touching snouts with sea turtles, Santa running down the shoreline with a reindeer, and sunsets over ocean waves.</i><br><br>A Facebook page which presented itself as being associated with North Carolina which posted AI generated images changed to posting AI generated images of hurricane damage after Hurricane Helene hit North Carolina (T0151.003: Online Community Page, T0151.001: Social Media Platform, T0115: Post Content, T0086.002: Develop AI-Generated Images (Deepfakes), T0068: Respond to Breaking News Event or Active Crisis). 
<br><br>The account included links (T0122: Direct Users to Alternative Platforms) to an account on Etsy, which sold prints of AI generated images (T0146: Account Asset, T0148.007: eCommerce Platform). |
|
||||
| [T0148.007 eCommerce Platform](../../generated_pages/techniques/T0148.007.md) | IT00000405 <i>As families desperately seek to find missing loved ones and communities grapple with immeasurable losses of both life and property in the wake of [2024’s] Hurricane Helene, AI slop scammers appear to be capitalizing on the moment for personal gain.<br><br>A Facebook account called "Coastal Views" usually shares calmer AI imagery of nature-filled beachside scenes. The account's banner image showcases a signpost reading "OBX Live," OBX being shorthand for North Carolina's Outer Banks islands.<br><br>But starting this weekend, the account shifted its approach dramatically, as first flagged by a social media user on X.<br><br>Instead of posting "photos" of leaping dolphins and sandy beaches, the account suddenly started publishing images of flooded mountain neighborhoods, submerged houses, and dogs sitting on top of roofs.<br><br>But instead of spreading vital information to those affected by the natural disaster, or at the very least sharing real photos of the destruction, the account is seemingly trying to use AI to cash in on all the attention the hurricane has been getting.<br><br>The account links to an Etsy page for a business called" OuterBanks2023," where somebody who goes by "Alexandr" sells AI-generated prints of horses touching snouts with sea turtles, Santa running down the shoreline with a reindeer, and sunsets over ocean waves.</i><br><br>A Facebook page which presented itself as being associated with North Carolina which posted AI generated images changed to posting AI generated images of hurricane damage after Hurricane Helene hit North Carolina (T0151.003: Online Community Page, T0151.001: Social Media Platform, T0115: Post Content, T0086.002: Develop AI-Generated Images (Deepfakes), T0068: Respond to Breaking News Event or Active Crisis). 
<br><br>The account included links (T0122: Direct Users to Alternative Platforms) to an account on Etsy, which sold prints of AI generated images (T0146: Account Asset, T0148.007: eCommerce Platform). |
|
||||
| [T0151.001 Social Media Platform](../../generated_pages/techniques/T0151.001.md) | IT00000400 <i>As families desperately seek to find missing loved ones and communities grapple with immeasurable losses of both life and property in the wake of [2024’s] Hurricane Helene, AI slop scammers appear to be capitalizing on the moment for personal gain.<br><br>A Facebook account called "Coastal Views" usually shares calmer AI imagery of nature-filled beachside scenes. The account's banner image showcases a signpost reading "OBX Live," OBX being shorthand for North Carolina's Outer Banks islands.<br><br>But starting this weekend, the account shifted its approach dramatically, as first flagged by a social media user on X.<br><br>Instead of posting "photos" of leaping dolphins and sandy beaches, the account suddenly started publishing images of flooded mountain neighborhoods, submerged houses, and dogs sitting on top of roofs.<br><br>But instead of spreading vital information to those affected by the natural disaster, or at the very least sharing real photos of the destruction, the account is seemingly trying to use AI to cash in on all the attention the hurricane has been getting.<br><br>The account links to an Etsy page for a business called" OuterBanks2023," where somebody who goes by "Alexandr" sells AI-generated prints of horses touching snouts with sea turtles, Santa running down the shoreline with a reindeer, and sunsets over ocean waves.</i><br><br>A Facebook page which presented itself as being associated with North Carolina which posted AI generated images changed to posting AI generated images of hurricane damage after Hurricane Helene hit North Carolina (T0151.003: Online Community Page, T0151.001: Social Media Platform, T0115: Post Content, T0086.002: Develop AI-Generated Images (Deepfakes), T0068: Respond to Breaking News Event or Active Crisis). 
<br><br>The account included links (T0122: Direct Users to Alternative Platforms) to an account on Etsy, which sold prints of AI generated images (T0146: Account Asset, T0148.007: eCommerce Platform). |
|
||||
| [T0151.003 Online Community Page](../../generated_pages/techniques/T0151.003.md) | IT00000401 <i>As families desperately seek to find missing loved ones and communities grapple with immeasurable losses of both life and property in the wake of [2024’s] Hurricane Helene, AI slop scammers appear to be capitalizing on the moment for personal gain.<br><br>A Facebook account called "Coastal Views" usually shares calmer AI imagery of nature-filled beachside scenes. The account's banner image showcases a signpost reading "OBX Live," OBX being shorthand for North Carolina's Outer Banks islands.<br><br>But starting this weekend, the account shifted its approach dramatically, as first flagged by a social media user on X.<br><br>Instead of posting "photos" of leaping dolphins and sandy beaches, the account suddenly started publishing images of flooded mountain neighborhoods, submerged houses, and dogs sitting on top of roofs.<br><br>But instead of spreading vital information to those affected by the natural disaster, or at the very least sharing real photos of the destruction, the account is seemingly trying to use AI to cash in on all the attention the hurricane has been getting.<br><br>The account links to an Etsy page for a business called" OuterBanks2023," where somebody who goes by "Alexandr" sells AI-generated prints of horses touching snouts with sea turtles, Santa running down the shoreline with a reindeer, and sunsets over ocean waves.</i><br><br>A Facebook page which presented itself as being associated with North Carolina which posted AI generated images changed to posting AI generated images of hurricane damage after Hurricane Helene hit North Carolina (T0151.003: Online Community Page, T0151.001: Social Media Platform, T0115: Post Content, T0086.002: Develop AI-Generated Images (Deepfakes), T0068: Respond to Breaking News Event or Active Crisis). 
<br><br>The account included links (T0122: Direct Users to Alternative Platforms) to an account on Etsy, which sold prints of AI generated images (T0146: Account Asset, T0148.007: eCommerce Platform). |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -15,6 +15,7 @@
|
||||
|
||||
| Reference | Pub Date | Authors | Org | Archive |
|
||||
| --------- | -------- | ------- | --- | ------- |
|
||||
| [https://www.foreignaffairs.com/russia/lies-russia-tells-itself](https://www.foreignaffairs.com/russia/lies-russia-tells-itself) | 2024/09/30 | Thomas Rid | Foreign Affairs | [https://web.archive.org/web/20241009145602/https://www.foreignaffairs.com/russia/lies-russia-tells-itself](https://web.archive.org/web/20241009145602/https://www.foreignaffairs.com/russia/lies-russia-tells-itself) |
|
||||
|
||||
|
||||
|
||||
|
@ -15,19 +15,20 @@
|
||||
|
||||
| Reference | Pub Date | Authors | Org | Archive |
|
||||
| --------- | -------- | ------- | --- | ------- |
|
||||
| [https://www.disinfo.eu/publications/suavelos-white-supremacists-funded-through-facebook](https://www.disinfo.eu/publications/suavelos-white-supremacists-funded-through-facebook) | 2020/09/11 | Alexandre Alaphilippe, Roman Adamczyk, Gary Machado | EU Disinfo Lab | [https://web.archive.org/web/20240525113912/https://www.disinfo.eu/publications/suavelos-white-supremacists-funded-through-facebook/](https://web.archive.org/web/20240525113912/https://www.disinfo.eu/publications/suavelos-white-supremacists-funded-through-facebook/) |
|
||||
|
||||
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0092 Build Network](../../generated_pages/techniques/T0092.md) | IT00000410 <i>This article examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology:<br><br>Suavelos uses Facebook and other platforms to amplify its message. In order to bypass the platforms’ community standards and keep their public pages active, Facebook pages such as “I support the police” are a good vehicle to spread a specific agenda without claiming to be racist. In looking back at this Facebook page, we followed Facebook’s algorithm for related pages and found suggested Facebook pages<br><br>[...]<br><br>This amplification strategy on Facebook is successful, as according to SimilarWeb figures, it attracts around 111,000 visits every month on the Suavelos.eu website.<br><br>[...]<br><br>Revenue through online advertisements can be achieved by different platforms through targeted advertisements, like Google Adsense or Doubleclick, or related and similar sponsored content, such as Taboola. Accordingly, Suavelos.eu uses both of these websites to display advertisements and consequently receives funding from such advertisements.<br><br>Once visitors are on the website supporting its advertisement revenue, Suavelos’ goal is to then turn these visitors into regular members of Suavelos network through donations or fees, or have them continue to support Suavelos. </i><br><br>Suevelos created a variety of pages on Facebook which presented as centring on prosocial causes. 
Facebook’s algorithm helped direct users to these pages (T0092: Build Network, T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm, T0151.003: Online Community Page, T0143.208: Social Cause Persona).<br><br>Suevelos used these pages to generate traffic for their WordPress site (T0122: Direct Users to Alternative Platforms, T0152.003: Website Hosting Platform, T0152.004: Website), which used accounts on a variety of online advertising platforms to host adverts (T0146: Account, T0153.005: Online Advertising Platform). |
|
||||
| [T0122 Direct Users to Alternative Platforms](../../generated_pages/techniques/T0122.md) | IT00000415 <i>This article examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology:<br><br>Suavelos uses Facebook and other platforms to amplify its message. In order to bypass the platforms’ community standards and keep their public pages active, Facebook pages such as “I support the police” are a good vehicle to spread a specific agenda without claiming to be racist. In looking back at this Facebook page, we followed Facebook’s algorithm for related pages and found suggested Facebook pages<br><br>[...]<br><br>This amplification strategy on Facebook is successful, as according to SimilarWeb figures, it attracts around 111,000 visits every month on the Suavelos.eu website.<br><br>[...]<br><br>Revenue through online advertisements can be achieved by different platforms through targeted advertisements, like Google Adsense or Doubleclick, or related and similar sponsored content, such as Taboola. Accordingly, Suavelos.eu uses both of these websites to display advertisements and consequently receives funding from such advertisements.<br><br>Once visitors are on the website supporting its advertisement revenue, Suavelos’ goal is to then turn these visitors into regular members of Suavelos network through donations or fees, or have them continue to support Suavelos. </i><br><br>Suevelos created a variety of pages on Facebook which presented as centring on prosocial causes. 
Facebook’s algorithm helped direct users to these pages (T0092: Build Network, T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm, T0151.003: Online Community Page, T0143.208: Social Cause Persona).<br><br>Suevelos used these pages to generate traffic for their WordPress site (T0122: Direct Users to Alternative Platforms, T0152.003: Website Hosting Platform, T0152.004: Website), which used accounts on a variety of online advertising platforms to host adverts (T0146: Account, T0153.005: Online Advertising Platform). |
|
||||
| [T0151.001 Social Media Platform](../../generated_pages/techniques/T0151.001.md) | IT00000411 <i>This article examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology:<br><br>Suavelos uses Facebook and other platforms to amplify its message. In order to bypass the platforms’ community standards and keep their public pages active, Facebook pages such as “I support the police” are a good vehicle to spread a specific agenda without claiming to be racist. In looking back at this Facebook page, we followed Facebook’s algorithm for related pages and found suggested Facebook pages<br><br>[...]<br><br>This amplification strategy on Facebook is successful, as according to SimilarWeb figures, it attracts around 111,000 visits every month on the Suavelos.eu website.<br><br>[...]<br><br>Revenue through online advertisements can be achieved by different platforms through targeted advertisements, like Google Adsense or Doubleclick, or related and similar sponsored content, such as Taboola. Accordingly, Suavelos.eu uses both of these websites to display advertisements and consequently receives funding from such advertisements.<br><br>Once visitors are on the website supporting its advertisement revenue, Suavelos’ goal is to then turn these visitors into regular members of Suavelos network through donations or fees, or have them continue to support Suavelos. </i><br><br>Suevelos created a variety of pages on Facebook which presented as centring on prosocial causes. 
Facebook’s algorithm helped direct users to these pages (T0092: Build Network, T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm, T0151.003: Online Community Page, T0143.208: Social Cause Persona).<br><br>Suevelos used these pages to generate traffic for their WordPress site (T0122: Direct Users to Alternative Platforms, T0152.003: Website Hosting Platform, T0152.004: Website), which used accounts on a variety of online advertising platforms to host adverts (T0146: Account, T0153.005: Online Advertising Platform). |
|
||||
| [T0151.003 Online Community Page](../../generated_pages/techniques/T0151.003.md) | IT00000413 <i>This article examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology:<br><br>Suavelos uses Facebook and other platforms to amplify its message. In order to bypass the platforms’ community standards and keep their public pages active, Facebook pages such as “I support the police” are a good vehicle to spread a specific agenda without claiming to be racist. In looking back at this Facebook page, we followed Facebook’s algorithm for related pages and found suggested Facebook pages<br><br>[...]<br><br>This amplification strategy on Facebook is successful, as according to SimilarWeb figures, it attracts around 111,000 visits every month on the Suavelos.eu website.<br><br>[...]<br><br>Revenue through online advertisements can be achieved by different platforms through targeted advertisements, like Google Adsense or Doubleclick, or related and similar sponsored content, such as Taboola. Accordingly, Suavelos.eu uses both of these websites to display advertisements and consequently receives funding from such advertisements.<br><br>Once visitors are on the website supporting its advertisement revenue, Suavelos’ goal is to then turn these visitors into regular members of Suavelos network through donations or fees, or have them continue to support Suavelos. </i><br><br>Suevelos created a variety of pages on Facebook which presented as centring on prosocial causes. 
Facebook’s algorithm helped direct users to these pages (T0092: Build Network, T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm, T0151.003: Online Community Page, T0143.208: Social Cause Persona).<br><br>Suevelos used these pages to generate traffic for their WordPress site (T0122: Direct Users to Alternative Platforms, T0152.003: Website Hosting Platform, T0152.004: Website), which used accounts on a variety of online advertising platforms to host adverts (T0146: Account, T0153.005: Online Advertising Platform). |
|
||||
| [T0152.003 Website Hosting Platform](../../generated_pages/techniques/T0152.003.md) | IT00000416 <i>This article examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology:<br><br>Suavelos uses Facebook and other platforms to amplify its message. In order to bypass the platforms’ community standards and keep their public pages active, Facebook pages such as “I support the police” are a good vehicle to spread a specific agenda without claiming to be racist. In looking back at this Facebook page, we followed Facebook’s algorithm for related pages and found suggested Facebook pages<br><br>[...]<br><br>This amplification strategy on Facebook is successful, as according to SimilarWeb figures, it attracts around 111,000 visits every month on the Suavelos.eu website.<br><br>[...]<br><br>Revenue through online advertisements can be achieved by different platforms through targeted advertisements, like Google Adsense or Doubleclick, or related and similar sponsored content, such as Taboola. Accordingly, Suavelos.eu uses both of these websites to display advertisements and consequently receives funding from such advertisements.<br><br>Once visitors are on the website supporting its advertisement revenue, Suavelos’ goal is to then turn these visitors into regular members of Suavelos network through donations or fees, or have them continue to support Suavelos. </i><br><br>Suevelos created a variety of pages on Facebook which presented as centring on prosocial causes. 
Facebook’s algorithm helped direct users to these pages (T0092: Build Network, T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm, T0151.003: Online Community Page, T0143.208: Social Cause Persona).<br><br>Suevelos used these pages to generate traffic for their WordPress site (T0122: Direct Users to Alternative Platforms, T0152.003: Website Hosting Platform, T0152.004: Website), which used accounts on a variety of online advertising platforms to host adverts (T0146: Account, T0153.005: Online Advertising Platform). |
|
||||
| [T0152.004 Website](../../generated_pages/techniques/T0152.004.md) | IT00000417 <i>This article examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology:<br><br>Suavelos uses Facebook and other platforms to amplify its message. In order to bypass the platforms’ community standards and keep their public pages active, Facebook pages such as “I support the police” are a good vehicle to spread a specific agenda without claiming to be racist. In looking back at this Facebook page, we followed Facebook’s algorithm for related pages and found suggested Facebook pages<br><br>[...]<br><br>This amplification strategy on Facebook is successful, as according to SimilarWeb figures, it attracts around 111,000 visits every month on the Suavelos.eu website.<br><br>[...]<br><br>Revenue through online advertisements can be achieved by different platforms through targeted advertisements, like Google Adsense or Doubleclick, or related and similar sponsored content, such as Taboola. Accordingly, Suavelos.eu uses both of these websites to display advertisements and consequently receives funding from such advertisements.<br><br>Once visitors are on the website supporting its advertisement revenue, Suavelos’ goal is to then turn these visitors into regular members of Suavelos network through donations or fees, or have them continue to support Suavelos. </i><br><br>Suevelos created a variety of pages on Facebook which presented as centring on prosocial causes. 
Facebook’s algorithm helped direct users to these pages (T0092: Build Network, T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm, T0151.003: Online Community Page, T0143.208: Social Cause Persona).<br><br>Suevelos used these pages to generate traffic for their WordPress site (T0122: Direct Users to Alternative Platforms, T0152.003: Website Hosting Platform, T0152.004: Website), which used accounts on a variety of online advertising platforms to host adverts (T0146: Account, T0153.005: Online Advertising Platform). |
|
||||
| [T0153.005 Online Advertising Platform](../../generated_pages/techniques/T0153.005.md) | IT00000418 <i>This article examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology:<br><br>Suavelos uses Facebook and other platforms to amplify its message. In order to bypass the platforms’ community standards and keep their public pages active, Facebook pages such as “I support the police” are a good vehicle to spread a specific agenda without claiming to be racist. In looking back at this Facebook page, we followed Facebook’s algorithm for related pages and found suggested Facebook pages<br><br>[...]<br><br>This amplification strategy on Facebook is successful, as according to SimilarWeb figures, it attracts around 111,000 visits every month on the Suavelos.eu website.<br><br>[...]<br><br>Revenue through online advertisements can be achieved by different platforms through targeted advertisements, like Google Adsense or Doubleclick, or related and similar sponsored content, such as Taboola. Accordingly, Suavelos.eu uses both of these websites to display advertisements and consequently receives funding from such advertisements.<br><br>Once visitors are on the website supporting its advertisement revenue, Suavelos’ goal is to then turn these visitors into regular members of Suavelos network through donations or fees, or have them continue to support Suavelos. </i><br><br>Suevelos created a variety of pages on Facebook which presented as centring on prosocial causes. 
Facebook’s algorithm helped direct users to these pages (T0092: Build Network, T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm, T0151.003: Online Community Page, T0143.208: Social Cause Persona).<br><br>Suevelos used these pages to generate traffic for their WordPress site (T0122: Direct Users to Alternative Platforms, T0152.003: Website Hosting Platform, T0152.004: Website), which used accounts on a variety of online advertising platforms to host adverts (T0146: Account, T0153.005: Online Advertising Platform). |
|
||||
| [T0153.006 Content Recommendation Algorithm](../../generated_pages/techniques/T0153.006.md) | IT00000412 <i>This article examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology:<br><br>Suavelos uses Facebook and other platforms to amplify its message. In order to bypass the platforms’ community standards and keep their public pages active, Facebook pages such as “I support the police” are a good vehicle to spread a specific agenda without claiming to be racist. In looking back at this Facebook page, we followed Facebook’s algorithm for related pages and found suggested Facebook pages<br><br>[...]<br><br>This amplification strategy on Facebook is successful, as according to SimilarWeb figures, it attracts around 111,000 visits every month on the Suavelos.eu website.<br><br>[...]<br><br>Revenue through online advertisements can be achieved by different platforms through targeted advertisements, like Google Adsense or Doubleclick, or related and similar sponsored content, such as Taboola. Accordingly, Suavelos.eu uses both of these websites to display advertisements and consequently receives funding from such advertisements.<br><br>Once visitors are on the website supporting its advertisement revenue, Suavelos’ goal is to then turn these visitors into regular members of Suavelos network through donations or fees, or have them continue to support Suavelos. </i><br><br>Suevelos created a variety of pages on Facebook which presented as centring on prosocial causes. 
Facebook’s algorithm helped direct users to these pages (T0092: Build Network, T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm, T0151.003: Online Community Page, T0143.208: Social Cause Persona).<br><br>Suevelos used these pages to generate traffic for their WordPress site (T0122: Direct Users to Alternative Platforms, T0152.003: Website Hosting Platform, T0152.004: Website), which used accounts on a variety of online advertising platforms to host adverts (T0146: Account, T0153.005: Online Advertising Platform). |
|
||||
| [T0092 Build Network](../../generated_pages/techniques/T0092.md) | IT00000410 <i>This article examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology:<br><br>Suavelos uses Facebook and other platforms to amplify its message. In order to bypass the platforms’ community standards and keep their public pages active, Facebook pages such as “I support the police” are a good vehicle to spread a specific agenda without claiming to be racist. In looking back at this Facebook page, we followed Facebook’s algorithm for related pages and found suggested Facebook pages<br><br>[...]<br><br>This amplification strategy on Facebook is successful, as according to SimilarWeb figures, it attracts around 111,000 visits every month on the Suavelos.eu website.<br><br>[...]<br><br>Revenue through online advertisements can be achieved by different platforms through targeted advertisements, like Google Adsense or Doubleclick, or related and similar sponsored content, such as Taboola. Accordingly, Suavelos.eu uses both of these websites to display advertisements and consequently receives funding from such advertisements.<br><br>Once visitors are on the website supporting its advertisement revenue, Suavelos’ goal is to then turn these visitors into regular members of Suavelos network through donations or fees, or have them continue to support Suavelos. </i><br><br>Suevelos created a variety of pages on Facebook which presented as centring on prosocial causes. 
Facebook’s algorithm helped direct users to these pages (T0092: Build Network, T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm, T0151.003: Online Community Page, T0143.208: Social Cause Persona).<br><br>Suavelos used these pages to generate traffic for their WordPress site (T0122: Direct Users to Alternative Platforms, T0152.003: Website Hosting Platform, T0152.004: Website Asset), which used accounts on a variety of online advertising platforms to host adverts (T0146: Account Asset, T0153.005: Online Advertising Platform). |
|
||||
| [T0122 Direct Users to Alternative Platforms](../../generated_pages/techniques/T0122.md) | IT00000415 <i>This article examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology:<br><br>Suavelos uses Facebook and other platforms to amplify its message. In order to bypass the platforms’ community standards and keep their public pages active, Facebook pages such as “I support the police” are a good vehicle to spread a specific agenda without claiming to be racist. In looking back at this Facebook page, we followed Facebook’s algorithm for related pages and found suggested Facebook pages<br><br>[...]<br><br>This amplification strategy on Facebook is successful, as according to SimilarWeb figures, it attracts around 111,000 visits every month on the Suavelos.eu website.<br><br>[...]<br><br>Revenue through online advertisements can be achieved by different platforms through targeted advertisements, like Google Adsense or Doubleclick, or related and similar sponsored content, such as Taboola. Accordingly, Suavelos.eu uses both of these websites to display advertisements and consequently receives funding from such advertisements.<br><br>Once visitors are on the website supporting its advertisement revenue, Suavelos’ goal is to then turn these visitors into regular members of Suavelos network through donations or fees, or have them continue to support Suavelos. </i><br><br>Suevelos created a variety of pages on Facebook which presented as centring on prosocial causes. 
Facebook’s algorithm helped direct users to these pages (T0092: Build Network, T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm, T0151.003: Online Community Page, T0143.208: Social Cause Persona).<br><br>Suavelos used these pages to generate traffic for their WordPress site (T0122: Direct Users to Alternative Platforms, T0152.003: Website Hosting Platform, T0152.004: Website Asset), which used accounts on a variety of online advertising platforms to host adverts (T0146: Account Asset, T0153.005: Online Advertising Platform). |
|
||||
| [T0151.001 Social Media Platform](../../generated_pages/techniques/T0151.001.md) | IT00000411 <i>This article examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology:<br><br>Suavelos uses Facebook and other platforms to amplify its message. In order to bypass the platforms’ community standards and keep their public pages active, Facebook pages such as “I support the police” are a good vehicle to spread a specific agenda without claiming to be racist. In looking back at this Facebook page, we followed Facebook’s algorithm for related pages and found suggested Facebook pages<br><br>[...]<br><br>This amplification strategy on Facebook is successful, as according to SimilarWeb figures, it attracts around 111,000 visits every month on the Suavelos.eu website.<br><br>[...]<br><br>Revenue through online advertisements can be achieved by different platforms through targeted advertisements, like Google Adsense or Doubleclick, or related and similar sponsored content, such as Taboola. Accordingly, Suavelos.eu uses both of these websites to display advertisements and consequently receives funding from such advertisements.<br><br>Once visitors are on the website supporting its advertisement revenue, Suavelos’ goal is to then turn these visitors into regular members of Suavelos network through donations or fees, or have them continue to support Suavelos. </i><br><br>Suevelos created a variety of pages on Facebook which presented as centring on prosocial causes. 
Facebook’s algorithm helped direct users to these pages (T0092: Build Network, T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm, T0151.003: Online Community Page, T0143.208: Social Cause Persona).<br><br>Suavelos used these pages to generate traffic for their WordPress site (T0122: Direct Users to Alternative Platforms, T0152.003: Website Hosting Platform, T0152.004: Website Asset), which used accounts on a variety of online advertising platforms to host adverts (T0146: Account Asset, T0153.005: Online Advertising Platform). |
|
||||
| [T0151.003 Online Community Page](../../generated_pages/techniques/T0151.003.md) | IT00000413 <i>This article examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology:<br><br>Suavelos uses Facebook and other platforms to amplify its message. In order to bypass the platforms’ community standards and keep their public pages active, Facebook pages such as “I support the police” are a good vehicle to spread a specific agenda without claiming to be racist. In looking back at this Facebook page, we followed Facebook’s algorithm for related pages and found suggested Facebook pages<br><br>[...]<br><br>This amplification strategy on Facebook is successful, as according to SimilarWeb figures, it attracts around 111,000 visits every month on the Suavelos.eu website.<br><br>[...]<br><br>Revenue through online advertisements can be achieved by different platforms through targeted advertisements, like Google Adsense or Doubleclick, or related and similar sponsored content, such as Taboola. Accordingly, Suavelos.eu uses both of these websites to display advertisements and consequently receives funding from such advertisements.<br><br>Once visitors are on the website supporting its advertisement revenue, Suavelos’ goal is to then turn these visitors into regular members of Suavelos network through donations or fees, or have them continue to support Suavelos. </i><br><br>Suevelos created a variety of pages on Facebook which presented as centring on prosocial causes. 
Facebook’s algorithm helped direct users to these pages (T0092: Build Network, T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm, T0151.003: Online Community Page, T0143.208: Social Cause Persona).<br><br>Suavelos used these pages to generate traffic for their WordPress site (T0122: Direct Users to Alternative Platforms, T0152.003: Website Hosting Platform, T0152.004: Website Asset), which used accounts on a variety of online advertising platforms to host adverts (T0146: Account Asset, T0153.005: Online Advertising Platform). |
|
||||
| [T0152.003 Website Hosting Platform](../../generated_pages/techniques/T0152.003.md) | IT00000416 <i>This article examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology:<br><br>Suavelos uses Facebook and other platforms to amplify its message. In order to bypass the platforms’ community standards and keep their public pages active, Facebook pages such as “I support the police” are a good vehicle to spread a specific agenda without claiming to be racist. In looking back at this Facebook page, we followed Facebook’s algorithm for related pages and found suggested Facebook pages<br><br>[...]<br><br>This amplification strategy on Facebook is successful, as according to SimilarWeb figures, it attracts around 111,000 visits every month on the Suavelos.eu website.<br><br>[...]<br><br>Revenue through online advertisements can be achieved by different platforms through targeted advertisements, like Google Adsense or Doubleclick, or related and similar sponsored content, such as Taboola. Accordingly, Suavelos.eu uses both of these websites to display advertisements and consequently receives funding from such advertisements.<br><br>Once visitors are on the website supporting its advertisement revenue, Suavelos’ goal is to then turn these visitors into regular members of Suavelos network through donations or fees, or have them continue to support Suavelos. </i><br><br>Suevelos created a variety of pages on Facebook which presented as centring on prosocial causes. 
Facebook’s algorithm helped direct users to these pages (T0092: Build Network, T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm, T0151.003: Online Community Page, T0143.208: Social Cause Persona).<br><br>Suavelos used these pages to generate traffic for their WordPress site (T0122: Direct Users to Alternative Platforms, T0152.003: Website Hosting Platform, T0152.004: Website Asset), which used accounts on a variety of online advertising platforms to host adverts (T0146: Account Asset, T0153.005: Online Advertising Platform). |
|
||||
| [T0152.004 Website Asset](../../generated_pages/techniques/T0152.004.md) | IT00000417 <i>This article examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology:<br><br>Suavelos uses Facebook and other platforms to amplify its message. In order to bypass the platforms’ community standards and keep their public pages active, Facebook pages such as “I support the police” are a good vehicle to spread a specific agenda without claiming to be racist. In looking back at this Facebook page, we followed Facebook’s algorithm for related pages and found suggested Facebook pages<br><br>[...]<br><br>This amplification strategy on Facebook is successful, as according to SimilarWeb figures, it attracts around 111,000 visits every month on the Suavelos.eu website.<br><br>[...]<br><br>Revenue through online advertisements can be achieved by different platforms through targeted advertisements, like Google Adsense or Doubleclick, or related and similar sponsored content, such as Taboola. Accordingly, Suavelos.eu uses both of these websites to display advertisements and consequently receives funding from such advertisements.<br><br>Once visitors are on the website supporting its advertisement revenue, Suavelos’ goal is to then turn these visitors into regular members of Suavelos network through donations or fees, or have them continue to support Suavelos. </i><br><br>Suevelos created a variety of pages on Facebook which presented as centring on prosocial causes. 
Facebook’s algorithm helped direct users to these pages (T0092: Build Network, T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm, T0151.003: Online Community Page, T0143.208: Social Cause Persona).<br><br>Suavelos used these pages to generate traffic for their WordPress site (T0122: Direct Users to Alternative Platforms, T0152.003: Website Hosting Platform, T0152.004: Website Asset), which used accounts on a variety of online advertising platforms to host adverts (T0146: Account Asset, T0153.005: Online Advertising Platform). |
|
||||
| [T0153.005 Online Advertising Platform](../../generated_pages/techniques/T0153.005.md) | IT00000418 <i>This article examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology:<br><br>Suavelos uses Facebook and other platforms to amplify its message. In order to bypass the platforms’ community standards and keep their public pages active, Facebook pages such as “I support the police” are a good vehicle to spread a specific agenda without claiming to be racist. In looking back at this Facebook page, we followed Facebook’s algorithm for related pages and found suggested Facebook pages<br><br>[...]<br><br>This amplification strategy on Facebook is successful, as according to SimilarWeb figures, it attracts around 111,000 visits every month on the Suavelos.eu website.<br><br>[...]<br><br>Revenue through online advertisements can be achieved by different platforms through targeted advertisements, like Google Adsense or Doubleclick, or related and similar sponsored content, such as Taboola. Accordingly, Suavelos.eu uses both of these websites to display advertisements and consequently receives funding from such advertisements.<br><br>Once visitors are on the website supporting its advertisement revenue, Suavelos’ goal is to then turn these visitors into regular members of Suavelos network through donations or fees, or have them continue to support Suavelos. </i><br><br>Suevelos created a variety of pages on Facebook which presented as centring on prosocial causes. 
Facebook’s algorithm helped direct users to these pages (T0092: Build Network, T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm, T0151.003: Online Community Page, T0143.208: Social Cause Persona).<br><br>Suavelos used these pages to generate traffic for their WordPress site (T0122: Direct Users to Alternative Platforms, T0152.003: Website Hosting Platform, T0152.004: Website Asset), which used accounts on a variety of online advertising platforms to host adverts (T0146: Account Asset, T0153.005: Online Advertising Platform). |
|
||||
| [T0153.006 Content Recommendation Algorithm](../../generated_pages/techniques/T0153.006.md) | IT00000412 <i>This article examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology:<br><br>Suavelos uses Facebook and other platforms to amplify its message. In order to bypass the platforms’ community standards and keep their public pages active, Facebook pages such as “I support the police” are a good vehicle to spread a specific agenda without claiming to be racist. In looking back at this Facebook page, we followed Facebook’s algorithm for related pages and found suggested Facebook pages<br><br>[...]<br><br>This amplification strategy on Facebook is successful, as according to SimilarWeb figures, it attracts around 111,000 visits every month on the Suavelos.eu website.<br><br>[...]<br><br>Revenue through online advertisements can be achieved by different platforms through targeted advertisements, like Google Adsense or Doubleclick, or related and similar sponsored content, such as Taboola. Accordingly, Suavelos.eu uses both of these websites to display advertisements and consequently receives funding from such advertisements.<br><br>Once visitors are on the website supporting its advertisement revenue, Suavelos’ goal is to then turn these visitors into regular members of Suavelos network through donations or fees, or have them continue to support Suavelos. </i><br><br>Suevelos created a variety of pages on Facebook which presented as centring on prosocial causes. 
Facebook’s algorithm helped direct users to these pages (T0092: Build Network, T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm, T0151.003: Online Community Page, T0143.208: Social Cause Persona).<br><br>Suavelos used these pages to generate traffic for their WordPress site (T0122: Direct Users to Alternative Platforms, T0152.003: Website Hosting Platform, T0152.004: Website Asset), which used accounts on a variety of online advertising platforms to host adverts (T0146: Account Asset, T0153.005: Online Advertising Platform). |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -15,27 +15,28 @@
|
||||
|
||||
| Reference | Pub Date | Authors | Org | Archive |
|
||||
| --------- | -------- | ------- | --- | ------- |
|
||||
| [https://www.disinfo.eu/wp-content/uploads/2019/09/20190911_Disinformation_HateSpeechFunding.pdf](https://www.disinfo.eu/wp-content/uploads/2019/09/20190911_Disinformation_HateSpeechFunding.pdf) | 2019/09/11 | - | EU Disinfo Lab | [https://web.archive.org/web/20221214105547/https://www.disinfo.eu/wp-content/uploads/2019/09/20190911_Disinformation_HateSpeechFunding.pdf](https://web.archive.org/web/20221214105547/https://www.disinfo.eu/wp-content/uploads/2019/09/20190911_Disinformation_HateSpeechFunding.pdf) |
|
||||
|
||||
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0061 Sell Merchandise](../../generated_pages/techniques/T0061.md) | IT00000430 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at technical indicators associated with the Suavelos website, and attributions which can be made as a consequence:<i><br><br>[The Google AdSense tag set up on Suavelos.eu was also found on the following domains, indicating that they are controlled by the same actor;] Alabastro.eu: an online shop to buy “white nationalists” t-shirts [and] ARPAC.eu: the website of a registered non-profit organisation advocating to lift regulation on gun control in France.<br><br>Other domains attributed to Suavelos (T0149.001: Domain) reveal a website set up to sell merchandise (T0152.004: Website, T0148.004: Payment Processing Capability, T0061: Sell Merchandise), and a website hosting a registered French non-profit (T0152.004: Website, T0097.207: NGO Persona).<br><br>To learn more about the suavelos.eu domain, we collected the following data: The domain is hosted on OVH; The owner’s identity is protected; The IP Address of the server is 94.23.253.173, which is shared with 20 other domains. <br><br>The relative low number of websites hosted on this IP address could indicate that they all belong to the same people, and are hosted on the same private server.<.i><br><br>Suavelos registered a domain using the web hosting provider OVH (T0149.001: Domain, T0152.003: Website Hosting Platform, T0150.006: Purchased). The site’s IP address reveals a server hosting other domains potentially owned by the actors (T0149.005: Server, T0149.006: IP Address). |
|
||||
| [T0097.207 NGO Persona](../../generated_pages/techniques/T0097.207.md) | IT00000431 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at technical indicators associated with the Suavelos website, and attributions which can be made as a consequence:<i><br><br>[The Google AdSense tag set up on Suavelos.eu was also found on the following domains, indicating that they are controlled by the same actor;] Alabastro.eu: an online shop to buy “white nationalists” t-shirts [and] ARPAC.eu: the website of a registered non-profit organisation advocating to lift regulation on gun control in France.<br><br>Other domains attributed to Suavelos (T0149.001: Domain) reveal a website set up to sell merchandise (T0152.004: Website, T0148.004: Payment Processing Capability, T0061: Sell Merchandise), and a website hosting a registered French non-profit (T0152.004: Website, T0097.207: NGO Persona).<br><br>To learn more about the suavelos.eu domain, we collected the following data: The domain is hosted on OVH; The owner’s identity is protected; The IP Address of the server is 94.23.253.173, which is shared with 20 other domains. <br><br>The relative low number of websites hosted on this IP address could indicate that they all belong to the same people, and are hosted on the same private server.<.i><br><br>Suavelos registered a domain using the web hosting provider OVH (T0149.001: Domain, T0152.003: Website Hosting Platform, T0150.006: Purchased). The site’s IP address reveals a server hosting other domains potentially owned by the actors (T0149.005: Server, T0149.006: IP Address). |
|
||||
| [T0146 Account](../../generated_pages/techniques/T0146.md) | IT00000424 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.<br><br><i>In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3.000 members: oppidum.suavelos.eu;</i><br><br>Suavelos linked out to an online store which it controlled (T0152.004: Website, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account, T0148.003: Payment Processing Platform). <br><br>The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account, T0151.008: Microblogging Platform), YouTube (T0146: Account, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account, T0151.001: Social Media Platform). |
|
||||
| [T0148.003 Payment Processing Platform](../../generated_pages/techniques/T0148.003.md) | IT00000421 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.<br><br><i>In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3.000 members: oppidum.suavelos.eu;</i><br><br>Suavelos linked out to an online store which it controlled (T0152.004: Website, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account, T0148.003: Payment Processing Platform). <br><br>The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account, T0151.008: Microblogging Platform), YouTube (T0146: Account, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account, T0151.001: Social Media Platform). |
|
||||
| [T0148.004 Payment Processing Capability](../../generated_pages/techniques/T0148.004.md) | IT00000420 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.<br><br><i>In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3.000 members: oppidum.suavelos.eu;</i><br><br>Suavelos linked out to an online store which it controlled (T0152.004: Website, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account, T0148.003: Payment Processing Platform). <br><br>The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account, T0151.008: Microblogging Platform), YouTube (T0146: Account, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account, T0151.001: Social Media Platform). |
|
||||
| [T0148.004 Payment Processing Capability](../../generated_pages/techniques/T0148.004.md) | IT00000429 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at technical indicators associated with the Suavelos website, and attributions which can be made as a consequence:<i><br><br>[The Google AdSense tag set up on Suavelos.eu was also found on the following domains, indicating that they are controlled by the same actor;] Alabastro.eu: an online shop to buy “white nationalists” t-shirts [and] ARPAC.eu: the website of a registered non-profit organisation advocating to lift regulation on gun control in France.<br><br>Other domains attributed to Suavelos (T0149.001: Domain) reveal a website set up to sell merchandise (T0152.004: Website, T0148.004: Payment Processing Capability, T0061: Sell Merchandise), and a website hosting a registered French non-profit (T0152.004: Website, T0097.207: NGO Persona).<br><br>To learn more about the suavelos.eu domain, we collected the following data: The domain is hosted on OVH; The owner’s identity is protected; The IP Address of the server is 94.23.253.173, which is shared with 20 other domains. <br><br>The relative low number of websites hosted on this IP address could indicate that they all belong to the same people, and are hosted on the same private server.<.i><br><br>Suavelos registered a domain using the web hosting provider OVH (T0149.001: Domain, T0152.003: Website Hosting Platform, T0150.006: Purchased). The site’s IP address reveals a server hosting other domains potentially owned by the actors (T0149.005: Server, T0149.006: IP Address). |
|
||||
| [T0149.001 Domain](../../generated_pages/techniques/T0149.001.md) | IT00000428 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at technical indicators associated with the Suavelos website, and attributions which can be made as a consequence:<i><br><br>[The Google AdSense tag set up on Suavelos.eu was also found on the following domains, indicating that they are controlled by the same actor;] Alabastro.eu: an online shop to buy “white nationalists” t-shirts [and] ARPAC.eu: the website of a registered non-profit organisation advocating to lift regulation on gun control in France.<br><br>Other domains attributed to Suavelos (T0149.001: Domain) reveal a website set up to sell merchandise (T0152.004: Website, T0148.004: Payment Processing Capability, T0061: Sell Merchandise), and a website hosting a registered French non-profit (T0152.004: Website, T0097.207: NGO Persona).<br><br>To learn more about the suavelos.eu domain, we collected the following data: The domain is hosted on OVH; The owner’s identity is protected; The IP Address of the server is 94.23.253.173, which is shared with 20 other domains. <br><br>The relative low number of websites hosted on this IP address could indicate that they all belong to the same people, and are hosted on the same private server.<.i><br><br>Suavelos registered a domain using the web hosting provider OVH (T0149.001: Domain, T0152.003: Website Hosting Platform, T0150.006: Purchased). The site’s IP address reveals a server hosting other domains potentially owned by the actors (T0149.005: Server, T0149.006: IP Address). |
|
||||
| [T0149.005 Server](../../generated_pages/techniques/T0149.005.md) | IT00000433 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at technical indicators associated with the Suavelos website, and attributions which can be made as a consequence:<i><br><br>[The Google AdSense tag set up on Suavelos.eu was also found on the following domains, indicating that they are controlled by the same actor;] Alabastro.eu: an online shop to buy “white nationalists” t-shirts [and] ARPAC.eu: the website of a registered non-profit organisation advocating to lift regulation on gun control in France.<br><br>Other domains attributed to Suavelos (T0149.001: Domain) reveal a website set up to sell merchandise (T0152.004: Website, T0148.004: Payment Processing Capability, T0061: Sell Merchandise), and a website hosting a registered French non-profit (T0152.004: Website, T0097.207: NGO Persona).<br><br>To learn more about the suavelos.eu domain, we collected the following data: The domain is hosted on OVH; The owner’s identity is protected; The IP Address of the server is 94.23.253.173, which is shared with 20 other domains. <br><br>The relative low number of websites hosted on this IP address could indicate that they all belong to the same people, and are hosted on the same private server.<.i><br><br>Suavelos registered a domain using the web hosting provider OVH (T0149.001: Domain, T0152.003: Website Hosting Platform, T0150.006: Purchased). The site’s IP address reveals a server hosting other domains potentially owned by the actors (T0149.005: Server, T0149.006: IP Address). |
|
||||
| [T0149.006 IP Address](../../generated_pages/techniques/T0149.006.md) | IT00000434 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at technical indicators associated with the Suavelos website, and attributions which can be made as a consequence:<i><br><br>[The Google AdSense tag set up on Suavelos.eu was also found on the following domains, indicating that they are controlled by the same actor;] Alabastro.eu: an online shop to buy “white nationalists” t-shirts [and] ARPAC.eu: the website of a registered non-profit organisation advocating to lift regulation on gun control in France.<br><br>Other domains attributed to Suavelos (T0149.001: Domain) reveal a website set up to sell merchandise (T0152.004: Website, T0148.004: Payment Processing Capability, T0061: Sell Merchandise), and a website hosting a registered French non-profit (T0152.004: Website, T0097.207: NGO Persona).<br><br>To learn more about the suavelos.eu domain, we collected the following data: The domain is hosted on OVH; The owner’s identity is protected; The IP Address of the server is 94.23.253.173, which is shared with 20 other domains. <br><br>The relative low number of websites hosted on this IP address could indicate that they all belong to the same people, and are hosted on the same private server.<.i><br><br>Suavelos registered a domain using the web hosting provider OVH (T0149.001: Domain, T0152.003: Website Hosting Platform, T0150.006: Purchased). The site’s IP address reveals a server hosting other domains potentially owned by the actors (T0149.005: Server, T0149.006: IP Address). |
|
||||
| [T0150.006 Purchased](../../generated_pages/techniques/T0150.006.md) | IT00000432 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at technical indicators associated with the Suavelos website, and attributions which can be made as a consequence:<i><br><br>[The Google AdSense tag set up on Suavelos.eu was also found on the following domains, indicating that they are controlled by the same actor;] Alabastro.eu: an online shop to buy “white nationalists” t-shirts [and] ARPAC.eu: the website of a registered non-profit organisation advocating to lift regulation on gun control in France.<br><br>Other domains attributed to Suavelos (T0149.001: Domain) reveal a website set up to sell merchandise (T0152.004: Website, T0148.004: Payment Processing Capability, T0061: Sell Merchandise), and a website hosting a registered French non-profit (T0152.004: Website, T0097.207: NGO Persona).<br><br>To learn more about the suavelos.eu domain, we collected the following data: The domain is hosted on OVH; The owner’s identity is protected; The IP Address of the server is 94.23.253.173, which is shared with 20 other domains. <br><br>The relative low number of websites hosted on this IP address could indicate that they all belong to the same people, and are hosted on the same private server.<.i><br><br>Suavelos registered a domain using the web hosting provider OVH (T0149.001: Domain, T0152.003: Website Hosting Platform, T0150.006: Purchased). The site’s IP address reveals a server hosting other domains potentially owned by the actors (T0149.005: Server, T0149.006: IP Address). |
|
||||
| [T0151.001 Social Media Platform](../../generated_pages/techniques/T0151.001.md) | IT00000427 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.<br><br><i>In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3.000 members: oppidum.suavelos.eu;</i><br><br>Suavelos linked out to an online store which it controlled (T0152.004: Website, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account, T0148.003: Payment Processing Platform). <br><br>The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account, T0151.008: Microblogging Platform), YouTube (T0146: Account, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account, T0151.001: Social Media Platform). |
|
||||
| [T0151.008 Microblogging Platform](../../generated_pages/techniques/T0151.008.md) | IT00000425 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.<br><br><i>In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3.000 members: oppidum.suavelos.eu;</i><br><br>Suavelos linked out to an online store which it controlled (T0152.004: Website, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account, T0148.003: Payment Processing Platform). <br><br>The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account, T0151.008: Microblogging Platform), YouTube (T0146: Account, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account, T0151.001: Social Media Platform). |
|
||||
| [T0151.009 Legacy Online Forum Platform](../../generated_pages/techniques/T0151.009.md) | IT00000422 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.<br><br><i>In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3.000 members: oppidum.suavelos.eu;</i><br><br>Suavelos linked out to an online store which it controlled (T0152.004: Website, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account, T0148.003: Payment Processing Platform). <br><br>The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account, T0151.008: Microblogging Platform), YouTube (T0146: Account, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account, T0151.001: Social Media Platform). |
|
||||
| [T0152.004 Website](../../generated_pages/techniques/T0152.004.md) | IT00000419 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.<br><br><i>In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3.000 members: oppidum.suavelos.eu;</i><br><br>Suavelos linked out to an online store which it controlled (T0152.004: Website, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account, T0148.003: Payment Processing Platform). <br><br>The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account, T0151.008: Microblogging Platform), YouTube (T0146: Account, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account, T0151.001: Social Media Platform). |
|
||||
| [T0152.006 Video Platform](../../generated_pages/techniques/T0152.006.md) | IT00000426 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.<br><br><i>In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3.000 members: oppidum.suavelos.eu;</i><br><br>Suavelos linked out to an online store which it controlled (T0152.004: Website, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account, T0148.003: Payment Processing Platform). <br><br>The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account, T0151.008: Microblogging Platform), YouTube (T0146: Account, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account, T0151.001: Social Media Platform). |
|
||||
| [T0155 Gated Asset](../../generated_pages/techniques/T0155.md) | IT00000423 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.<br><br><i>In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3.000 members: oppidum.suavelos.eu;</i><br><br>Suavelos linked out to an online store which it controlled (T0152.004: Website, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account, T0148.003: Payment Processing Platform). <br><br>The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account, T0151.008: Microblogging Platform), YouTube (T0146: Account, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account, T0151.001: Social Media Platform). |
|
||||
| [T0061 Sell Merchandise](../../generated_pages/techniques/T0061.md) | IT00000430 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at technical indicators associated with the Suavelos website, and attributions which can be made as a consequence:<i><br><br>[The Google AdSense tag set up on Suavelos.eu was also found on the following domains, indicating that they are controlled by the same actor;] Alabastro.eu: an online shop to buy “white nationalists” t-shirts [and] ARPAC.eu: the website of a registered non-profit organisation advocating to lift regulation on gun control in France.<br><br>Other domains attributed to Suavelos (T0149.001: Domain Asset) reveal a website set up to sell merchandise (T0152.004: Website Asset, T0148.004: Payment Processing Capability, T0061: Sell Merchandise), and a website hosting a registered French non-profit (T0152.004: Website Asset, T0097.207: NGO Persona).<br><br>To learn more about the suavelos.eu domain, we collected the following data: The domain is hosted on OVH; The owner’s identity is protected; The IP Address of the server is 94.23.253.173, which is shared with 20 other domains. <br><br>The relative low number of websites hosted on this IP address could indicate that they all belong to the same people, and are hosted on the same private server.</i><br><br>Suavelos registered a domain using the web hosting provider OVH (T0149.001: Domain Asset, T0152.003: Website Hosting Platform, T0150.006: Purchased). The site’s IP address reveals a server hosting other domains potentially owned by the actors (T0149.005: Server Asset, T0149.006: IP Address Asset). |
|
||||
| [T0097.207 NGO Persona](../../generated_pages/techniques/T0097.207.md) | IT00000431 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at technical indicators associated with the Suavelos website, and attributions which can be made as a consequence:<i><br><br>[The Google AdSense tag set up on Suavelos.eu was also found on the following domains, indicating that they are controlled by the same actor;] Alabastro.eu: an online shop to buy “white nationalists” t-shirts [and] ARPAC.eu: the website of a registered non-profit organisation advocating to lift regulation on gun control in France.<br><br>Other domains attributed to Suavelos (T0149.001: Domain Asset) reveal a website set up to sell merchandise (T0152.004: Website Asset, T0148.004: Payment Processing Capability, T0061: Sell Merchandise), and a website hosting a registered French non-profit (T0152.004: Website Asset, T0097.207: NGO Persona).<br><br>To learn more about the suavelos.eu domain, we collected the following data: The domain is hosted on OVH; The owner’s identity is protected; The IP Address of the server is 94.23.253.173, which is shared with 20 other domains. <br><br>The relative low number of websites hosted on this IP address could indicate that they all belong to the same people, and are hosted on the same private server.</i><br><br>Suavelos registered a domain using the web hosting provider OVH (T0149.001: Domain Asset, T0152.003: Website Hosting Platform, T0150.006: Purchased). The site’s IP address reveals a server hosting other domains potentially owned by the actors (T0149.005: Server Asset, T0149.006: IP Address Asset). |
|
||||
| [T0146 Account Asset](../../generated_pages/techniques/T0146.md) | IT00000424 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.<br><br><i>In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3.000 members: oppidum.suavelos.eu;</i><br><br>Suavelos linked out to an online store which it controlled (T0152.004: Website Asset, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account Asset, T0148.003: Payment Processing Platform). <br><br>The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account Asset, T0151.008: Microblogging Platform), YouTube (T0146: Account Asset, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account Asset, T0151.001: Social Media Platform). |
|
||||
| [T0148.003 Payment Processing Platform](../../generated_pages/techniques/T0148.003.md) | IT00000421 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.<br><br><i>In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3.000 members: oppidum.suavelos.eu;</i><br><br>Suavelos linked out to an online store which it controlled (T0152.004: Website Asset, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account Asset, T0148.003: Payment Processing Platform). <br><br>The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account Asset, T0151.008: Microblogging Platform), YouTube (T0146: Account Asset, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account Asset, T0151.001: Social Media Platform). |
|
||||
| [T0148.004 Payment Processing Capability](../../generated_pages/techniques/T0148.004.md) | IT00000420 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.<br><br><i>In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3.000 members: oppidum.suavelos.eu;</i><br><br>Suavelos linked out to an online store which it controlled (T0152.004: Website Asset, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account Asset, T0148.003: Payment Processing Platform). <br><br>The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account Asset, T0151.008: Microblogging Platform), YouTube (T0146: Account Asset, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account Asset, T0151.001: Social Media Platform). |
|
||||
| [T0148.004 Payment Processing Capability](../../generated_pages/techniques/T0148.004.md) | IT00000429 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at technical indicators associated with the Suavelos website, and attributions which can be made as a consequence:<i><br><br>[The Google AdSense tag set up on Suavelos.eu was also found on the following domains, indicating that they are controlled by the same actor;] Alabastro.eu: an online shop to buy “white nationalists” t-shirts [and] ARPAC.eu: the website of a registered non-profit organisation advocating to lift regulation on gun control in France.<br><br>Other domains attributed to Suavelos (T0149.001: Domain Asset) reveal a website set up to sell merchandise (T0152.004: Website Asset, T0148.004: Payment Processing Capability, T0061: Sell Merchandise), and a website hosting a registered French non-profit (T0152.004: Website Asset, T0097.207: NGO Persona).<br><br>To learn more about the suavelos.eu domain, we collected the following data: The domain is hosted on OVH; The owner’s identity is protected; The IP Address of the server is 94.23.253.173, which is shared with 20 other domains. <br><br>The relative low number of websites hosted on this IP address could indicate that they all belong to the same people, and are hosted on the same private server.</i><br><br>Suavelos registered a domain using the web hosting provider OVH (T0149.001: Domain Asset, T0152.003: Website Hosting Platform, T0150.006: Purchased). The site’s IP address reveals a server hosting other domains potentially owned by the actors (T0149.005: Server Asset, T0149.006: IP Address Asset). |
|
||||
| [T0149.001 Domain Asset](../../generated_pages/techniques/T0149.001.md) | IT00000428 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at technical indicators associated with the Suavelos website, and attributions which can be made as a consequence:<i><br><br>[The Google AdSense tag set up on Suavelos.eu was also found on the following domains, indicating that they are controlled by the same actor;] Alabastro.eu: an online shop to buy “white nationalists” t-shirts [and] ARPAC.eu: the website of a registered non-profit organisation advocating to lift regulation on gun control in France.<br><br>Other domains attributed to Suavelos (T0149.001: Domain Asset) reveal a website set up to sell merchandise (T0152.004: Website Asset, T0148.004: Payment Processing Capability, T0061: Sell Merchandise), and a website hosting a registered French non-profit (T0152.004: Website Asset, T0097.207: NGO Persona).<br><br>To learn more about the suavelos.eu domain, we collected the following data: The domain is hosted on OVH; The owner’s identity is protected; The IP Address of the server is 94.23.253.173, which is shared with 20 other domains. <br><br>The relative low number of websites hosted on this IP address could indicate that they all belong to the same people, and are hosted on the same private server.</i><br><br>Suavelos registered a domain using the web hosting provider OVH (T0149.001: Domain Asset, T0152.003: Website Hosting Platform, T0150.006: Purchased). The site’s IP address reveals a server hosting other domains potentially owned by the actors (T0149.005: Server Asset, T0149.006: IP Address Asset). |
|
||||
| [T0149.005 Server Asset](../../generated_pages/techniques/T0149.005.md) | IT00000433 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at technical indicators associated with the Suavelos website, and attributions which can be made as a consequence:<i><br><br>[The Google AdSense tag set up on Suavelos.eu was also found on the following domains, indicating that they are controlled by the same actor;] Alabastro.eu: an online shop to buy “white nationalists” t-shirts [and] ARPAC.eu: the website of a registered non-profit organisation advocating to lift regulation on gun control in France.<br><br>Other domains attributed to Suavelos (T0149.001: Domain Asset) reveal a website set up to sell merchandise (T0152.004: Website Asset, T0148.004: Payment Processing Capability, T0061: Sell Merchandise), and a website hosting a registered French non-profit (T0152.004: Website Asset, T0097.207: NGO Persona).<br><br>To learn more about the suavelos.eu domain, we collected the following data: The domain is hosted on OVH; The owner’s identity is protected; The IP Address of the server is 94.23.253.173, which is shared with 20 other domains. <br><br>The relative low number of websites hosted on this IP address could indicate that they all belong to the same people, and are hosted on the same private server.</i><br><br>Suavelos registered a domain using the web hosting provider OVH (T0149.001: Domain Asset, T0152.003: Website Hosting Platform, T0150.006: Purchased). The site’s IP address reveals a server hosting other domains potentially owned by the actors (T0149.005: Server Asset, T0149.006: IP Address Asset). |
|
||||
| [T0149.006 IP Address Asset](../../generated_pages/techniques/T0149.006.md) | IT00000434 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at technical indicators associated with the Suavelos website, and attributions which can be made as a consequence:<i><br><br>[The Google AdSense tag set up on Suavelos.eu was also found on the following domains, indicating that they are controlled by the same actor;] Alabastro.eu: an online shop to buy “white nationalists” t-shirts [and] ARPAC.eu: the website of a registered non-profit organisation advocating to lift regulation on gun control in France.<br><br>Other domains attributed to Suavelos (T0149.001: Domain Asset) reveal a website set up to sell merchandise (T0152.004: Website Asset, T0148.004: Payment Processing Capability, T0061: Sell Merchandise), and a website hosting a registered French non-profit (T0152.004: Website Asset, T0097.207: NGO Persona).<br><br>To learn more about the suavelos.eu domain, we collected the following data: The domain is hosted on OVH; The owner’s identity is protected; The IP Address of the server is 94.23.253.173, which is shared with 20 other domains. <br><br>The relative low number of websites hosted on this IP address could indicate that they all belong to the same people, and are hosted on the same private server.</i><br><br>Suavelos registered a domain using the web hosting provider OVH (T0149.001: Domain Asset, T0152.003: Website Hosting Platform, T0150.006: Purchased). The site’s IP address reveals a server hosting other domains potentially owned by the actors (T0149.005: Server Asset, T0149.006: IP Address Asset). |
|
||||
| [T0150.006 Purchased Asset](../../generated_pages/techniques/T0150.006.md) | IT00000432 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at technical indicators associated with the Suavelos website, and attributions which can be made as a consequence:<i><br><br>[The Google AdSense tag set up on Suavelos.eu was also found on the following domains, indicating that they are controlled by the same actor;] Alabastro.eu: an online shop to buy “white nationalists” t-shirts [and] ARPAC.eu: the website of a registered non-profit organisation advocating to lift regulation on gun control in France.<br><br>Other domains attributed to Suavelos (T0149.001: Domain Asset) reveal a website set up to sell merchandise (T0152.004: Website Asset, T0148.004: Payment Processing Capability, T0061: Sell Merchandise), and a website hosting a registered French non-profit (T0152.004: Website Asset, T0097.207: NGO Persona).<br><br>To learn more about the suavelos.eu domain, we collected the following data: The domain is hosted on OVH; The owner’s identity is protected; The IP Address of the server is 94.23.253.173, which is shared with 20 other domains. <br><br>The relative low number of websites hosted on this IP address could indicate that they all belong to the same people, and are hosted on the same private server.</i><br><br>Suavelos registered a domain using the web hosting provider OVH (T0149.001: Domain Asset, T0152.003: Website Hosting Platform, T0150.006: Purchased). The site’s IP address reveals a server hosting other domains potentially owned by the actors (T0149.005: Server Asset, T0149.006: IP Address Asset). |
|
||||
| [T0151.001 Social Media Platform](../../generated_pages/techniques/T0151.001.md) | IT00000427 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.<br><br><i>In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3.000 members: oppidum.suavelos.eu;</i><br><br>Suavelos linked out to an online store which it controlled (T0152.004: Website Asset, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account Asset, T0148.003: Payment Processing Platform). <br><br>The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account Asset, T0151.008: Microblogging Platform), YouTube (T0146: Account Asset, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account Asset, T0151.001: Social Media Platform). |
|
||||
| [T0151.008 Microblogging Platform](../../generated_pages/techniques/T0151.008.md) | IT00000425 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.<br><br><i>In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3.000 members: oppidum.suavelos.eu;</i><br><br>Suavelos linked out to an online store which it controlled (T0152.004: Website Asset, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account Asset, T0148.003: Payment Processing Platform). <br><br>The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account Asset, T0151.008: Microblogging Platform), YouTube (T0146: Account Asset, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account Asset, T0151.001: Social Media Platform). |
|
||||
| [T0151.009 Legacy Online Forum Platform](../../generated_pages/techniques/T0151.009.md) | IT00000422 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.<br><br><i>In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3.000 members: oppidum.suavelos.eu;</i><br><br>Suavelos linked out to an online store which it controlled (T0152.004: Website Asset, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account Asset, T0148.003: Payment Processing Platform). <br><br>The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account Asset, T0151.008: Microblogging Platform), YouTube (T0146: Account Asset, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account Asset, T0151.001: Social Media Platform). |
|
||||
| [T0152.004 Website Asset](../../generated_pages/techniques/T0152.004.md) | IT00000419 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.<br><br><i>In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3.000 members: oppidum.suavelos.eu;</i><br><br>Suavelos linked out to an online store which it controlled (T0152.004: Website Asset, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account Asset, T0148.003: Payment Processing Platform). <br><br>The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account Asset, T0151.008: Microblogging Platform), YouTube (T0146: Account Asset, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account Asset, T0151.001: Social Media Platform). |
|
||||
| [T0152.006 Video Platform](../../generated_pages/techniques/T0152.006.md) | IT00000426 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.<br><br><i>In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3.000 members: oppidum.suavelos.eu;</i><br><br>Suavelos linked out to an online store which it controlled (T0152.004: Website Asset, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account Asset, T0148.003: Payment Processing Platform). <br><br>The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account Asset, T0151.008: Microblogging Platform), YouTube (T0146: Account Asset, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account Asset, T0151.001: Social Media Platform). |
|
||||
| [T0155 Gated Asset](../../generated_pages/techniques/T0155.md) | IT00000423 This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at the Suavelos website, and the content it links out to.<br><br><i>In going back to Suavelos’ main page, we also found: A link to a page on a web shop: alabastro.eu; A link to a page to donate money to the founders through Tipee and to the website through PayPal; [and] a link to a private forum that gathers 3.000 members: oppidum.suavelos.eu;</i><br><br>Suavelos linked out to an online store which it controlled (T0152.004: Website Asset, T0148.004: Payment Processing Capability), and to accounts on payment processing platforms PayPal and Tipee (T0146: Account Asset, T0148.003: Payment Processing Platform). <br><br>The Suavelos website also hosted a private forum (T0151.009: Legacy Online Forum Platform, T0155: Gated Asset), and linked out to a variety of assets it controlled on other online platforms: accounts on Twitter (T0146: Account Asset, T0151.008: Microblogging Platform), YouTube (T0146: Account Asset, T0152.006: Video Platform), Instagram and VKontakte (T0146: Account Asset, T0151.001: Social Media Platform). |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -15,20 +15,21 @@
|
||||
|
||||
| Reference | Pub Date | Authors | Org | Archive |
|
||||
| --------- | -------- | ------- | --- | ------- |
|
||||
| [https://www.disinfo.eu/publications/how-covid-19-conspiracists-and-extremists-use-crowdfunding-platforms-to-fund-their-activities/](https://www.disinfo.eu/publications/how-covid-19-conspiracists-and-extremists-use-crowdfunding-platforms-to-fund-their-activities/) | 2024/10/01 | - | EU Disinfo Lab | [https://web.archive.org/web/20240910182937/https://www.disinfo.eu/publications/how-covid-19-conspiracists-and-extremists-use-crowdfunding-platforms-to-fund-their-activities](https://web.archive.org/web/20240910182937/https://www.disinfo.eu/publications/how-covid-19-conspiracists-and-extremists-use-crowdfunding-platforms-to-fund-their-activities) |
|
||||
|
||||
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0017 Conduct Fundraising](../../generated_pages/techniques/T0017.md) | IT00000435 The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:<br><br><br><br><i>More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.</i><br><br>A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account, T0148.006: Crowdfunding Platform).<br><br><i>On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.<br><br>We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”</i><br><br>The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).<br><br><i>On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133.903 for a book called Revolution Q. 
This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”</i><br><br>An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). |
|
||||
| [T0085.005 Develop Book](../../generated_pages/techniques/T0085.005.md) | IT00000442 The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:<br><br><br><br><i>More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.</i><br><br>A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account, T0148.006: Crowdfunding Platform).<br><br><i>On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.<br><br>We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”</i><br><br>The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).<br><br><i>On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133.903 for a book called Revolution Q. 
This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”</i><br><br>An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). |
|
||||
| [T0087 Develop Video-Based Content](../../generated_pages/techniques/T0087.md) | IT00000436 The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:<br><br><br><br><i>More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.</i><br><br>A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account, T0148.006: Crowdfunding Platform).<br><br><i>On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.<br><br>We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”</i><br><br>The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).<br><br><i>On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133.903 for a book called Revolution Q. 
This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”</i><br><br>An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). |
|
||||
| [T0097.202 News Outlet Persona](../../generated_pages/techniques/T0097.202.md) | IT00000439 The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:<br><br><br><br><i>More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.</i><br><br>A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account, T0148.006: Crowdfunding Platform).<br><br><i>On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.<br><br>We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”</i><br><br>The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).<br><br><i>On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133.903 for a book called Revolution Q. 
This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”</i><br><br>An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). |
|
||||
| [T0121.001 Bypass Content Blocking](../../generated_pages/techniques/T0121.001.md) | IT00000440 The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:<br><br><br><br><i>More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.</i><br><br>A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account, T0148.006: Crowdfunding Platform).<br><br><i>On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.<br><br>We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”</i><br><br>The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).<br><br><i>On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133.903 for a book called Revolution Q. 
This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”</i><br><br>An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). |
|
||||
| [T0146 Account](../../generated_pages/techniques/T0146.md) | IT00000438 The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:<br><br><br><br><i>More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.</i><br><br>A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account, T0148.006: Crowdfunding Platform).<br><br><i>On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.<br><br>We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”</i><br><br>The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).<br><br><i>On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133.903 for a book called Revolution Q. 
This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”</i><br><br>An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). |
|
||||
| [T0148.006 Crowdfunding Platform](../../generated_pages/techniques/T0148.006.md) | IT00000437 The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:<br><br><br><br><i>More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.</i><br><br>A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account, T0148.006: Crowdfunding Platform).<br><br><i>On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.<br><br>We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”</i><br><br>The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).<br><br><i>On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133.903 for a book called Revolution Q. 
This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”</i><br><br>An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). |
|
||||
| [T0148.007 eCommerce Platform](../../generated_pages/techniques/T0148.007.md) | IT00000443 The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:<br><br><br><br><i>More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.</i><br><br>A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account, T0148.006: Crowdfunding Platform).<br><br><i>On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.<br><br>We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”</i><br><br>The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).<br><br><i>On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133.903 for a book called Revolution Q. 
This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”</i><br><br>An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). |
|
||||
| [T0152.012 Subscription Service Platform](../../generated_pages/techniques/T0152.012.md) | IT00000441 The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:<br><br><br><br><i>More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.</i><br><br>A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account, T0148.006: Crowdfunding Platform).<br><br><i>On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.<br><br>We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”</i><br><br>The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).<br><br><i>On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133.903 for a book called Revolution Q. 
This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”</i><br><br>An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). |
|
||||
| [T0017 Conduct Fundraising](../../generated_pages/techniques/T0017.md) | IT00000435 The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:<br><br><i>More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.</i><br><br>A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account Asset, T0148.006: Crowdfunding Platform).<br><br><i>On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.<br><br>We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”</i><br><br>The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account Asset, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).<br><br><i>On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133.903 for a book called Revolution Q. 
This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”</i><br><br>An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account Asset, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). |
|
||||
| [T0085.005 Develop Book](../../generated_pages/techniques/T0085.005.md) | IT00000442 The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:<br><br><i>More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.</i><br><br>A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account Asset, T0148.006: Crowdfunding Platform).<br><br><i>On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.<br><br>We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”</i><br><br>The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account Asset, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).<br><br><i>On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133.903 for a book called Revolution Q. 
This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”</i><br><br>An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account Asset, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). |
|
||||
| [T0087 Develop Video-Based Content](../../generated_pages/techniques/T0087.md) | IT00000436 The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:<br><br><i>More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.</i><br><br>A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account Asset, T0148.006: Crowdfunding Platform).<br><br><i>On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.<br><br>We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”</i><br><br>The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account Asset, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).<br><br><i>On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133.903 for a book called Revolution Q. 
This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”</i><br><br>An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account Asset, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). |
|
||||
| [T0097.202 News Outlet Persona](../../generated_pages/techniques/T0097.202.md) | IT00000439 The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:<br><br><i>More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.</i><br><br>A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account Asset, T0148.006: Crowdfunding Platform).<br><br><i>On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.<br><br>We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”</i><br><br>The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account Asset, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).<br><br><i>On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133.903 for a book called Revolution Q. 
This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”</i><br><br>An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account Asset, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). |
|
||||
| [T0121.001 Bypass Content Blocking](../../generated_pages/techniques/T0121.001.md) | IT00000440 The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:<br><br><i>More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.</i><br><br>A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account Asset, T0148.006: Crowdfunding Platform).<br><br><i>On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.<br><br>We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”</i><br><br>The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account Asset, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).<br><br><i>On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133.903 for a book called Revolution Q. 
This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”</i><br><br>An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account Asset, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). |
|
||||
| [T0146 Account Asset](../../generated_pages/techniques/T0146.md) | IT00000438 The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:<br><br><i>More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.</i><br><br>A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account Asset, T0148.006: Crowdfunding Platform).<br><br><i>On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.<br><br>We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”</i><br><br>The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account Asset, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).<br><br><i>On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133.903 for a book called Revolution Q. 
This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”</i><br><br>An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account Asset, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). |
|
||||
| [T0148.006 Crowdfunding Platform](../../generated_pages/techniques/T0148.006.md) | IT00000437 The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:<br><br><i>More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.</i><br><br>A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account Asset, T0148.006: Crowdfunding Platform).<br><br><i>On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.<br><br>We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”</i><br><br>The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account Asset, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).<br><br><i>On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133.903 for a book called Revolution Q. 
This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”</i><br><br>An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account Asset, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). |
|
||||
| [T0148.007 eCommerce Platform](../../generated_pages/techniques/T0148.007.md) | IT00000443 The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:<br><br><i>More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.</i><br><br>A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account Asset, T0148.006: Crowdfunding Platform).<br><br><i>On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.<br><br>We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”</i><br><br>The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account Asset, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).<br><br><i>On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133.903 for a book called Revolution Q. 
This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”</i><br><br>An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account Asset, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). |
|
||||
| [T0152.012 Subscription Service Platform](../../generated_pages/techniques/T0152.012.md) | IT00000441 The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:<br><br><i>More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.</i><br><br>A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account Asset, T0148.006: Crowdfunding Platform).<br><br><i>On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.<br><br>We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”</i><br><br>The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account Asset, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).<br><br><i>On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133.903 for a book called Revolution Q. 
This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”</i><br><br>An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account Asset, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -15,18 +15,19 @@
|
||||
|
||||
| Reference | Pub Date | Authors | Org | Archive |
|
||||
| --------- | -------- | ------- | --- | ------- |
|
||||
| [https://www.vice.com/en/article/patreon-is-bankrolling-climate-change-deniers-while-we-all-burn/](https://www.vice.com/en/article/patreon-is-bankrolling-climate-change-deniers-while-we-all-burn/) | 2021/06/30 | David Gilbert | Vice | [-](-) |
|
||||
|
||||
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0085 Develop Text-Based Content](../../generated_pages/techniques/T0085.md) | IT00000444 In this article VICE News discusses a report produced by Advance Democracy on people who use Patreon to spread the false claim that an impending ice age will reverse the harms of the ongoing climate crisis:<br><br><i>“The spread of climate misinformation is prolific on social media, as well as on sites like Patreon, where users are actually financially compensated through the platform for spreading falsehoods,” Daniel Jones, president of Advance Democracy, told VICE News.<br><br>“Companies hosting and promoting climate misinformation have a responsibility to take action to reduce dangerous misinformation, as falsehoods about climate science are every bit as dangerous as lies about vaccinations and disinformation about our elections.”<br><br>Patreon did not respond to VICE News’ request for comment on the report’s findings.<br><br>One of the biggest accounts spreading climate conspiracies is ADAPT 2030, which is run by David DuByne, who has 1,100 followers on Patreon. He is currently making over $3,500 every month from his subscribers.<br><br>[The science DuByne relies on does not support his hypothesis. 
However,] this has not prevented DuByne and many others from preying on people’s fears about climate change to spread conspiracies about an impending ice age, which they say will miraculously fix all of earth’s climate problems.<br><br>DuByne offers seven different membership levels for supporters, beginning at just $1 per month.<br><br>The most expensive costs $100 a month, and gives patrons “a private 20-minute call with David DuByne once per month, to discuss your particular preparedness issues or concerns.” So far just two people are paying this amount.<br><br>The researchers also found at least eight other accounts on Patreon that have spread climate change conspiracy theories as part of wider conspiracy sharing, including baseless claims about COVID-19 and the legitimacy of Joe Biden’s presidency. Some of these accounts are earning over $600 per month.</i><br><br>David DuByne created an account on Patreon, which he uses to post text, videos, and podcasts for his subscribers to discuss (T0085: Develop Text-Based Content, T0087: Develop Video-Based Content, T0088: Develop Audio-Based Content, T0146: Account, T0115: Post Content, T0152.012: Subscription Service Platform, T0151.014: Comments Section, T0155.006: Subscription Access). |
|
||||
| [T0087 Develop Video-Based Content](../../generated_pages/techniques/T0087.md) | IT00000445 In this article VICE News discusses a report produced by Advance Democracy on people who use Patreon to spread the false claim that an impending ice age will reverse the harms of the ongoing climate crisis:<br><br><i>“The spread of climate misinformation is prolific on social media, as well as on sites like Patreon, where users are actually financially compensated through the platform for spreading falsehoods,” Daniel Jones, president of Advance Democracy, told VICE News.<br><br>“Companies hosting and promoting climate misinformation have a responsibility to take action to reduce dangerous misinformation, as falsehoods about climate science are every bit as dangerous as lies about vaccinations and disinformation about our elections.”<br><br>Patreon did not respond to VICE News’ request for comment on the report’s findings.<br><br>One of the biggest accounts spreading climate conspiracies is ADAPT 2030, which is run by David DuByne, who has 1,100 followers on Patreon. He is currently making over $3,500 every month from his subscribers.<br><br>[The science DuByne relies on does not support his hypothesis. 
However,] this has not prevented DuByne and many others from preying on people’s fears about climate change to spread conspiracies about an impending ice age, which they say will miraculously fix all of earth’s climate problems.<br><br>DuByne offers seven different membership levels for supporters, beginning at just $1 per month.<br><br>The most expensive costs $100 a month, and gives patrons “a private 20-minute call with David DuByne once per month, to discuss your particular preparedness issues or concerns.” So far just two people are paying this amount.<br><br>The researchers also found at least eight other accounts on Patreon that have spread climate change conspiracy theories as part of wider conspiracy sharing, including baseless claims about COVID-19 and the legitimacy of Joe Biden’s presidency. Some of these accounts are earning over $600 per month.</i><br><br>David DuByne created an account on Patreon, which he uses to post text, videos, and podcasts for his subscribers to discuss (T0085: Develop Text-Based Content, T0087: Develop Video-Based Content, T0088: Develop Audio-Based Content, T0146: Account, T0115: Post Content, T0152.012: Subscription Service Platform, T0151.014: Comments Section, T0155.006: Subscription Access). |
|
||||
| [T0088 Develop Audio-Based Content](../../generated_pages/techniques/T0088.md) | IT00000446 In this article VICE News discusses a report produced by Advance Democracy on people who use Patreon to spread the false claim that an impending ice age will reverse the harms of the ongoing climate crisis:<br><br><i>“The spread of climate misinformation is prolific on social media, as well as on sites like Patreon, where users are actually financially compensated through the platform for spreading falsehoods,” Daniel Jones, president of Advance Democracy, told VICE News.<br><br>“Companies hosting and promoting climate misinformation have a responsibility to take action to reduce dangerous misinformation, as falsehoods about climate science are every bit as dangerous as lies about vaccinations and disinformation about our elections.”<br><br>Patreon did not respond to VICE News’ request for comment on the report’s findings.<br><br>One of the biggest accounts spreading climate conspiracies is ADAPT 2030, which is run by David DuByne, who has 1,100 followers on Patreon. He is currently making over $3,500 every month from his subscribers.<br><br>[The science DuByne relies on does not support his hypothesis. 
However,] this has not prevented DuByne and many others from preying on people’s fears about climate change to spread conspiracies about an impending ice age, which they say will miraculously fix all of earth’s climate problems.<br><br>DuByne offers seven different membership levels for supporters, beginning at just $1 per month.<br><br>The most expensive costs $100 a month, and gives patrons “a private 20-minute call with David DuByne once per month, to discuss your particular preparedness issues or concerns.” So far just two people are paying this amount.<br><br>The researchers also found at least eight other accounts on Patreon that have spread climate change conspiracy theories as part of wider conspiracy sharing, including baseless claims about COVID-19 and the legitimacy of Joe Biden’s presidency. Some of these accounts are earning over $600 per month.</i><br><br>David DuByne created an account on Patreon, which he uses to post text, videos, and podcasts for his subscribers to discuss (T0085: Develop Text-Based Content, T0087: Develop Video-Based Content, T0088: Develop Audio-Based Content, T0146: Account, T0115: Post Content, T0152.012: Subscription Service Platform, T0151.014: Comments Section, T0155.006: Subscription Access). |
|
||||
| [T0146 Account](../../generated_pages/techniques/T0146.md) | IT00000447 In this article VICE News discusses a report produced by Advance Democracy on people who use Patreon to spread the false claim that an impending ice age will reverse the harms of the ongoing climate crisis:<br><br><i>“The spread of climate misinformation is prolific on social media, as well as on sites like Patreon, where users are actually financially compensated through the platform for spreading falsehoods,” Daniel Jones, president of Advance Democracy, told VICE News.<br><br>“Companies hosting and promoting climate misinformation have a responsibility to take action to reduce dangerous misinformation, as falsehoods about climate science are every bit as dangerous as lies about vaccinations and disinformation about our elections.”<br><br>Patreon did not respond to VICE News’ request for comment on the report’s findings.<br><br>One of the biggest accounts spreading climate conspiracies is ADAPT 2030, which is run by David DuByne, who has 1,100 followers on Patreon. He is currently making over $3,500 every month from his subscribers.<br><br>[The science DuByne relies on does not support his hypothesis. 
However,] this has not prevented DuByne and many others from preying on people’s fears about climate change to spread conspiracies about an impending ice age, which they say will miraculously fix all of earth’s climate problems.<br><br>DuByne offers seven different membership levels for supporters, beginning at just $1 per month.<br><br>The most expensive costs $100 a month, and gives patrons “a private 20-minute call with David DuByne once per month, to discuss your particular preparedness issues or concerns.” So far just two people are paying this amount.<br><br>The researchers also found at least eight other accounts on Patreon that have spread climate change conspiracy theories as part of wider conspiracy sharing, including baseless claims about COVID-19 and the legitimacy of Joe Biden’s presidency. Some of these accounts are earning over $600 per month.</i><br><br>David DuByne created an account on Patreon, which he uses to post text, videos, and podcasts for his subscribers to discuss (T0085: Develop Text-Based Content, T0087: Develop Video-Based Content, T0088: Develop Audio-Based Content, T0146: Account, T0115: Post Content, T0152.012: Subscription Service Platform, T0151.014: Comments Section, T0155.006: Subscription Access). |
|
||||
| [T0151.014 Comments Section](../../generated_pages/techniques/T0151.014.md) | IT00000449 In this article VICE News discusses a report produced by Advance Democracy on people who use Patreon to spread the false claim that an impending ice age will reverse the harms of the ongoing climate crisis:<br><br><i>“The spread of climate misinformation is prolific on social media, as well as on sites like Patreon, where users are actually financially compensated through the platform for spreading falsehoods,” Daniel Jones, president of Advance Democracy, told VICE News.<br><br>“Companies hosting and promoting climate misinformation have a responsibility to take action to reduce dangerous misinformation, as falsehoods about climate science are every bit as dangerous as lies about vaccinations and disinformation about our elections.”<br><br>Patreon did not respond to VICE News’ request for comment on the report’s findings.<br><br>One of the biggest accounts spreading climate conspiracies is ADAPT 2030, which is run by David DuByne, who has 1,100 followers on Patreon. He is currently making over $3,500 every month from his subscribers.<br><br>[The science DuByne relies on does not support his hypothesis. 
However,] this has not prevented DuByne and many others from preying on people’s fears about climate change to spread conspiracies about an impending ice age, which they say will miraculously fix all of earth’s climate problems.<br><br>DuByne offers seven different membership levels for supporters, beginning at just $1 per month.<br><br>The most expensive costs $100 a month, and gives patrons “a private 20-minute call with David DuByne once per month, to discuss your particular preparedness issues or concerns.” So far just two people are paying this amount.<br><br>The researchers also found at least eight other accounts on Patreon that have spread climate change conspiracy theories as part of wider conspiracy sharing, including baseless claims about COVID-19 and the legitimacy of Joe Biden’s presidency. Some of these accounts are earning over $600 per month.</i><br><br>David DuByne created an account on Patreon, which he uses to post text, videos, and podcasts for his subscribers to discuss (T0085: Develop Text-Based Content, T0087: Develop Video-Based Content, T0088: Develop Audio-Based Content, T0146: Account, T0115: Post Content, T0152.012: Subscription Service Platform, T0151.014: Comments Section, T0155.006: Subscription Access). |
|
||||
| [T0152.012 Subscription Service Platform](../../generated_pages/techniques/T0152.012.md) | IT00000448 In this article VICE News discusses a report produced by Advance Democracy on people who use Patreon to spread the false claim that an impending ice age will reverse the harms of the ongoing climate crisis:<br><br><i>“The spread of climate misinformation is prolific on social media, as well as on sites like Patreon, where users are actually financially compensated through the platform for spreading falsehoods,” Daniel Jones, president of Advance Democracy, told VICE News.<br><br>“Companies hosting and promoting climate misinformation have a responsibility to take action to reduce dangerous misinformation, as falsehoods about climate science are every bit as dangerous as lies about vaccinations and disinformation about our elections.”<br><br>Patreon did not respond to VICE News’ request for comment on the report’s findings.<br><br>One of the biggest accounts spreading climate conspiracies is ADAPT 2030, which is run by David DuByne, who has 1,100 followers on Patreon. He is currently making over $3,500 every month from his subscribers.<br><br>[The science DuByne relies on does not support his hypothesis. 
However,] this has not prevented DuByne and many others from preying on people’s fears about climate change to spread conspiracies about an impending ice age, which they say will miraculously fix all of earth’s climate problems.<br><br>DuByne offers seven different membership levels for supporters, beginning at just $1 per month.<br><br>The most expensive costs $100 a month, and gives patrons “a private 20-minute call with David DuByne once per month, to discuss your particular preparedness issues or concerns.” So far just two people are paying this amount.<br><br>The researchers also found at least eight other accounts on Patreon that have spread climate change conspiracy theories as part of wider conspiracy sharing, including baseless claims about COVID-19 and the legitimacy of Joe Biden’s presidency. Some of these accounts are earning over $600 per month.</i><br><br>David DuByne created an account on Patreon, which he uses to post text, videos, and podcasts for his subscribers to discuss (T0085: Develop Text-Based Content, T0087: Develop Video-Based Content, T0088: Develop Audio-Based Content, T0146: Account, T0115: Post Content, T0152.012: Subscription Service Platform, T0151.014: Comments Section, T0155.006: Subscription Access). |
|
||||
| [T0155.006 Subscription Access](../../generated_pages/techniques/T0155.006.md) | IT00000450 In this article VICE News discusses a report produced by Advance Democracy on people who use Patreon to spread the false claim that an impending ice age will reverse the harms of the ongoing climate crisis:<br><br><i>“The spread of climate misinformation is prolific on social media, as well as on sites like Patreon, where users are actually financially compensated through the platform for spreading falsehoods,” Daniel Jones, president of Advance Democracy, told VICE News.<br><br>“Companies hosting and promoting climate misinformation have a responsibility to take action to reduce dangerous misinformation, as falsehoods about climate science are every bit as dangerous as lies about vaccinations and disinformation about our elections.”<br><br>Patreon did not respond to VICE News’ request for comment on the report’s findings.<br><br>One of the biggest accounts spreading climate conspiracies is ADAPT 2030, which is run by David DuByne, who has 1,100 followers on Patreon. He is currently making over $3,500 every month from his subscribers.<br><br>[The science DuByne relies on does not support his hypothesis. 
However,] this has not prevented DuByne and many others from preying on people’s fears about climate change to spread conspiracies about an impending ice age, which they say will miraculously fix all of earth’s climate problems.<br><br>DuByne offers seven different membership levels for supporters, beginning at just $1 per month.<br><br>The most expensive costs $100 a month, and gives patrons “a private 20-minute call with David DuByne once per month, to discuss your particular preparedness issues or concerns.” So far just two people are paying this amount.<br><br>The researchers also found at least eight other accounts on Patreon that have spread climate change conspiracy theories as part of wider conspiracy sharing, including baseless claims about COVID-19 and the legitimacy of Joe Biden’s presidency. Some of these accounts are earning over $600 per month.</i><br><br>David DuByne created an account on Patreon, which he uses to post text, videos, and podcasts for his subscribers to discuss (T0085: Develop Text-Based Content, T0087: Develop Video-Based Content, T0088: Develop Audio-Based Content, T0146: Account, T0115: Post Content, T0152.012: Subscription Service Platform, T0151.014: Comments Section, T0155.006: Subscription Access). |
|
||||
| [T0085 Develop Text-Based Content](../../generated_pages/techniques/T0085.md) | IT00000444 In this article VICE News discusses a report produced by Advance Democracy on people who use Patreon to spread the false claim that an impending ice age will reverse the harms of the ongoing climate crisis:<br><br><i>“The spread of climate misinformation is prolific on social media, as well as on sites like Patreon, where users are actually financially compensated through the platform for spreading falsehoods,” Daniel Jones, president of Advance Democracy, told VICE News.<br><br>“Companies hosting and promoting climate misinformation have a responsibility to take action to reduce dangerous misinformation, as falsehoods about climate science are every bit as dangerous as lies about vaccinations and disinformation about our elections.”<br><br>Patreon did not respond to VICE News’ request for comment on the report’s findings.<br><br>One of the biggest accounts spreading climate conspiracies is ADAPT 2030, which is run by David DuByne, who has 1,100 followers on Patreon. He is currently making over $3,500 every month from his subscribers.<br><br>[The science DuByne relies on does not support his hypothesis. 
However,] this has not prevented DuByne and many others from preying on people’s fears about climate change to spread conspiracies about an impending ice age, which they say will miraculously fix all of earth’s climate problems.<br><br>DuByne offers seven different membership levels for supporters, beginning at just $1 per month.<br><br>The most expensive costs $100 a month, and gives patrons “a private 20-minute call with David DuByne once per month, to discuss your particular preparedness issues or concerns.” So far just two people are paying this amount.<br><br>The researchers also found at least eight other accounts on Patreon that have spread climate change conspiracy theories as part of wider conspiracy sharing, including baseless claims about COVID-19 and the legitimacy of Joe Biden’s presidency. Some of these accounts are earning over $600 per month.</i><br><br>David DuByne created an account on Patreon, which he uses to post text, videos, and podcasts for his subscribers to discuss (T0085: Develop Text-Based Content, T0087: Develop Video-Based Content, T0088: Develop Audio-Based Content, T0146: Account Asset, T0115: Post Content, T0152.012: Subscription Service Platform, T0151.014: Comments Section, T0155.006: Subscription Access Asset). |
|
||||
| [T0087 Develop Video-Based Content](../../generated_pages/techniques/T0087.md) | IT00000445 In this article VICE News discusses a report produced by Advance Democracy on people who use Patreon to spread the false claim that an impending ice age will reverse the harms of the ongoing climate crisis:<br><br><i>“The spread of climate misinformation is prolific on social media, as well as on sites like Patreon, where users are actually financially compensated through the platform for spreading falsehoods,” Daniel Jones, president of Advance Democracy, told VICE News.<br><br>“Companies hosting and promoting climate misinformation have a responsibility to take action to reduce dangerous misinformation, as falsehoods about climate science are every bit as dangerous as lies about vaccinations and disinformation about our elections.”<br><br>Patreon did not respond to VICE News’ request for comment on the report’s findings.<br><br>One of the biggest accounts spreading climate conspiracies is ADAPT 2030, which is run by David DuByne, who has 1,100 followers on Patreon. He is currently making over $3,500 every month from his subscribers.<br><br>[The science DuByne relies on does not support his hypothesis. 
However,] this has not prevented DuByne and many others from preying on people’s fears about climate change to spread conspiracies about an impending ice age, which they say will miraculously fix all of earth’s climate problems.<br><br>DuByne offers seven different membership levels for supporters, beginning at just $1 per month.<br><br>The most expensive costs $100 a month, and gives patrons “a private 20-minute call with David DuByne once per month, to discuss your particular preparedness issues or concerns.” So far just two people are paying this amount.<br><br>The researchers also found at least eight other accounts on Patreon that have spread climate change conspiracy theories as part of wider conspiracy sharing, including baseless claims about COVID-19 and the legitimacy of Joe Biden’s presidency. Some of these accounts are earning over $600 per month.</i><br><br>David DuByne created an account on Patreon, which he uses to post text, videos, and podcasts for his subscribers to discuss (T0085: Develop Text-Based Content, T0087: Develop Video-Based Content, T0088: Develop Audio-Based Content, T0146: Account Asset, T0115: Post Content, T0152.012: Subscription Service Platform, T0151.014: Comments Section, T0155.006: Subscription Access Asset). |
|
||||
| [T0088 Develop Audio-Based Content](../../generated_pages/techniques/T0088.md) | IT00000446 In this article VICE News discusses a report produced by Advance Democracy on people who use Patreon to spread the false claim that an impending ice age will reverse the harms of the ongoing climate crisis:<br><br><i>“The spread of climate misinformation is prolific on social media, as well as on sites like Patreon, where users are actually financially compensated through the platform for spreading falsehoods,” Daniel Jones, president of Advance Democracy, told VICE News.<br><br>“Companies hosting and promoting climate misinformation have a responsibility to take action to reduce dangerous misinformation, as falsehoods about climate science are every bit as dangerous as lies about vaccinations and disinformation about our elections.”<br><br>Patreon did not respond to VICE News’ request for comment on the report’s findings.<br><br>One of the biggest accounts spreading climate conspiracies is ADAPT 2030, which is run by David DuByne, who has 1,100 followers on Patreon. He is currently making over $3,500 every month from his subscribers.<br><br>[The science DuByne relies on does not support his hypothesis. 
However,] this has not prevented DuByne and many others from preying on people’s fears about climate change to spread conspiracies about an impending ice age, which they say will miraculously fix all of earth’s climate problems.<br><br>DuByne offers seven different membership levels for supporters, beginning at just $1 per month.<br><br>The most expensive costs $100 a month, and gives patrons “a private 20-minute call with David DuByne once per month, to discuss your particular preparedness issues or concerns.” So far just two people are paying this amount.<br><br>The researchers also found at least eight other accounts on Patreon that have spread climate change conspiracy theories as part of wider conspiracy sharing, including baseless claims about COVID-19 and the legitimacy of Joe Biden’s presidency. Some of these accounts are earning over $600 per month.</i><br><br>David DuByne created an account on Patreon, which he uses to post text, videos, and podcasts for his subscribers to discuss (T0085: Develop Text-Based Content, T0087: Develop Video-Based Content, T0088: Develop Audio-Based Content, T0146: Account Asset, T0115: Post Content, T0152.012: Subscription Service Platform, T0151.014: Comments Section, T0155.006: Subscription Access Asset). |
|
||||
| [T0146 Account Asset](../../generated_pages/techniques/T0146.md) | IT00000447 In this article VICE News discusses a report produced by Advance Democracy on people who use Patreon to spread the false claim that an impending ice age will reverse the harms of the ongoing climate crisis:<br><br><i>“The spread of climate misinformation is prolific on social media, as well as on sites like Patreon, where users are actually financially compensated through the platform for spreading falsehoods,” Daniel Jones, president of Advance Democracy, told VICE News.<br><br>“Companies hosting and promoting climate misinformation have a responsibility to take action to reduce dangerous misinformation, as falsehoods about climate science are every bit as dangerous as lies about vaccinations and disinformation about our elections.”<br><br>Patreon did not respond to VICE News’ request for comment on the report’s findings.<br><br>One of the biggest accounts spreading climate conspiracies is ADAPT 2030, which is run by David DuByne, who has 1,100 followers on Patreon. He is currently making over $3,500 every month from his subscribers.<br><br>[The science DuByne relies on does not support his hypothesis. 
However,] this has not prevented DuByne and many others from preying on people’s fears about climate change to spread conspiracies about an impending ice age, which they say will miraculously fix all of earth’s climate problems.<br><br>DuByne offers seven different membership levels for supporters, beginning at just $1 per month.<br><br>The most expensive costs $100 a month, and gives patrons “a private 20-minute call with David DuByne once per month, to discuss your particular preparedness issues or concerns.” So far just two people are paying this amount.<br><br>The researchers also found at least eight other accounts on Patreon that have spread climate change conspiracy theories as part of wider conspiracy sharing, including baseless claims about COVID-19 and the legitimacy of Joe Biden’s presidency. Some of these accounts are earning over $600 per month.</i><br><br>David DuByne created an account on Patreon, which he uses to post text, videos, and podcasts for his subscribers to discuss (T0085: Develop Text-Based Content, T0087: Develop Video-Based Content, T0088: Develop Audio-Based Content, T0146: Account Asset, T0115: Post Content, T0152.012: Subscription Service Platform, T0151.014: Comments Section, T0155.006: Subscription Access Asset). |
|
||||
| [T0151.014 Comments Section](../../generated_pages/techniques/T0151.014.md) | IT00000449 In this article VICE News discusses a report produced by Advance Democracy on people who use Patreon to spread the false claim that an impending ice age will reverse the harms of the ongoing climate crisis:<br><br><i>“The spread of climate misinformation is prolific on social media, as well as on sites like Patreon, where users are actually financially compensated through the platform for spreading falsehoods,” Daniel Jones, president of Advance Democracy, told VICE News.<br><br>“Companies hosting and promoting climate misinformation have a responsibility to take action to reduce dangerous misinformation, as falsehoods about climate science are every bit as dangerous as lies about vaccinations and disinformation about our elections.”<br><br>Patreon did not respond to VICE News’ request for comment on the report’s findings.<br><br>One of the biggest accounts spreading climate conspiracies is ADAPT 2030, which is run by David DuByne, who has 1,100 followers on Patreon. He is currently making over $3,500 every month from his subscribers.<br><br>[The science DuByne relies on does not support his hypothesis. 
However,] this has not prevented DuByne and many others from preying on people’s fears about climate change to spread conspiracies about an impending ice age, which they say will miraculously fix all of earth’s climate problems.<br><br>DuByne offers seven different membership levels for supporters, beginning at just $1 per month.<br><br>The most expensive costs $100 a month, and gives patrons “a private 20-minute call with David DuByne once per month, to discuss your particular preparedness issues or concerns.” So far just two people are paying this amount.<br><br>The researchers also found at least eight other accounts on Patreon that have spread climate change conspiracy theories as part of wider conspiracy sharing, including baseless claims about COVID-19 and the legitimacy of Joe Biden’s presidency. Some of these accounts are earning over $600 per month.</i><br><br>David DuByne created an account on Patreon, which he uses to post text, videos, and podcasts for his subscribers to discuss (T0085: Develop Text-Based Content, T0087: Develop Video-Based Content, T0088: Develop Audio-Based Content, T0146: Account Asset, T0115: Post Content, T0152.012: Subscription Service Platform, T0151.014: Comments Section, T0155.006: Subscription Access Asset). |
|
||||
| [T0152.012 Subscription Service Platform](../../generated_pages/techniques/T0152.012.md) | IT00000448 In this article VICE News discusses a report produced by Advance Democracy on people who use Patreon to spread the false claim that an impending ice age will reverse the harms of the ongoing climate crisis:<br><br><i>“The spread of climate misinformation is prolific on social media, as well as on sites like Patreon, where users are actually financially compensated through the platform for spreading falsehoods,” Daniel Jones, president of Advance Democracy, told VICE News.<br><br>“Companies hosting and promoting climate misinformation have a responsibility to take action to reduce dangerous misinformation, as falsehoods about climate science are every bit as dangerous as lies about vaccinations and disinformation about our elections.”<br><br>Patreon did not respond to VICE News’ request for comment on the report’s findings.<br><br>One of the biggest accounts spreading climate conspiracies is ADAPT 2030, which is run by David DuByne, who has 1,100 followers on Patreon. He is currently making over $3,500 every month from his subscribers.<br><br>[The science DuByne relies on does not support his hypothesis. 
However,] this has not prevented DuByne and many others from preying on people’s fears about climate change to spread conspiracies about an impending ice age, which they say will miraculously fix all of earth’s climate problems.<br><br>DuByne offers seven different membership levels for supporters, beginning at just $1 per month.<br><br>The most expensive costs $100 a month, and gives patrons “a private 20-minute call with David DuByne once per month, to discuss your particular preparedness issues or concerns.” So far just two people are paying this amount.<br><br>The researchers also found at least eight other accounts on Patreon that have spread climate change conspiracy theories as part of wider conspiracy sharing, including baseless claims about COVID-19 and the legitimacy of Joe Biden’s presidency. Some of these accounts are earning over $600 per month.</i><br><br>David DuByne created an account on Patreon, which he uses to post text, videos, and podcasts for his subscribers to discuss (T0085: Develop Text-Based Content, T0087: Develop Video-Based Content, T0088: Develop Audio-Based Content, T0146: Account Asset, T0115: Post Content, T0152.012: Subscription Service Platform, T0151.014: Comments Section, T0155.006: Subscription Access Asset). |
|
||||
| [T0155.006 Subscription Access Asset](../../generated_pages/techniques/T0155.006.md) | IT00000450 In this article VICE News discusses a report produced by Advance Democracy on people who use Patreon to spread the false claim that an impending ice age will reverse the harms of the ongoing climate crisis:<br><br><i>“The spread of climate misinformation is prolific on social media, as well as on sites like Patreon, where users are actually financially compensated through the platform for spreading falsehoods,” Daniel Jones, president of Advance Democracy, told VICE News.<br><br>“Companies hosting and promoting climate misinformation have a responsibility to take action to reduce dangerous misinformation, as falsehoods about climate science are every bit as dangerous as lies about vaccinations and disinformation about our elections.”<br><br>Patreon did not respond to VICE News’ request for comment on the report’s findings.<br><br>One of the biggest accounts spreading climate conspiracies is ADAPT 2030, which is run by David DuByne, who has 1,100 followers on Patreon. He is currently making over $3,500 every month from his subscribers.<br><br>[The science DuByne relies on does not support his hypothesis. 
However,] this has not prevented DuByne and many others from preying on people’s fears about climate change to spread conspiracies about an impending ice age, which they say will miraculously fix all of earth’s climate problems.<br><br>DuByne offers seven different membership levels for supporters, beginning at just $1 per month.<br><br>The most expensive costs $100 a month, and gives patrons “a private 20-minute call with David DuByne once per month, to discuss your particular preparedness issues or concerns.” So far just two people are paying this amount.<br><br>The researchers also found at least eight other accounts on Patreon that have spread climate change conspiracy theories as part of wider conspiracy sharing, including baseless claims about COVID-19 and the legitimacy of Joe Biden’s presidency. Some of these accounts are earning over $600 per month.</i><br><br>David DuByne created an account on Patreon, which he uses to post text, videos, and podcasts for his subscribers to discuss (T0085: Develop Text-Based Content, T0087: Develop Video-Based Content, T0088: Develop Audio-Based Content, T0146: Account Asset, T0115: Post Content, T0152.012: Subscription Service Platform, T0151.014: Comments Section, T0155.006: Subscription Access Asset). |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -15,14 +15,15 @@
|
||||
|
||||
| Reference | Pub Date | Authors | Org | Archive |
|
||||
| --------- | -------- | ------- | --- | ------- |
|
||||
| [https://www.disinfo.eu/publications/patreon-allows-disinformation-and-conspiracies-to-be-monetised-in-spain/](https://www.disinfo.eu/publications/patreon-allows-disinformation-and-conspiracies-to-be-monetised-in-spain/) | 2020/12/18 | - | EU Disinfo Lab | [https://web.archive.org/web/20201218092320/https://www.disinfo.eu/publications/patreon-allows-disinformation-and-conspiracies-to-be-monetised-in-spain/](https://web.archive.org/web/20201218092320/https://www.disinfo.eu/publications/patreon-allows-disinformation-and-conspiracies-to-be-monetised-in-spain/) |
|
||||
|
||||
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0121.001 Bypass Content Blocking](../../generated_pages/techniques/T0121.001.md) | IT00000452 In this report EU DisinfoLab identified 17 Spanish accounts that monetise and/or spread QAnon content or other conspiracy theories on Patreon. Content produced by these accounts go against Patreon’s stated policy of removing creators that “advance disinformation promoting the QAnon conspiracy theory”. EU DisinfoLab found:<br><br>In most cases, the creators monetise the content directly on Patreon (posts are only accessible for people sponsoring the creators) but there are also cases of indirect monetization (monetization through links leading to other platforms), an aspect that was flagged and analysed by Eu DisinfoLab in the mentioned previous report.<br><br>Some creators display links that redirects users to other platforms such as YouTube or LBRY where they can monetise their content. Some even offer almost all of their videos for free by redirecting to their YouTube channel.<br><br>Another modus operandi is for the creators to advertise on Patreon that they are looking for financing through PayPal or provide the author’s email to explore other financing alternatives.<br><br>[...]<br><br>Sometimes the content offered for a fee on Patreon is freely accessible on other platforms. Creators openly explain that they seek voluntary donation on Patreon, but that their creations will be public on YouTube. This means that the model of these platforms does not always involve a direct monetisation of the content. Creators who have built a strong reputation previously on other platforms can use Patreon as a platform to get some sponsorship which is not related to the content and give them more freedom to create.<br><br>Some users explicitly claim to use Patreon as a secondary channel to evade “censorship” by other platforms such as YouTube. 
Patreon seems to be perceived as a safe haven for disinformation and fringe content that has been suppressed for violating the policies of other platforms. For example, Jorge Guerra points out how Patreon acts as a back-up channel to go to in case of censorship by YouTube. Meanwhile, Alfa Mind openly claims to use Patreon to publish content that is not allowed on YouTube. “Exclusive access to videos that are prohibited on YouTube. These videos are only accessible on Patreon, as their content is very explicit and shocking”, are offered to the patrons who pay €3 per month.</i><br><br>In spite of Patreon’s stated policy, actors use accounts on their platform to generate revenue or donations for their content, and to provide a space to host content which was removed from other platforms (T0146: Account, T0152.012: Subscription Service Platform, T0121.001: Bypass Content Bocking).<br><br>Some actors were observed accepting donations via PayPal (T0146: Account, T0148.003: Payment Processing Platform). |
|
||||
| [T0148.003 Payment Processing Platform](../../generated_pages/techniques/T0148.003.md) | IT00000451 In this report EU DisinfoLab identified 17 Spanish accounts that monetise and/or spread QAnon content or other conspiracy theories on Patreon. Content produced by these accounts go against Patreon’s stated policy of removing creators that “advance disinformation promoting the QAnon conspiracy theory”. EU DisinfoLab found:<br><br>In most cases, the creators monetise the content directly on Patreon (posts are only accessible for people sponsoring the creators) but there are also cases of indirect monetization (monetization through links leading to other platforms), an aspect that was flagged and analysed by Eu DisinfoLab in the mentioned previous report.<br><br>Some creators display links that redirects users to other platforms such as YouTube or LBRY where they can monetise their content. Some even offer almost all of their videos for free by redirecting to their YouTube channel.<br><br>Another modus operandi is for the creators to advertise on Patreon that they are looking for financing through PayPal or provide the author’s email to explore other financing alternatives.<br><br>[...]<br><br>Sometimes the content offered for a fee on Patreon is freely accessible on other platforms. Creators openly explain that they seek voluntary donation on Patreon, but that their creations will be public on YouTube. This means that the model of these platforms does not always involve a direct monetisation of the content. Creators who have built a strong reputation previously on other platforms can use Patreon as a platform to get some sponsorship which is not related to the content and give them more freedom to create.<br><br>Some users explicitly claim to use Patreon as a secondary channel to evade “censorship” by other platforms such as YouTube. 
Patreon seems to be perceived as a safe haven for disinformation and fringe content that has been suppressed for violating the policies of other platforms. For example, Jorge Guerra points out how Patreon acts as a back-up channel to go to in case of censorship by YouTube. Meanwhile, Alfa Mind openly claims to use Patreon to publish content that is not allowed on YouTube. “Exclusive access to videos that are prohibited on YouTube. These videos are only accessible on Patreon, as their content is very explicit and shocking”, are offered to the patrons who pay €3 per month.</i><br><br>In spite of Patreon’s stated policy, actors use accounts on their platform to generate revenue or donations for their content, and to provide a space to host content which was removed from other platforms (T0146: Account, T0152.012: Subscription Service Platform, T0121.001: Bypass Content Bocking).<br><br>Some actors were observed accepting donations via PayPal (T0146: Account, T0148.003: Payment Processing Platform). |
|
||||
| [T0152.012 Subscription Service Platform](../../generated_pages/techniques/T0152.012.md) | IT00000453 In this report EU DisinfoLab identified 17 Spanish accounts that monetise and/or spread QAnon content or other conspiracy theories on Patreon. Content produced by these accounts go against Patreon’s stated policy of removing creators that “advance disinformation promoting the QAnon conspiracy theory”. EU DisinfoLab found:<br><br>In most cases, the creators monetise the content directly on Patreon (posts are only accessible for people sponsoring the creators) but there are also cases of indirect monetization (monetization through links leading to other platforms), an aspect that was flagged and analysed by Eu DisinfoLab in the mentioned previous report.<br><br>Some creators display links that redirects users to other platforms such as YouTube or LBRY where they can monetise their content. Some even offer almost all of their videos for free by redirecting to their YouTube channel.<br><br>Another modus operandi is for the creators to advertise on Patreon that they are looking for financing through PayPal or provide the author’s email to explore other financing alternatives.<br><br>[...]<br><br>Sometimes the content offered for a fee on Patreon is freely accessible on other platforms. Creators openly explain that they seek voluntary donation on Patreon, but that their creations will be public on YouTube. This means that the model of these platforms does not always involve a direct monetisation of the content. Creators who have built a strong reputation previously on other platforms can use Patreon as a platform to get some sponsorship which is not related to the content and give them more freedom to create.<br><br>Some users explicitly claim to use Patreon as a secondary channel to evade “censorship” by other platforms such as YouTube. 
Patreon seems to be perceived as a safe haven for disinformation and fringe content that has been suppressed for violating the policies of other platforms. For example, Jorge Guerra points out how Patreon acts as a back-up channel to go to in case of censorship by YouTube. Meanwhile, Alfa Mind openly claims to use Patreon to publish content that is not allowed on YouTube. “Exclusive access to videos that are prohibited on YouTube. These videos are only accessible on Patreon, as their content is very explicit and shocking”, are offered to the patrons who pay €3 per month.</i><br><br>In spite of Patreon’s stated policy, actors use accounts on their platform to generate revenue or donations for their content, and to provide a space to host content which was removed from other platforms (T0146: Account, T0152.012: Subscription Service Platform, T0121.001: Bypass Content Bocking).<br><br>Some actors were observed accepting donations via PayPal (T0146: Account, T0148.003: Payment Processing Platform). |
|
||||
| [T0121.001 Bypass Content Blocking](../../generated_pages/techniques/T0121.001.md) | IT00000452 In this report EU DisinfoLab identified 17 Spanish accounts that monetise and/or spread QAnon content or other conspiracy theories on Patreon. Content produced by these accounts go against Patreon’s stated policy of removing creators that “advance disinformation promoting the QAnon conspiracy theory”. EU DisinfoLab found:<br><br><i>In most cases, the creators monetise the content directly on Patreon (posts are only accessible for people sponsoring the creators) but there are also cases of indirect monetization (monetization through links leading to other platforms), an aspect that was flagged and analysed by Eu DisinfoLab in the mentioned previous report.<br><br>Some creators display links that redirects users to other platforms such as YouTube or LBRY where they can monetise their content. Some even offer almost all of their videos for free by redirecting to their YouTube channel.<br><br>Another modus operandi is for the creators to advertise on Patreon that they are looking for financing through PayPal or provide the author’s email to explore other financing alternatives.<br><br>[...]<br><br>Sometimes the content offered for a fee on Patreon is freely accessible on other platforms. Creators openly explain that they seek voluntary donation on Patreon, but that their creations will be public on YouTube. This means that the model of these platforms does not always involve a direct monetisation of the content. Creators who have built a strong reputation previously on other platforms can use Patreon as a platform to get some sponsorship which is not related to the content and give them more freedom to create.<br><br>Some users explicitly claim to use Patreon as a secondary channel to evade “censorship” by other platforms such as YouTube. 
Patreon seems to be perceived as a safe haven for disinformation and fringe content that has been suppressed for violating the policies of other platforms. For example, Jorge Guerra points out how Patreon acts as a back-up channel to go to in case of censorship by YouTube. Meanwhile, Alfa Mind openly claims to use Patreon to publish content that is not allowed on YouTube. “Exclusive access to videos that are prohibited on YouTube. These videos are only accessible on Patreon, as their content is very explicit and shocking”, are offered to the patrons who pay €3 per month.</i><br><br>In spite of Patreon’s stated policy, actors use accounts on their platform to generate revenue or donations for their content, and to provide a space to host content which was removed from other platforms (T0146: Account Asset, T0152.012: Subscription Service Platform, T0121.001: Bypass Content Blocking).<br><br>Some actors were observed accepting donations via PayPal (T0146: Account Asset, T0148.003: Payment Processing Platform). |
|
||||
| [T0148.003 Payment Processing Platform](../../generated_pages/techniques/T0148.003.md) | IT00000451 In this report EU DisinfoLab identified 17 Spanish accounts that monetise and/or spread QAnon content or other conspiracy theories on Patreon. Content produced by these accounts go against Patreon’s stated policy of removing creators that “advance disinformation promoting the QAnon conspiracy theory”. EU DisinfoLab found:<br><br><i>In most cases, the creators monetise the content directly on Patreon (posts are only accessible for people sponsoring the creators) but there are also cases of indirect monetization (monetization through links leading to other platforms), an aspect that was flagged and analysed by Eu DisinfoLab in the mentioned previous report.<br><br>Some creators display links that redirects users to other platforms such as YouTube or LBRY where they can monetise their content. Some even offer almost all of their videos for free by redirecting to their YouTube channel.<br><br>Another modus operandi is for the creators to advertise on Patreon that they are looking for financing through PayPal or provide the author’s email to explore other financing alternatives.<br><br>[...]<br><br>Sometimes the content offered for a fee on Patreon is freely accessible on other platforms. Creators openly explain that they seek voluntary donation on Patreon, but that their creations will be public on YouTube. This means that the model of these platforms does not always involve a direct monetisation of the content. Creators who have built a strong reputation previously on other platforms can use Patreon as a platform to get some sponsorship which is not related to the content and give them more freedom to create.<br><br>Some users explicitly claim to use Patreon as a secondary channel to evade “censorship” by other platforms such as YouTube. 
Patreon seems to be perceived as a safe haven for disinformation and fringe content that has been suppressed for violating the policies of other platforms. For example, Jorge Guerra points out how Patreon acts as a back-up channel to go to in case of censorship by YouTube. Meanwhile, Alfa Mind openly claims to use Patreon to publish content that is not allowed on YouTube. “Exclusive access to videos that are prohibited on YouTube. These videos are only accessible on Patreon, as their content is very explicit and shocking”, are offered to the patrons who pay €3 per month.</i><br><br>In spite of Patreon’s stated policy, actors use accounts on their platform to generate revenue or donations for their content, and to provide a space to host content which was removed from other platforms (T0146: Account Asset, T0152.012: Subscription Service Platform, T0121.001: Bypass Content Blocking).<br><br>Some actors were observed accepting donations via PayPal (T0146: Account Asset, T0148.003: Payment Processing Platform). |
|
||||
| [T0152.012 Subscription Service Platform](../../generated_pages/techniques/T0152.012.md) | IT00000453 In this report EU DisinfoLab identified 17 Spanish accounts that monetise and/or spread QAnon content or other conspiracy theories on Patreon. Content produced by these accounts go against Patreon’s stated policy of removing creators that “advance disinformation promoting the QAnon conspiracy theory”. EU DisinfoLab found:<br><br><i>In most cases, the creators monetise the content directly on Patreon (posts are only accessible for people sponsoring the creators) but there are also cases of indirect monetization (monetization through links leading to other platforms), an aspect that was flagged and analysed by Eu DisinfoLab in the mentioned previous report.<br><br>Some creators display links that redirects users to other platforms such as YouTube or LBRY where they can monetise their content. Some even offer almost all of their videos for free by redirecting to their YouTube channel.<br><br>Another modus operandi is for the creators to advertise on Patreon that they are looking for financing through PayPal or provide the author’s email to explore other financing alternatives.<br><br>[...]<br><br>Sometimes the content offered for a fee on Patreon is freely accessible on other platforms. Creators openly explain that they seek voluntary donation on Patreon, but that their creations will be public on YouTube. This means that the model of these platforms does not always involve a direct monetisation of the content. Creators who have built a strong reputation previously on other platforms can use Patreon as a platform to get some sponsorship which is not related to the content and give them more freedom to create.<br><br>Some users explicitly claim to use Patreon as a secondary channel to evade “censorship” by other platforms such as YouTube. 
Patreon seems to be perceived as a safe haven for disinformation and fringe content that has been suppressed for violating the policies of other platforms. For example, Jorge Guerra points out how Patreon acts as a back-up channel to go to in case of censorship by YouTube. Meanwhile, Alfa Mind openly claims to use Patreon to publish content that is not allowed on YouTube. “Exclusive access to videos that are prohibited on YouTube. These videos are only accessible on Patreon, as their content is very explicit and shocking”, are offered to the patrons who pay €3 per month.</i><br><br>In spite of Patreon’s stated policy, actors use accounts on their platform to generate revenue or donations for their content, and to provide a space to host content which was removed from other platforms (T0146: Account Asset, T0152.012: Subscription Service Platform, T0121.001: Bypass Content Blocking).<br><br>Some actors were observed accepting donations via PayPal (T0146: Account Asset, T0148.003: Payment Processing Platform). |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -15,21 +15,22 @@
|
||||
|
||||
| Reference | Pub Date | Authors | Org | Archive |
|
||||
| --------- | -------- | ------- | --- | ------- |
|
||||
| [https://assets.mofoprod.net/network/documents/Report_Inside_the_shadowy_world_of_disinformation_for_hire_in_Kenya_5._hcc.pdf](https://assets.mofoprod.net/network/documents/Report_Inside_the_shadowy_world_of_disinformation_for_hire_in_Kenya_5._hcc.pdf) | 2021/09/03 | - | Mozilla | [https://web.archive.org/web/20240927031830/https://assets.mofoprod.net/network/documents/Report_Inside_the_shadowy_world_of_disinformation_for_hire_in_Kenya_5._hcc.pdf](https://web.archive.org/web/20240927031830/https://assets.mofoprod.net/network/documents/Report_Inside_the_shadowy_world_of_disinformation_for_hire_in_Kenya_5._hcc.pdf) |
|
||||
|
||||
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0121 Manipulate Platform Algorithm](../../generated_pages/techniques/T0121.md) | IT00000459 Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”, and giving insight into operations’ operationalisation:<br><br><i>In our interviews with one of the influencers, they informed us of the agile tactics they use to organize and avoid detection. For example, when it’s time to carry out the campaign the influencers would be added to a Whatsapp group. Here, they received direction about what to post, the hashtags to use, which tweets to engage with and who to target. Synchronizing the tweets was also incredibly important for them. It’s what enables them to achieve their goal of trending on Twitter and gain amplification.<br><br>[...]<br><br>They revealed to us that those participating in the exercise are paid roughly between $10 and $15 to participate in three campaigns per day. Each campaign execution involves tweeting about the hashtags of the day until it appears on the trending section of Twitter. Additionally, some individuals have managed to reach retainer level and get paid about $250 per month. Their job is to make sure the campaigns are executed on a day-by-day basis with different hashtags.</i><br><br>An M-PESA account (T0148.002: Bank Account, T0148.001: Online Banking Platform) was used to pay campaign participants.<br><br>Participants were organised in WhatsApp groups (T0129.005: Coordinate on Encrypted/Closed Networks, T0151.007: Chat Broadcast Group, T0151.004: Chat Platform), in which they planned how to get campaign content trending on Twitter (T0121: Manipulate Platform Algorithm, T0151.008: Microblogging Platform). |
|
||||
| [T0129.005 Coordinate on Encrypted/Closed Networks](../../generated_pages/techniques/T0129.005.md) | IT00000456 Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”, and giving insight into operations’ operationalisation:<br><br><i>In our interviews with one of the influencers, they informed us of the agile tactics they use to organize and avoid detection. For example, when it’s time to carry out the campaign the influencers would be added to a Whatsapp group. Here, they received direction about what to post, the hashtags to use, which tweets to engage with and who to target. Synchronizing the tweets was also incredibly important for them. It’s what enables them to achieve their goal of trending on Twitter and gain amplification.<br><br>[...]<br><br>They revealed to us that those participating in the exercise are paid roughly between $10 and $15 to participate in three campaigns per day. Each campaign execution involves tweeting about the hashtags of the day until it appears on the trending section of Twitter. Additionally, some individuals have managed to reach retainer level and get paid about $250 per month. Their job is to make sure the campaigns are executed on a day-by-day basis with different hashtags.</i><br><br>An M-PESA account (T0148.002: Bank Account, T0148.001: Online Banking Platform) was used to pay campaign participants.<br><br>Participants were organised in WhatsApp groups (T0129.005: Coordinate on Encrypted/Closed Networks, T0151.007: Chat Broadcast Group, T0151.004: Chat Platform), in which they planned how to get campaign content trending on Twitter (T0121: Manipulate Platform Algorithm, T0151.008: Microblogging Platform). |
|
||||
| [T0146.003 Verified Account](../../generated_pages/techniques/T0146.003.md) | IT00000461 Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing <i>“a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”.</i> The report touches upon how actors gained access to twitter accounts, and what personas they presented:<br><br><i>Verified accounts are complicit. One influencer we spoke to mentioned that the people who own coveted “blue check” accounts will often rent them out for disinformation campaigns. These verified accounts can improve the campaign’s chances of trending. Says one interviewee: “The owner of the account usually receives a cut of the campaign loot”.<br><br>[...]<br><br>Many of the accounts we examined appear to give an aura of authenticity, but in reality they are not authentic. Simply looking at their date of creation won’t give you a hint as to their purpose. We had to dig deeper. The profile pictures and content of some of the accounts gave us the answers we were looking for. A common tactic these accounts utilize is using suggestive pictures of women to bait men into following them, or at least pay attention. In terms of content, many of these accounts tweeted off the same hashtags for days on end and will constantly retweet a specific set of accounts.</i><br><br>Actors participating in this operation rented out verified Twitter accounts (in 2021 a checkmark on Twitter verified a user’s identity), which were repurposed and used updated account imagery (T0146.003: Verified Account, T0150.007: Rented, T0150.004: Repurposed, T0145.006: Attractive Person Account Imagery). |
|
||||
| [T0148.001 Online Banking Platform](../../generated_pages/techniques/T0148.001.md) | IT00000455 Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”, and giving insight into operations’ operationalisation:<br><br><i>In our interviews with one of the influencers, they informed us of the agile tactics they use to organize and avoid detection. For example, when it’s time to carry out the campaign the influencers would be added to a Whatsapp group. Here, they received direction about what to post, the hashtags to use, which tweets to engage with and who to target. Synchronizing the tweets was also incredibly important for them. It’s what enables them to achieve their goal of trending on Twitter and gain amplification.<br><br>[...]<br><br>They revealed to us that those participating in the exercise are paid roughly between $10 and $15 to participate in three campaigns per day. Each campaign execution involves tweeting about the hashtags of the day until it appears on the trending section of Twitter. Additionally, some individuals have managed to reach retainer level and get paid about $250 per month. Their job is to make sure the campaigns are executed on a day-by-day basis with different hashtags.</i><br><br>An M-PESA account (T0148.002: Bank Account, T0148.001: Online Banking Platform) was used to pay campaign participants.<br><br>Participants were organised in WhatsApp groups (T0129.005: Coordinate on Encrypted/Closed Networks, T0151.007: Chat Broadcast Group, T0151.004: Chat Platform), in which they planned how to get campaign content trending on Twitter (T0121: Manipulate Platform Algorithm, T0151.008: Microblogging Platform). |
|
||||
| [T0148.002 Bank Account](../../generated_pages/techniques/T0148.002.md) | IT00000454 Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”, and giving insight into operations’ operationalisation:<br><br><i>In our interviews with one of the influencers, they informed us of the agile tactics they use to organize and avoid detection. For example, when it’s time to carry out the campaign the influencers would be added to a Whatsapp group. Here, they received direction about what to post, the hashtags to use, which tweets to engage with and who to target. Synchronizing the tweets was also incredibly important for them. It’s what enables them to achieve their goal of trending on Twitter and gain amplification.<br><br>[...]<br><br>They revealed to us that those participating in the exercise are paid roughly between $10 and $15 to participate in three campaigns per day. Each campaign execution involves tweeting about the hashtags of the day until it appears on the trending section of Twitter. Additionally, some individuals have managed to reach retainer level and get paid about $250 per month. Their job is to make sure the campaigns are executed on a day-by-day basis with different hashtags.</i><br><br>An M-PESA account (T0148.002: Bank Account, T0148.001: Online Banking Platform) was used to pay campaign participants.<br><br>Participants were organised in WhatsApp groups (T0129.005: Coordinate on Encrypted/Closed Networks, T0151.007: Chat Broadcast Group, T0151.004: Chat Platform), in which they planned how to get campaign content trending on Twitter (T0121: Manipulate Platform Algorithm, T0151.008: Microblogging Platform). |
|
||||
| [T0150.004 Repurposed](../../generated_pages/techniques/T0150.004.md) | IT00000463 Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing <i>“a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”.</i> The report touches upon how actors gained access to twitter accounts, and what personas they presented:<br><br><i>Verified accounts are complicit. One influencer we spoke to mentioned that the people who own coveted “blue check” accounts will often rent them out for disinformation campaigns. These verified accounts can improve the campaign’s chances of trending. Says one interviewee: “The owner of the account usually receives a cut of the campaign loot”.<br><br>[...]<br><br>Many of the accounts we examined appear to give an aura of authenticity, but in reality they are not authentic. Simply looking at their date of creation won’t give you a hint as to their purpose. We had to dig deeper. The profile pictures and content of some of the accounts gave us the answers we were looking for. A common tactic these accounts utilize is using suggestive pictures of women to bait men into following them, or at least pay attention. In terms of content, many of these accounts tweeted off the same hashtags for days on end and will constantly retweet a specific set of accounts.</i><br><br>Actors participating in this operation rented out verified Twitter accounts (in 2021 a checkmark on Twitter verified a user’s identity), which were repurposed and used updated account imagery (T0146.003: Verified Account, T0150.007: Rented, T0150.004: Repurposed, T0145.006: Attractive Person Account Imagery). |
|
||||
| [T0150.007 Rented](../../generated_pages/techniques/T0150.007.md) | IT00000462 Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing <i>“a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”.</i> The report touches upon how actors gained access to twitter accounts, and what personas they presented:<br><br><i>Verified accounts are complicit. One influencer we spoke to mentioned that the people who own coveted “blue check” accounts will often rent them out for disinformation campaigns. These verified accounts can improve the campaign’s chances of trending. Says one interviewee: “The owner of the account usually receives a cut of the campaign loot”.<br><br>[...]<br><br>Many of the accounts we examined appear to give an aura of authenticity, but in reality they are not authentic. Simply looking at their date of creation won’t give you a hint as to their purpose. We had to dig deeper. The profile pictures and content of some of the accounts gave us the answers we were looking for. A common tactic these accounts utilize is using suggestive pictures of women to bait men into following them, or at least pay attention. In terms of content, many of these accounts tweeted off the same hashtags for days on end and will constantly retweet a specific set of accounts.</i><br><br>Actors participating in this operation rented out verified Twitter accounts (in 2021 a checkmark on Twitter verified a user’s identity), which were repurposed and used updated account imagery (T0146.003: Verified Account, T0150.007: Rented, T0150.004: Repurposed, T0145.006: Attractive Person Account Imagery). |
|
||||
| [T0151.004 Chat Platform](../../generated_pages/techniques/T0151.004.md) | IT00000458 Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”, and giving insight into operations’ operationalisation:<br><br><i>In our interviews with one of the influencers, they informed us of the agile tactics they use to organize and avoid detection. For example, when it’s time to carry out the campaign the influencers would be added to a Whatsapp group. Here, they received direction about what to post, the hashtags to use, which tweets to engage with and who to target. Synchronizing the tweets was also incredibly important for them. It’s what enables them to achieve their goal of trending on Twitter and gain amplification.<br><br>[...]<br><br>They revealed to us that those participating in the exercise are paid roughly between $10 and $15 to participate in three campaigns per day. Each campaign execution involves tweeting about the hashtags of the day until it appears on the trending section of Twitter. Additionally, some individuals have managed to reach retainer level and get paid about $250 per month. Their job is to make sure the campaigns are executed on a day-by-day basis with different hashtags.</i><br><br>An M-PESA account (T0148.002: Bank Account, T0148.001: Online Banking Platform) was used to pay campaign participants.<br><br>Participants were organised in WhatsApp groups (T0129.005: Coordinate on Encrypted/Closed Networks, T0151.007: Chat Broadcast Group, T0151.004: Chat Platform), in which they planned how to get campaign content trending on Twitter (T0121: Manipulate Platform Algorithm, T0151.008: Microblogging Platform). |
|
||||
| [T0151.007 Chat Broadcast Group](../../generated_pages/techniques/T0151.007.md) | IT00000457 Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”, and giving insight into operations’ operationalisation:<br><br><i>In our interviews with one of the influencers, they informed us of the agile tactics they use to organize and avoid detection. For example, when it’s time to carry out the campaign the influencers would be added to a Whatsapp group. Here, they received direction about what to post, the hashtags to use, which tweets to engage with and who to target. Synchronizing the tweets was also incredibly important for them. It’s what enables them to achieve their goal of trending on Twitter and gain amplification.<br><br>[...]<br><br>They revealed to us that those participating in the exercise are paid roughly between $10 and $15 to participate in three campaigns per day. Each campaign execution involves tweeting about the hashtags of the day until it appears on the trending section of Twitter. Additionally, some individuals have managed to reach retainer level and get paid about $250 per month. Their job is to make sure the campaigns are executed on a day-by-day basis with different hashtags.</i><br><br>An M-PESA account (T0148.002: Bank Account, T0148.001: Online Banking Platform) was used to pay campaign participants.<br><br>Participants were organised in WhatsApp groups (T0129.005: Coordinate on Encrypted/Closed Networks, T0151.007: Chat Broadcast Group, T0151.004: Chat Platform), in which they planned how to get campaign content trending on Twitter (T0121: Manipulate Platform Algorithm, T0151.008: Microblogging Platform). |
|
||||
| [T0151.008 Microblogging Platform](../../generated_pages/techniques/T0151.008.md) | IT00000460 Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”, and giving insight into operations’ operationalisation:<br><br><i>In our interviews with one of the influencers, they informed us of the agile tactics they use to organize and avoid detection. For example, when it’s time to carry out the campaign the influencers would be added to a Whatsapp group. Here, they received direction about what to post, the hashtags to use, which tweets to engage with and who to target. Synchronizing the tweets was also incredibly important for them. It’s what enables them to achieve their goal of trending on Twitter and gain amplification.<br><br>[...]<br><br>They revealed to us that those participating in the exercise are paid roughly between $10 and $15 to participate in three campaigns per day. Each campaign execution involves tweeting about the hashtags of the day until it appears on the trending section of Twitter. Additionally, some individuals have managed to reach retainer level and get paid about $250 per month. Their job is to make sure the campaigns are executed on a day-by-day basis with different hashtags.</i><br><br>An M-PESA account (T0148.002: Bank Account, T0148.001: Online Banking Platform) was used to pay campaign participants.<br><br>Participants were organised in WhatsApp groups (T0129.005: Coordinate on Encrypted/Closed Networks, T0151.007: Chat Broadcast Group, T0151.004: Chat Platform), in which they planned how to get campaign content trending on Twitter (T0121: Manipulate Platform Algorithm, T0151.008: Microblogging Platform). |
|
||||
| [T0121 Manipulate Platform Algorithm](../../generated_pages/techniques/T0121.md) | IT00000459 Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”, and giving insight into operations’ operationalisation:<br><br><i>In our interviews with one of the influencers, they informed us of the agile tactics they use to organize and avoid detection. For example, when it’s time to carry out the campaign the influencers would be added to a Whatsapp group. Here, they received direction about what to post, the hashtags to use, which tweets to engage with and who to target. Synchronizing the tweets was also incredibly important for them. It’s what enables them to achieve their goal of trending on Twitter and gain amplification.<br><br>[...]<br><br>They revealed to us that those participating in the exercise are paid roughly between $10 and $15 to participate in three campaigns per day. Each campaign execution involves tweeting about the hashtags of the day until it appears on the trending section of Twitter. Additionally, some individuals have managed to reach retainer level and get paid about $250 per month. Their job is to make sure the campaigns are executed on a day-by-day basis with different hashtags.</i><br><br>An M-PESA account (T0148.002: Bank Account Asset, T0148.001: Online Banking Platform) was used to pay campaign participants.<br><br>Participants were organised in WhatsApp groups (T0129.005: Coordinate on Encrypted/Closed Networks, T0151.007: Chat Broadcast Group, T0151.004: Chat Platform), in which they planned how to get campaign content trending on Twitter (T0121: Manipulate Platform Algorithm, T0151.008: Microblogging Platform). |
|
||||
| [T0129.005 Coordinate on Encrypted/Closed Networks](../../generated_pages/techniques/T0129.005.md) | IT00000456 Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”, and giving insight into operations’ operationalisation:<br><br><i>In our interviews with one of the influencers, they informed us of the agile tactics they use to organize and avoid detection. For example, when it’s time to carry out the campaign the influencers would be added to a Whatsapp group. Here, they received direction about what to post, the hashtags to use, which tweets to engage with and who to target. Synchronizing the tweets was also incredibly important for them. It’s what enables them to achieve their goal of trending on Twitter and gain amplification.<br><br>[...]<br><br>They revealed to us that those participating in the exercise are paid roughly between $10 and $15 to participate in three campaigns per day. Each campaign execution involves tweeting about the hashtags of the day until it appears on the trending section of Twitter. Additionally, some individuals have managed to reach retainer level and get paid about $250 per month. Their job is to make sure the campaigns are executed on a day-by-day basis with different hashtags.</i><br><br>An M-PESA account (T0148.002: Bank Account Asset, T0148.001: Online Banking Platform) was used to pay campaign participants.<br><br>Participants were organised in WhatsApp groups (T0129.005: Coordinate on Encrypted/Closed Networks, T0151.007: Chat Broadcast Group, T0151.004: Chat Platform), in which they planned how to get campaign content trending on Twitter (T0121: Manipulate Platform Algorithm, T0151.008: Microblogging Platform). |
|
||||
| [T0146.003 Verified Account Asset](../../generated_pages/techniques/T0146.003.md) | IT00000461 Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing <i>“a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”.</i> The report touches upon how actors gained access to twitter accounts, and what personas they presented:<br><br><i>Verified accounts are complicit. One influencer we spoke to mentioned that the people who own coveted “blue check” accounts will often rent them out for disinformation campaigns. These verified accounts can improve the campaign’s chances of trending. Says one interviewee: “The owner of the account usually receives a cut of the campaign loot”.<br><br>[...]<br><br>Many of the accounts we examined appear to give an aura of authenticity, but in reality they are not authentic. Simply looking at their date of creation won’t give you a hint as to their purpose. We had to dig deeper. The profile pictures and content of some of the accounts gave us the answers we were looking for. A common tactic these accounts utilize is using suggestive pictures of women to bait men into following them, or at least pay attention. In terms of content, many of these accounts tweeted off the same hashtags for days on end and will constantly retweet a specific set of accounts.</i><br><br>Actors participating in this operation rented out verified Twitter accounts (in 2021 a checkmark on Twitter verified a user’s identity), which were repurposed and used updated account imagery (T0146.003: Verified Account Asset, T0150.007: Rented Asset, T0150.004: Repurposed Asset, T0145.006: Attractive Person Account Imagery). |
|
||||
| [T0148.001 Online Banking Platform](../../generated_pages/techniques/T0148.001.md) | IT00000455 Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”, and giving insight into operations’ operationalisation:<br><br><i>In our interviews with one of the influencers, they informed us of the agile tactics they use to organize and avoid detection. For example, when it’s time to carry out the campaign the influencers would be added to a Whatsapp group. Here, they received direction about what to post, the hashtags to use, which tweets to engage with and who to target. Synchronizing the tweets was also incredibly important for them. It’s what enables them to achieve their goal of trending on Twitter and gain amplification.<br><br>[...]<br><br>They revealed to us that those participating in the exercise are paid roughly between $10 and $15 to participate in three campaigns per day. Each campaign execution involves tweeting about the hashtags of the day until it appears on the trending section of Twitter. Additionally, some individuals have managed to reach retainer level and get paid about $250 per month. Their job is to make sure the campaigns are executed on a day-by-day basis with different hashtags.</i><br><br>An M-PESA account (T0148.002: Bank Account Asset, T0148.001: Online Banking Platform) was used to pay campaign participants.<br><br>Participants were organised in WhatsApp groups (T0129.005: Coordinate on Encrypted/Closed Networks, T0151.007: Chat Broadcast Group, T0151.004: Chat Platform), in which they planned how to get campaign content trending on Twitter (T0121: Manipulate Platform Algorithm, T0151.008: Microblogging Platform). |
|
||||
| [T0148.002 Bank Account Asset](../../generated_pages/techniques/T0148.002.md) | IT00000454 Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”, and giving insight into operations’ operationalisation:<br><br><i>In our interviews with one of the influencers, they informed us of the agile tactics they use to organize and avoid detection. For example, when it’s time to carry out the campaign the influencers would be added to a Whatsapp group. Here, they received direction about what to post, the hashtags to use, which tweets to engage with and who to target. Synchronizing the tweets was also incredibly important for them. It’s what enables them to achieve their goal of trending on Twitter and gain amplification.<br><br>[...]<br><br>They revealed to us that those participating in the exercise are paid roughly between $10 and $15 to participate in three campaigns per day. Each campaign execution involves tweeting about the hashtags of the day until it appears on the trending section of Twitter. Additionally, some individuals have managed to reach retainer level and get paid about $250 per month. Their job is to make sure the campaigns are executed on a day-by-day basis with different hashtags.</i><br><br>An M-PESA account (T0148.002: Bank Account Asset, T0148.001: Online Banking Platform) was used to pay campaign participants.<br><br>Participants were organised in WhatsApp groups (T0129.005: Coordinate on Encrypted/Closed Networks, T0151.007: Chat Broadcast Group, T0151.004: Chat Platform), in which they planned how to get campaign content trending on Twitter (T0121: Manipulate Platform Algorithm, T0151.008: Microblogging Platform). |
|
||||
| [T0150.004 Repurposed Asset](../../generated_pages/techniques/T0150.004.md) | IT00000463 Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing <i>“a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”.</i> The report touches upon how actors gained access to twitter accounts, and what personas they presented:<br><br><i>Verified accounts are complicit. One influencer we spoke to mentioned that the people who own coveted “blue check” accounts will often rent them out for disinformation campaigns. These verified accounts can improve the campaign’s chances of trending. Says one interviewee: “The owner of the account usually receives a cut of the campaign loot”.<br><br>[...]<br><br>Many of the accounts we examined appear to give an aura of authenticity, but in reality they are not authentic. Simply looking at their date of creation won’t give you a hint as to their purpose. We had to dig deeper. The profile pictures and content of some of the accounts gave us the answers we were looking for. A common tactic these accounts utilize is using suggestive pictures of women to bait men into following them, or at least pay attention. In terms of content, many of these accounts tweeted off the same hashtags for days on end and will constantly retweet a specific set of accounts.</i><br><br>Actors participating in this operation rented out verified Twitter accounts (in 2021 a checkmark on Twitter verified a user’s identity), which were repurposed and used updated account imagery (T0146.003: Verified Account Asset, T0150.007: Rented Asset, T0150.004: Repurposed Asset, T0145.006: Attractive Person Account Imagery). |
|
||||
| [T0150.007 Rented Asset](../../generated_pages/techniques/T0150.007.md) | IT00000462 Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing <i>“a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”.</i> The report touches upon how actors gained access to twitter accounts, and what personas they presented:<br><br><i>Verified accounts are complicit. One influencer we spoke to mentioned that the people who own coveted “blue check” accounts will often rent them out for disinformation campaigns. These verified accounts can improve the campaign’s chances of trending. Says one interviewee: “The owner of the account usually receives a cut of the campaign loot”.<br><br>[...]<br><br>Many of the accounts we examined appear to give an aura of authenticity, but in reality they are not authentic. Simply looking at their date of creation won’t give you a hint as to their purpose. We had to dig deeper. The profile pictures and content of some of the accounts gave us the answers we were looking for. A common tactic these accounts utilize is using suggestive pictures of women to bait men into following them, or at least pay attention. In terms of content, many of these accounts tweeted off the same hashtags for days on end and will constantly retweet a specific set of accounts.</i><br><br>Actors participating in this operation rented out verified Twitter accounts (in 2021 a checkmark on Twitter verified a user’s identity), which were repurposed and used updated account imagery (T0146.003: Verified Account Asset, T0150.007: Rented Asset, T0150.004: Repurposed Asset, T0145.006: Attractive Person Account Imagery). |
|
||||
| [T0151.004 Chat Platform](../../generated_pages/techniques/T0151.004.md) | IT00000458 Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”, and giving insight into operations’ operationalisation:<br><br><i>In our interviews with one of the influencers, they informed us of the agile tactics they use to organize and avoid detection. For example, when it’s time to carry out the campaign the influencers would be added to a Whatsapp group. Here, they received direction about what to post, the hashtags to use, which tweets to engage with and who to target. Synchronizing the tweets was also incredibly important for them. It’s what enables them to achieve their goal of trending on Twitter and gain amplification.<br><br>[...]<br><br>They revealed to us that those participating in the exercise are paid roughly between $10 and $15 to participate in three campaigns per day. Each campaign execution involves tweeting about the hashtags of the day until it appears on the trending section of Twitter. Additionally, some individuals have managed to reach retainer level and get paid about $250 per month. Their job is to make sure the campaigns are executed on a day-by-day basis with different hashtags.</i><br><br>An M-PESA account (T0148.002: Bank Account Asset, T0148.001: Online Banking Platform) was used to pay campaign participants.<br><br>Participants were organised in WhatsApp groups (T0129.005: Coordinate on Encrypted/Closed Networks, T0151.007: Chat Broadcast Group, T0151.004: Chat Platform), in which they planned how to get campaign content trending on Twitter (T0121: Manipulate Platform Algorithm, T0151.008: Microblogging Platform). |
|
||||
| [T0151.007 Chat Broadcast Group](../../generated_pages/techniques/T0151.007.md) | IT00000457 Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”, and giving insight into operations’ operationalisation:<br><br><i>In our interviews with one of the influencers, they informed us of the agile tactics they use to organize and avoid detection. For example, when it’s time to carry out the campaign the influencers would be added to a Whatsapp group. Here, they received direction about what to post, the hashtags to use, which tweets to engage with and who to target. Synchronizing the tweets was also incredibly important for them. It’s what enables them to achieve their goal of trending on Twitter and gain amplification.<br><br>[...]<br><br>They revealed to us that those participating in the exercise are paid roughly between $10 and $15 to participate in three campaigns per day. Each campaign execution involves tweeting about the hashtags of the day until it appears on the trending section of Twitter. Additionally, some individuals have managed to reach retainer level and get paid about $250 per month. Their job is to make sure the campaigns are executed on a day-by-day basis with different hashtags.</i><br><br>An M-PESA account (T0148.002: Bank Account Asset, T0148.001: Online Banking Platform) was used to pay campaign participants.<br><br>Participants were organised in WhatsApp groups (T0129.005: Coordinate on Encrypted/Closed Networks, T0151.007: Chat Broadcast Group, T0151.004: Chat Platform), in which they planned how to get campaign content trending on Twitter (T0121: Manipulate Platform Algorithm, T0151.008: Microblogging Platform). |
|
||||
| [T0151.008 Microblogging Platform](../../generated_pages/techniques/T0151.008.md) | IT00000460 Researchers at Mozilla examined influence operations targeting Kenyan citizens on Twitter in 2021, providing “a grim window into the booming and shadowy industry of Twitter influencers for political hire here in Kenya”, and giving insight into operations’ operationalisation:<br><br><i>In our interviews with one of the influencers, they informed us of the agile tactics they use to organize and avoid detection. For example, when it’s time to carry out the campaign the influencers would be added to a Whatsapp group. Here, they received direction about what to post, the hashtags to use, which tweets to engage with and who to target. Synchronizing the tweets was also incredibly important for them. It’s what enables them to achieve their goal of trending on Twitter and gain amplification.<br><br>[...]<br><br>They revealed to us that those participating in the exercise are paid roughly between $10 and $15 to participate in three campaigns per day. Each campaign execution involves tweeting about the hashtags of the day until it appears on the trending section of Twitter. Additionally, some individuals have managed to reach retainer level and get paid about $250 per month. Their job is to make sure the campaigns are executed on a day-by-day basis with different hashtags.</i><br><br>An M-PESA account (T0148.002: Bank Account Asset, T0148.001: Online Banking Platform) was used to pay campaign participants.<br><br>Participants were organised in WhatsApp groups (T0129.005: Coordinate on Encrypted/Closed Networks, T0151.007: Chat Broadcast Group, T0151.004: Chat Platform), in which they planned how to get campaign content trending on Twitter (T0121: Manipulate Platform Algorithm, T0151.008: Microblogging Platform). |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
File diff suppressed because one or more lines are too long
@ -15,13 +15,14 @@
|
||||
|
||||
| Reference | Pub Date | Authors | Org | Archive |
|
||||
| --------- | -------- | ------- | --- | ------- |
|
||||
| [https://www.washingtonpost.com/technology/interactive/2021/how-facebook-algorithm-works/](https://www.washingtonpost.com/technology/interactive/2021/how-facebook-algorithm-works/) | 2021/10/26 | Will Oremus, Chris Alcantara, Jeremy B. Merrill, Artur Galocha | The Washington Post | [https://web.archive.org/web/20211026142012/https://www.washingtonpost.com/technology/interactive/2021/how-facebook-algorithm-works/](https://web.archive.org/web/20211026142012/https://www.washingtonpost.com/technology/interactive/2021/how-facebook-algorithm-works/) |
|
||||
|
||||
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0151.001 Social Media Platform](../../generated_pages/techniques/T0151.001.md) | IT00000471 This 2021 report by The Washington Post explains the mechanics of Facebook’s algorithm (T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm):<br><br>In its early years, Facebook’s algorithm prioritized signals such as likes, clicks and comments to decide which posts to amplify. Publishers, brands and individual users soon learned how to craft posts and headlines designed to induce likes and clicks, giving rise to what came to be known as “clickbait.” By 2013, upstart publishers such as Upworthy and ViralNova were amassing tens of millions of readers with articles designed specifically to game Facebook’s news feed algorithm.<br><br>Facebook realized that users were growing wary of misleading teaser headlines, and the company recalibrated its algorithm in 2014 and 2015 to downgrade clickbait and focus on new metrics, such as the amount of time a user spent reading a story or watching a video, and incorporating surveys on what content users found most valuable. Around the same time, its executives identified video as a business priority, and used the algorithm to boost “native” videos shared directly to Facebook. By the mid-2010s, the news feed had tilted toward slick, professionally produced content, especially videos that would hold people’s attention.<br><br>In 2016, however, Facebook executives grew worried about a decline in “original sharing.” Users were spending so much time passively watching and reading that they weren’t interacting with each other as much. Young people in particular shifted their personal conversations to rivals such as Snapchat that offered more intimacy.<br><br>Once again, Facebook found its answer in the algorithm: It developed a new set of goal metrics that it called “meaningful social interactions,” designed to show users more posts from friends and family, and fewer from big publishers and brands. 
In particular, the algorithm began to give outsize weight to posts that sparked lots of comments and replies.<br><br>The downside of this approach was that the posts that sparked the most comments tended to be the ones that made people angry or offended them, the documents show. Facebook became an angrier, more polarizing place. It didn’t help that, starting in 2017, the algorithm had assigned reaction emoji — including the angry emoji — five times the weight of a simple “like,” according to company documents.<br><br>[...]<br><br>Internal documents show Facebook researchers found that, for the most politically oriented 1 million American users, nearly 90 percent of the content that Facebook shows them is about politics and social issues. Those groups also received the most misinformation, especially a set of users associated with mostly right-leaning content, who were shown one misinformation post out of every 40, according to a document from June 2020.<br><br>One takeaway is that Facebook’s algorithm isn’t a runaway train. The company may not directly control what any given user posts, but by choosing which types of posts will be seen, it sculpts the information landscape according to its business priorities. Some within the company would like to see Facebook use the algorithm to explicitly promote certain values, such as democracy and civil discourse. Others have suggested that it develop and prioritize new metrics that align with users’ values, as with a 2020 experiment in which the algorithm was trained to predict what posts they would find “good for the world” and “bad for the world,” and optimize for the former.</i> |
|
||||
| [T0153.006 Content Recommendation Algorithm](../../generated_pages/techniques/T0153.006.md) | IT00000472 This 2021 report by The Washington Post explains the mechanics of Facebook’s algorithm (T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm):<br><br>In its early years, Facebook’s algorithm prioritized signals such as likes, clicks and comments to decide which posts to amplify. Publishers, brands and individual users soon learned how to craft posts and headlines designed to induce likes and clicks, giving rise to what came to be known as “clickbait.” By 2013, upstart publishers such as Upworthy and ViralNova were amassing tens of millions of readers with articles designed specifically to game Facebook’s news feed algorithm.<br><br>Facebook realized that users were growing wary of misleading teaser headlines, and the company recalibrated its algorithm in 2014 and 2015 to downgrade clickbait and focus on new metrics, such as the amount of time a user spent reading a story or watching a video, and incorporating surveys on what content users found most valuable. Around the same time, its executives identified video as a business priority, and used the algorithm to boost “native” videos shared directly to Facebook. By the mid-2010s, the news feed had tilted toward slick, professionally produced content, especially videos that would hold people’s attention.<br><br>In 2016, however, Facebook executives grew worried about a decline in “original sharing.” Users were spending so much time passively watching and reading that they weren’t interacting with each other as much. Young people in particular shifted their personal conversations to rivals such as Snapchat that offered more intimacy.<br><br>Once again, Facebook found its answer in the algorithm: It developed a new set of goal metrics that it called “meaningful social interactions,” designed to show users more posts from friends and family, and fewer from big publishers and brands. 
In particular, the algorithm began to give outsize weight to posts that sparked lots of comments and replies.<br><br>The downside of this approach was that the posts that sparked the most comments tended to be the ones that made people angry or offended them, the documents show. Facebook became an angrier, more polarizing place. It didn’t help that, starting in 2017, the algorithm had assigned reaction emoji — including the angry emoji — five times the weight of a simple “like,” according to company documents.<br><br>[...]<br><br>Internal documents show Facebook researchers found that, for the most politically oriented 1 million American users, nearly 90 percent of the content that Facebook shows them is about politics and social issues. Those groups also received the most misinformation, especially a set of users associated with mostly right-leaning content, who were shown one misinformation post out of every 40, according to a document from June 2020.<br><br>One takeaway is that Facebook’s algorithm isn’t a runaway train. The company may not directly control what any given user posts, but by choosing which types of posts will be seen, it sculpts the information landscape according to its business priorities. Some within the company would like to see Facebook use the algorithm to explicitly promote certain values, such as democracy and civil discourse. Others have suggested that it develop and prioritize new metrics that align with users’ values, as with a 2020 experiment in which the algorithm was trained to predict what posts they would find “good for the world” and “bad for the world,” and optimize for the former.</i> |
|
||||
| [T0151.001 Social Media Platform](../../generated_pages/techniques/T0151.001.md) | IT00000471 This 2021 report by The Washington Post explains the mechanics of Facebook’s algorithm (T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm):<br><br><i>In its early years, Facebook’s algorithm prioritized signals such as likes, clicks and comments to decide which posts to amplify. Publishers, brands and individual users soon learned how to craft posts and headlines designed to induce likes and clicks, giving rise to what came to be known as “clickbait.” By 2013, upstart publishers such as Upworthy and ViralNova were amassing tens of millions of readers with articles designed specifically to game Facebook’s news feed algorithm.<br><br>Facebook realized that users were growing wary of misleading teaser headlines, and the company recalibrated its algorithm in 2014 and 2015 to downgrade clickbait and focus on new metrics, such as the amount of time a user spent reading a story or watching a video, and incorporating surveys on what content users found most valuable. Around the same time, its executives identified video as a business priority, and used the algorithm to boost “native” videos shared directly to Facebook. By the mid-2010s, the news feed had tilted toward slick, professionally produced content, especially videos that would hold people’s attention.<br><br>In 2016, however, Facebook executives grew worried about a decline in “original sharing.” Users were spending so much time passively watching and reading that they weren’t interacting with each other as much. Young people in particular shifted their personal conversations to rivals such as Snapchat that offered more intimacy.<br><br>Once again, Facebook found its answer in the algorithm: It developed a new set of goal metrics that it called “meaningful social interactions,” designed to show users more posts from friends and family, and fewer from big publishers and brands. 
In particular, the algorithm began to give outsize weight to posts that sparked lots of comments and replies.<br><br>The downside of this approach was that the posts that sparked the most comments tended to be the ones that made people angry or offended them, the documents show. Facebook became an angrier, more polarizing place. It didn’t help that, starting in 2017, the algorithm had assigned reaction emoji — including the angry emoji — five times the weight of a simple “like,” according to company documents.<br><br>[...]<br><br>Internal documents show Facebook researchers found that, for the most politically oriented 1 million American users, nearly 90 percent of the content that Facebook shows them is about politics and social issues. Those groups also received the most misinformation, especially a set of users associated with mostly right-leaning content, who were shown one misinformation post out of every 40, according to a document from June 2020.<br><br>One takeaway is that Facebook’s algorithm isn’t a runaway train. The company may not directly control what any given user posts, but by choosing which types of posts will be seen, it sculpts the information landscape according to its business priorities. Some within the company would like to see Facebook use the algorithm to explicitly promote certain values, such as democracy and civil discourse. Others have suggested that it develop and prioritize new metrics that align with users’ values, as with a 2020 experiment in which the algorithm was trained to predict what posts they would find “good for the world” and “bad for the world,” and optimize for the former.</i> |
|
||||
| [T0153.006 Content Recommendation Algorithm](../../generated_pages/techniques/T0153.006.md) | IT00000472 This 2021 report by The Washington Post explains the mechanics of Facebook’s algorithm (T0151.001: Social Media Platform, T0153.006: Content Recommendation Algorithm):<br><br><i>In its early years, Facebook’s algorithm prioritized signals such as likes, clicks and comments to decide which posts to amplify. Publishers, brands and individual users soon learned how to craft posts and headlines designed to induce likes and clicks, giving rise to what came to be known as “clickbait.” By 2013, upstart publishers such as Upworthy and ViralNova were amassing tens of millions of readers with articles designed specifically to game Facebook’s news feed algorithm.<br><br>Facebook realized that users were growing wary of misleading teaser headlines, and the company recalibrated its algorithm in 2014 and 2015 to downgrade clickbait and focus on new metrics, such as the amount of time a user spent reading a story or watching a video, and incorporating surveys on what content users found most valuable. Around the same time, its executives identified video as a business priority, and used the algorithm to boost “native” videos shared directly to Facebook. By the mid-2010s, the news feed had tilted toward slick, professionally produced content, especially videos that would hold people’s attention.<br><br>In 2016, however, Facebook executives grew worried about a decline in “original sharing.” Users were spending so much time passively watching and reading that they weren’t interacting with each other as much. Young people in particular shifted their personal conversations to rivals such as Snapchat that offered more intimacy.<br><br>Once again, Facebook found its answer in the algorithm: It developed a new set of goal metrics that it called “meaningful social interactions,” designed to show users more posts from friends and family, and fewer from big publishers and brands. 
In particular, the algorithm began to give outsize weight to posts that sparked lots of comments and replies.<br><br>The downside of this approach was that the posts that sparked the most comments tended to be the ones that made people angry or offended them, the documents show. Facebook became an angrier, more polarizing place. It didn’t help that, starting in 2017, the algorithm had assigned reaction emoji — including the angry emoji — five times the weight of a simple “like,” according to company documents.<br><br>[...]<br><br>Internal documents show Facebook researchers found that, for the most politically oriented 1 million American users, nearly 90 percent of the content that Facebook shows them is about politics and social issues. Those groups also received the most misinformation, especially a set of users associated with mostly right-leaning content, who were shown one misinformation post out of every 40, according to a document from June 2020.<br><br>One takeaway is that Facebook’s algorithm isn’t a runaway train. The company may not directly control what any given user posts, but by choosing which types of posts will be seen, it sculpts the information landscape according to its business priorities. Some within the company would like to see Facebook use the algorithm to explicitly promote certain values, such as democracy and civil discourse. Others have suggested that it develop and prioritize new metrics that align with users’ values, as with a 2020 experiment in which the algorithm was trained to predict what posts they would find “good for the world” and “bad for the world,” and optimize for the former.</i> |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -15,19 +15,20 @@
|
||||
|
||||
| Reference | Pub Date | Authors | Org | Archive |
|
||||
| --------- | -------- | ------- | --- | ------- |
|
||||
| [https://www.theguardian.com/technology/2023/aug/27/consumers-complaining-x-targeted-scammers-verification-changes-twitter](https://www.theguardian.com/technology/2023/aug/27/consumers-complaining-x-targeted-scammers-verification-changes-twitter) | 2023/08/27 | Anna Tims | The Guardian | [https://web.archive.org/web/20230827142858/https://www.theguardian.com/technology/2023/aug/27/consumers-complaining-x-targeted-scammers-verification-changes-twitter](https://web.archive.org/web/20230827142858/https://www.theguardian.com/technology/2023/aug/27/consumers-complaining-x-targeted-scammers-verification-changes-twitter) |
|
||||
|
||||
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0097.205 Business Persona](../../generated_pages/techniques/T0097.205.md) | IT00000476 <i>Consumers who complain of poor customer service on X are being targeted by scammers after the social media platform formerly known as Twitter changed its account verification process.<br><br>Bank customers and airline passengers are among those at risk of phishing scams when they complain to companies via X. Fraudsters, masquerading as customer service agents, respond under fake X handles and trick victims into disclosing their bank details to get a promised refund.<br><br>They typically win the trust of victims by displaying the blue checkmark icon, which until this year denoted accounts that had been officially verified by X.<br><br>Changes introduced this year allow the icon to be bought by anyone who pays an £11 monthly fee for the site’s subscription service, renamed this month from Twitter Blue to X Premium. Businesses that pay £950 a month receive a gold tick. X’s terms and conditions do not state whether subscriber accounts are pre-vetted.<br><br>Andrew Thomas was contacted by a scam account after posting a complaint to the travel platform Booking.com. “I’d been trying since April to get a refund after our holiday flights were cancelled and finally resorted to X,” he said.<br><br>“I received a response asking me to follow them, and DM [direct message] them with a contact number. They then called me via WhatsApp asking for my reference number so they could investigate. Later they called back to say that I would be refunded via their payment partner for which I’d need to download an app.”<br><br>Thomas became suspicious and checked the X profile. 
“It looked like the real thing, but I noticed that there was an unexpected hyphen in the Twitter handle and that it had only joined X in July 2023,” he said.</i><br><br>In this example a newly created paid account was created on X, used to direct users to other platforms (T0146.002: Paid Account, T0146.003: Verified Account, T0146.005: Lookalike Account ID, T0097.205: Business Persona, T0122: Direct Users to Alternative Platforms, T0143.003: Impersonated Persona, T0151.008: Microblogging Platform, T0150.001: Newly Created). |
|
||||
| [T0122 Direct Users to Alternative Platforms](../../generated_pages/techniques/T0122.md) | IT00000477 <i>Consumers who complain of poor customer service on X are being targeted by scammers after the social media platform formerly known as Twitter changed its account verification process.<br><br>Bank customers and airline passengers are among those at risk of phishing scams when they complain to companies via X. Fraudsters, masquerading as customer service agents, respond under fake X handles and trick victims into disclosing their bank details to get a promised refund.<br><br>They typically win the trust of victims by displaying the blue checkmark icon, which until this year denoted accounts that had been officially verified by X.<br><br>Changes introduced this year allow the icon to be bought by anyone who pays an £11 monthly fee for the site’s subscription service, renamed this month from Twitter Blue to X Premium. Businesses that pay £950 a month receive a gold tick. X’s terms and conditions do not state whether subscriber accounts are pre-vetted.<br><br>Andrew Thomas was contacted by a scam account after posting a complaint to the travel platform Booking.com. “I’d been trying since April to get a refund after our holiday flights were cancelled and finally resorted to X,” he said.<br><br>“I received a response asking me to follow them, and DM [direct message] them with a contact number. They then called me via WhatsApp asking for my reference number so they could investigate. Later they called back to say that I would be refunded via their payment partner for which I’d need to download an app.”<br><br>Thomas became suspicious and checked the X profile. 
“It looked like the real thing, but I noticed that there was an unexpected hyphen in the Twitter handle and that it had only joined X in July 2023,” he said.</i><br><br>In this example a newly created paid account was created on X, used to direct users to other platforms (T0146.002: Paid Account, T0146.003: Verified Account, T0146.005: Lookalike Account ID, T0097.205: Business Persona, T0122: Direct Users to Alternative Platforms, T0143.003: Impersonated Persona, T0151.008: Microblogging Platform, T0150.001: Newly Created). |
|
||||
| [T0143.003 Impersonated Persona](../../generated_pages/techniques/T0143.003.md) | IT00000478 <i>Consumers who complain of poor customer service on X are being targeted by scammers after the social media platform formerly known as Twitter changed its account verification process.<br><br>Bank customers and airline passengers are among those at risk of phishing scams when they complain to companies via X. Fraudsters, masquerading as customer service agents, respond under fake X handles and trick victims into disclosing their bank details to get a promised refund.<br><br>They typically win the trust of victims by displaying the blue checkmark icon, which until this year denoted accounts that had been officially verified by X.<br><br>Changes introduced this year allow the icon to be bought by anyone who pays an £11 monthly fee for the site’s subscription service, renamed this month from Twitter Blue to X Premium. Businesses that pay £950 a month receive a gold tick. X’s terms and conditions do not state whether subscriber accounts are pre-vetted.<br><br>Andrew Thomas was contacted by a scam account after posting a complaint to the travel platform Booking.com. “I’d been trying since April to get a refund after our holiday flights were cancelled and finally resorted to X,” he said.<br><br>“I received a response asking me to follow them, and DM [direct message] them with a contact number. They then called me via WhatsApp asking for my reference number so they could investigate. Later they called back to say that I would be refunded via their payment partner for which I’d need to download an app.”<br><br>Thomas became suspicious and checked the X profile. 
“It looked like the real thing, but I noticed that there was an unexpected hyphen in the Twitter handle and that it had only joined X in July 2023,” he said.</i><br><br>In this example a newly created paid account was created on X, used to direct users to other platforms (T0146.002: Paid Account, T0146.003: Verified Account, T0146.005: Lookalike Account ID, T0097.205: Business Persona, T0122: Direct Users to Alternative Platforms, T0143.003: Impersonated Persona, T0151.008: Microblogging Platform, T0150.001: Newly Created). |
|
||||
| [T0146.002 Paid Account](../../generated_pages/techniques/T0146.002.md) | IT00000473 <i>Consumers who complain of poor customer service on X are being targeted by scammers after the social media platform formerly known as Twitter changed its account verification process.<br><br>Bank customers and airline passengers are among those at risk of phishing scams when they complain to companies via X. Fraudsters, masquerading as customer service agents, respond under fake X handles and trick victims into disclosing their bank details to get a promised refund.<br><br>They typically win the trust of victims by displaying the blue checkmark icon, which until this year denoted accounts that had been officially verified by X.<br><br>Changes introduced this year allow the icon to be bought by anyone who pays an £11 monthly fee for the site’s subscription service, renamed this month from Twitter Blue to X Premium. Businesses that pay £950 a month receive a gold tick. X’s terms and conditions do not state whether subscriber accounts are pre-vetted.<br><br>Andrew Thomas was contacted by a scam account after posting a complaint to the travel platform Booking.com. “I’d been trying since April to get a refund after our holiday flights were cancelled and finally resorted to X,” he said.<br><br>“I received a response asking me to follow them, and DM [direct message] them with a contact number. They then called me via WhatsApp asking for my reference number so they could investigate. Later they called back to say that I would be refunded via their payment partner for which I’d need to download an app.”<br><br>Thomas became suspicious and checked the X profile. 
“It looked like the real thing, but I noticed that there was an unexpected hyphen in the Twitter handle and that it had only joined X in July 2023,” he said.</i><br><br>In this example a newly created paid account was created on X, used to direct users to other platforms (T0146.002: Paid Account, T0146.003: Verified Account, T0146.005: Lookalike Account ID, T0097.205: Business Persona, T0122: Direct Users to Alternative Platforms, T0143.003: Impersonated Persona, T0151.008: Microblogging Platform, T0150.001: Newly Created). |
|
||||
| [T0146.003 Verified Account](../../generated_pages/techniques/T0146.003.md) | IT00000474 <i>Consumers who complain of poor customer service on X are being targeted by scammers after the social media platform formerly known as Twitter changed its account verification process.<br><br>Bank customers and airline passengers are among those at risk of phishing scams when they complain to companies via X. Fraudsters, masquerading as customer service agents, respond under fake X handles and trick victims into disclosing their bank details to get a promised refund.<br><br>They typically win the trust of victims by displaying the blue checkmark icon, which until this year denoted accounts that had been officially verified by X.<br><br>Changes introduced this year allow the icon to be bought by anyone who pays an £11 monthly fee for the site’s subscription service, renamed this month from Twitter Blue to X Premium. Businesses that pay £950 a month receive a gold tick. X’s terms and conditions do not state whether subscriber accounts are pre-vetted.<br><br>Andrew Thomas was contacted by a scam account after posting a complaint to the travel platform Booking.com. “I’d been trying since April to get a refund after our holiday flights were cancelled and finally resorted to X,” he said.<br><br>“I received a response asking me to follow them, and DM [direct message] them with a contact number. They then called me via WhatsApp asking for my reference number so they could investigate. Later they called back to say that I would be refunded via their payment partner for which I’d need to download an app.”<br><br>Thomas became suspicious and checked the X profile. 
“It looked like the real thing, but I noticed that there was an unexpected hyphen in the Twitter handle and that it had only joined X in July 2023,” he said.</i><br><br>In this example a newly created paid account was created on X, used to direct users to other platforms (T0146.002: Paid Account, T0146.003: Verified Account, T0146.005: Lookalike Account ID, T0097.205: Business Persona, T0122: Direct Users to Alternative Platforms, T0143.003: Impersonated Persona, T0151.008: Microblogging Platform, T0150.001: Newly Created). |
|
||||
| [T0146.005 Lookalike Account ID](../../generated_pages/techniques/T0146.005.md) | IT00000475 <i>Consumers who complain of poor customer service on X are being targeted by scammers after the social media platform formerly known as Twitter changed its account verification process.<br><br>Bank customers and airline passengers are among those at risk of phishing scams when they complain to companies via X. Fraudsters, masquerading as customer service agents, respond under fake X handles and trick victims into disclosing their bank details to get a promised refund.<br><br>They typically win the trust of victims by displaying the blue checkmark icon, which until this year denoted accounts that had been officially verified by X.<br><br>Changes introduced this year allow the icon to be bought by anyone who pays an £11 monthly fee for the site’s subscription service, renamed this month from Twitter Blue to X Premium. Businesses that pay £950 a month receive a gold tick. X’s terms and conditions do not state whether subscriber accounts are pre-vetted.<br><br>Andrew Thomas was contacted by a scam account after posting a complaint to the travel platform Booking.com. “I’d been trying since April to get a refund after our holiday flights were cancelled and finally resorted to X,” he said.<br><br>“I received a response asking me to follow them, and DM [direct message] them with a contact number. They then called me via WhatsApp asking for my reference number so they could investigate. Later they called back to say that I would be refunded via their payment partner for which I’d need to download an app.”<br><br>Thomas became suspicious and checked the X profile. 
“It looked like the real thing, but I noticed that there was an unexpected hyphen in the Twitter handle and that it had only joined X in July 2023,” he said.</i><br><br>In this example a newly created paid account was created on X, used to direct users to other platforms (T0146.002: Paid Account, T0146.003: Verified Account, T0146.005: Lookalike Account ID, T0097.205: Business Persona, T0122: Direct Users to Alternative Platforms, T0143.003: Impersonated Persona, T0151.008: Microblogging Platform, T0150.001: Newly Created). |
|
||||
| [T0150.001 Newly Created](../../generated_pages/techniques/T0150.001.md) | IT00000480 <i>Consumers who complain of poor customer service on X are being targeted by scammers after the social media platform formerly known as Twitter changed its account verification process.<br><br>Bank customers and airline passengers are among those at risk of phishing scams when they complain to companies via X. Fraudsters, masquerading as customer service agents, respond under fake X handles and trick victims into disclosing their bank details to get a promised refund.<br><br>They typically win the trust of victims by displaying the blue checkmark icon, which until this year denoted accounts that had been officially verified by X.<br><br>Changes introduced this year allow the icon to be bought by anyone who pays an £11 monthly fee for the site’s subscription service, renamed this month from Twitter Blue to X Premium. Businesses that pay £950 a month receive a gold tick. X’s terms and conditions do not state whether subscriber accounts are pre-vetted.<br><br>Andrew Thomas was contacted by a scam account after posting a complaint to the travel platform Booking.com. “I’d been trying since April to get a refund after our holiday flights were cancelled and finally resorted to X,” he said.<br><br>“I received a response asking me to follow them, and DM [direct message] them with a contact number. They then called me via WhatsApp asking for my reference number so they could investigate. Later they called back to say that I would be refunded via their payment partner for which I’d need to download an app.”<br><br>Thomas became suspicious and checked the X profile. 
“It looked like the real thing, but I noticed that there was an unexpected hyphen in the Twitter handle and that it had only joined X in July 2023,” he said.</i><br><br>In this example a newly created paid account was created on X, used to direct users to other platforms (T0146.002: Paid Account, T0146.003: Verified Account, T0146.005: Lookalike Account ID, T0097.205: Business Persona, T0122: Direct Users to Alternative Platforms, T0143.003: Impersonated Persona, T0151.008: Microblogging Platform, T0150.001: Newly Created). |
|
||||
| [T0151.008 Microblogging Platform](../../generated_pages/techniques/T0151.008.md) | IT00000479 <i>Consumers who complain of poor customer service on X are being targeted by scammers after the social media platform formerly known as Twitter changed its account verification process.<br><br>Bank customers and airline passengers are among those at risk of phishing scams when they complain to companies via X. Fraudsters, masquerading as customer service agents, respond under fake X handles and trick victims into disclosing their bank details to get a promised refund.<br><br>They typically win the trust of victims by displaying the blue checkmark icon, which until this year denoted accounts that had been officially verified by X.<br><br>Changes introduced this year allow the icon to be bought by anyone who pays an £11 monthly fee for the site’s subscription service, renamed this month from Twitter Blue to X Premium. Businesses that pay £950 a month receive a gold tick. X’s terms and conditions do not state whether subscriber accounts are pre-vetted.<br><br>Andrew Thomas was contacted by a scam account after posting a complaint to the travel platform Booking.com. “I’d been trying since April to get a refund after our holiday flights were cancelled and finally resorted to X,” he said.<br><br>“I received a response asking me to follow them, and DM [direct message] them with a contact number. They then called me via WhatsApp asking for my reference number so they could investigate. Later they called back to say that I would be refunded via their payment partner for which I’d need to download an app.”<br><br>Thomas became suspicious and checked the X profile. 
“It looked like the real thing, but I noticed that there was an unexpected hyphen in the Twitter handle and that it had only joined X in July 2023,” he said.</i><br><br>In this example a newly created paid account was created on X, used to direct users to other platforms (T0146.002: Paid Account, T0146.003: Verified Account, T0146.005: Lookalike Account ID, T0097.205: Business Persona, T0122: Direct Users to Alternative Platforms, T0143.003: Impersonated Persona, T0151.008: Microblogging Platform, T0150.001: Newly Created). |
|
||||
| [T0097.205 Business Persona](../../generated_pages/techniques/T0097.205.md) | IT00000476 <i>Consumers who complain of poor customer service on X are being targeted by scammers after the social media platform formerly known as Twitter changed its account verification process.<br><br>Bank customers and airline passengers are among those at risk of phishing scams when they complain to companies via X. Fraudsters, masquerading as customer service agents, respond under fake X handles and trick victims into disclosing their bank details to get a promised refund.<br><br>They typically win the trust of victims by displaying the blue checkmark icon, which until this year denoted accounts that had been officially verified by X.<br><br>Changes introduced this year allow the icon to be bought by anyone who pays an £11 monthly fee for the site’s subscription service, renamed this month from Twitter Blue to X Premium. Businesses that pay £950 a month receive a gold tick. X’s terms and conditions do not state whether subscriber accounts are pre-vetted.<br><br>Andrew Thomas was contacted by a scam account after posting a complaint to the travel platform Booking.com. “I’d been trying since April to get a refund after our holiday flights were cancelled and finally resorted to X,” he said.<br><br>“I received a response asking me to follow them, and DM [direct message] them with a contact number. They then called me via WhatsApp asking for my reference number so they could investigate. Later they called back to say that I would be refunded via their payment partner for which I’d need to download an app.”<br><br>Thomas became suspicious and checked the X profile. 
“It looked like the real thing, but I noticed that there was an unexpected hyphen in the Twitter handle and that it had only joined X in July 2023,” he said.</i><br><br>In this example a newly created paid account was created on X, used to direct users to other platforms (T0146.002: Paid Account Asset, T0146.003: Verified Account Asset, T0146.005: Lookalike Account ID, T0097.205: Business Persona, T0122: Direct Users to Alternative Platforms, T0143.003: Impersonated Persona, T0151.008: Microblogging Platform, T0150.001: Newly Created Asset). |
|
||||
| [T0122 Direct Users to Alternative Platforms](../../generated_pages/techniques/T0122.md) | IT00000477 <i>Consumers who complain of poor customer service on X are being targeted by scammers after the social media platform formerly known as Twitter changed its account verification process.<br><br>Bank customers and airline passengers are among those at risk of phishing scams when they complain to companies via X. Fraudsters, masquerading as customer service agents, respond under fake X handles and trick victims into disclosing their bank details to get a promised refund.<br><br>They typically win the trust of victims by displaying the blue checkmark icon, which until this year denoted accounts that had been officially verified by X.<br><br>Changes introduced this year allow the icon to be bought by anyone who pays an £11 monthly fee for the site’s subscription service, renamed this month from Twitter Blue to X Premium. Businesses that pay £950 a month receive a gold tick. X’s terms and conditions do not state whether subscriber accounts are pre-vetted.<br><br>Andrew Thomas was contacted by a scam account after posting a complaint to the travel platform Booking.com. “I’d been trying since April to get a refund after our holiday flights were cancelled and finally resorted to X,” he said.<br><br>“I received a response asking me to follow them, and DM [direct message] them with a contact number. They then called me via WhatsApp asking for my reference number so they could investigate. Later they called back to say that I would be refunded via their payment partner for which I’d need to download an app.”<br><br>Thomas became suspicious and checked the X profile. 
“It looked like the real thing, but I noticed that there was an unexpected hyphen in the Twitter handle and that it had only joined X in July 2023,” he said.</i><br><br>In this example a newly created paid account was created on X, used to direct users to other platforms (T0146.002: Paid Account Asset, T0146.003: Verified Account Asset, T0146.005: Lookalike Account ID, T0097.205: Business Persona, T0122: Direct Users to Alternative Platforms, T0143.003: Impersonated Persona, T0151.008: Microblogging Platform, T0150.001: Newly Created Asset). |
|
||||
| [T0143.003 Impersonated Persona](../../generated_pages/techniques/T0143.003.md) | IT00000478 <i>Consumers who complain of poor customer service on X are being targeted by scammers after the social media platform formerly known as Twitter changed its account verification process.<br><br>Bank customers and airline passengers are among those at risk of phishing scams when they complain to companies via X. Fraudsters, masquerading as customer service agents, respond under fake X handles and trick victims into disclosing their bank details to get a promised refund.<br><br>They typically win the trust of victims by displaying the blue checkmark icon, which until this year denoted accounts that had been officially verified by X.<br><br>Changes introduced this year allow the icon to be bought by anyone who pays an £11 monthly fee for the site’s subscription service, renamed this month from Twitter Blue to X Premium. Businesses that pay £950 a month receive a gold tick. X’s terms and conditions do not state whether subscriber accounts are pre-vetted.<br><br>Andrew Thomas was contacted by a scam account after posting a complaint to the travel platform Booking.com. “I’d been trying since April to get a refund after our holiday flights were cancelled and finally resorted to X,” he said.<br><br>“I received a response asking me to follow them, and DM [direct message] them with a contact number. They then called me via WhatsApp asking for my reference number so they could investigate. Later they called back to say that I would be refunded via their payment partner for which I’d need to download an app.”<br><br>Thomas became suspicious and checked the X profile. 
“It looked like the real thing, but I noticed that there was an unexpected hyphen in the Twitter handle and that it had only joined X in July 2023,” he said.</i><br><br>In this example a newly created paid account was created on X, used to direct users to other platforms (T0146.002: Paid Account Asset, T0146.003: Verified Account Asset, T0146.005: Lookalike Account ID, T0097.205: Business Persona, T0122: Direct Users to Alternative Platforms, T0143.003: Impersonated Persona, T0151.008: Microblogging Platform, T0150.001: Newly Created Asset). |
|
||||
| [T0146.002 Paid Account Asset](../../generated_pages/techniques/T0146.002.md) | IT00000473 <i>Consumers who complain of poor customer service on X are being targeted by scammers after the social media platform formerly known as Twitter changed its account verification process.<br><br>Bank customers and airline passengers are among those at risk of phishing scams when they complain to companies via X. Fraudsters, masquerading as customer service agents, respond under fake X handles and trick victims into disclosing their bank details to get a promised refund.<br><br>They typically win the trust of victims by displaying the blue checkmark icon, which until this year denoted accounts that had been officially verified by X.<br><br>Changes introduced this year allow the icon to be bought by anyone who pays an £11 monthly fee for the site’s subscription service, renamed this month from Twitter Blue to X Premium. Businesses that pay £950 a month receive a gold tick. X’s terms and conditions do not state whether subscriber accounts are pre-vetted.<br><br>Andrew Thomas was contacted by a scam account after posting a complaint to the travel platform Booking.com. “I’d been trying since April to get a refund after our holiday flights were cancelled and finally resorted to X,” he said.<br><br>“I received a response asking me to follow them, and DM [direct message] them with a contact number. They then called me via WhatsApp asking for my reference number so they could investigate. Later they called back to say that I would be refunded via their payment partner for which I’d need to download an app.”<br><br>Thomas became suspicious and checked the X profile. 
“It looked like the real thing, but I noticed that there was an unexpected hyphen in the Twitter handle and that it had only joined X in July 2023,” he said.</i><br><br>In this example a newly created paid account was created on X, used to direct users to other platforms (T0146.002: Paid Account Asset, T0146.003: Verified Account Asset, T0146.005: Lookalike Account ID, T0097.205: Business Persona, T0122: Direct Users to Alternative Platforms, T0143.003: Impersonated Persona, T0151.008: Microblogging Platform, T0150.001: Newly Created Asset). |
|
||||
| [T0146.003 Verified Account Asset](../../generated_pages/techniques/T0146.003.md) | IT00000474 <i>Consumers who complain of poor customer service on X are being targeted by scammers after the social media platform formerly known as Twitter changed its account verification process.<br><br>Bank customers and airline passengers are among those at risk of phishing scams when they complain to companies via X. Fraudsters, masquerading as customer service agents, respond under fake X handles and trick victims into disclosing their bank details to get a promised refund.<br><br>They typically win the trust of victims by displaying the blue checkmark icon, which until this year denoted accounts that had been officially verified by X.<br><br>Changes introduced this year allow the icon to be bought by anyone who pays an £11 monthly fee for the site’s subscription service, renamed this month from Twitter Blue to X Premium. Businesses that pay £950 a month receive a gold tick. X’s terms and conditions do not state whether subscriber accounts are pre-vetted.<br><br>Andrew Thomas was contacted by a scam account after posting a complaint to the travel platform Booking.com. “I’d been trying since April to get a refund after our holiday flights were cancelled and finally resorted to X,” he said.<br><br>“I received a response asking me to follow them, and DM [direct message] them with a contact number. They then called me via WhatsApp asking for my reference number so they could investigate. Later they called back to say that I would be refunded via their payment partner for which I’d need to download an app.”<br><br>Thomas became suspicious and checked the X profile. 
“It looked like the real thing, but I noticed that there was an unexpected hyphen in the Twitter handle and that it had only joined X in July 2023,” he said.</i><br><br>In this example a newly created paid account was created on X, used to direct users to other platforms (T0146.002: Paid Account Asset, T0146.003: Verified Account Asset, T0146.005: Lookalike Account ID, T0097.205: Business Persona, T0122: Direct Users to Alternative Platforms, T0143.003: Impersonated Persona, T0151.008: Microblogging Platform, T0150.001: Newly Created Asset). |
|
||||
| [T0146.005 Lookalike Account ID](../../generated_pages/techniques/T0146.005.md) | IT00000475 <i>Consumers who complain of poor customer service on X are being targeted by scammers after the social media platform formerly known as Twitter changed its account verification process.<br><br>Bank customers and airline passengers are among those at risk of phishing scams when they complain to companies via X. Fraudsters, masquerading as customer service agents, respond under fake X handles and trick victims into disclosing their bank details to get a promised refund.<br><br>They typically win the trust of victims by displaying the blue checkmark icon, which until this year denoted accounts that had been officially verified by X.<br><br>Changes introduced this year allow the icon to be bought by anyone who pays an £11 monthly fee for the site’s subscription service, renamed this month from Twitter Blue to X Premium. Businesses that pay £950 a month receive a gold tick. X’s terms and conditions do not state whether subscriber accounts are pre-vetted.<br><br>Andrew Thomas was contacted by a scam account after posting a complaint to the travel platform Booking.com. “I’d been trying since April to get a refund after our holiday flights were cancelled and finally resorted to X,” he said.<br><br>“I received a response asking me to follow them, and DM [direct message] them with a contact number. They then called me via WhatsApp asking for my reference number so they could investigate. Later they called back to say that I would be refunded via their payment partner for which I’d need to download an app.”<br><br>Thomas became suspicious and checked the X profile. 
“It looked like the real thing, but I noticed that there was an unexpected hyphen in the Twitter handle and that it had only joined X in July 2023,” he said.</i><br><br>In this example a newly created paid account was created on X, used to direct users to other platforms (T0146.002: Paid Account Asset, T0146.003: Verified Account Asset, T0146.005: Lookalike Account ID, T0097.205: Business Persona, T0122: Direct Users to Alternative Platforms, T0143.003: Impersonated Persona, T0151.008: Microblogging Platform, T0150.001: Newly Created Asset). |
|
||||
| [T0150.001 Newly Created Asset](../../generated_pages/techniques/T0150.001.md) | IT00000480 <i>Consumers who complain of poor customer service on X are being targeted by scammers after the social media platform formerly known as Twitter changed its account verification process.<br><br>Bank customers and airline passengers are among those at risk of phishing scams when they complain to companies via X. Fraudsters, masquerading as customer service agents, respond under fake X handles and trick victims into disclosing their bank details to get a promised refund.<br><br>They typically win the trust of victims by displaying the blue checkmark icon, which until this year denoted accounts that had been officially verified by X.<br><br>Changes introduced this year allow the icon to be bought by anyone who pays an £11 monthly fee for the site’s subscription service, renamed this month from Twitter Blue to X Premium. Businesses that pay £950 a month receive a gold tick. X’s terms and conditions do not state whether subscriber accounts are pre-vetted.<br><br>Andrew Thomas was contacted by a scam account after posting a complaint to the travel platform Booking.com. “I’d been trying since April to get a refund after our holiday flights were cancelled and finally resorted to X,” he said.<br><br>“I received a response asking me to follow them, and DM [direct message] them with a contact number. They then called me via WhatsApp asking for my reference number so they could investigate. Later they called back to say that I would be refunded via their payment partner for which I’d need to download an app.”<br><br>Thomas became suspicious and checked the X profile. 
“It looked like the real thing, but I noticed that there was an unexpected hyphen in the Twitter handle and that it had only joined X in July 2023,” he said.</i><br><br>In this example a newly created paid account was created on X, used to direct users to other platforms (T0146.002: Paid Account Asset, T0146.003: Verified Account Asset, T0146.005: Lookalike Account ID, T0097.205: Business Persona, T0122: Direct Users to Alternative Platforms, T0143.003: Impersonated Persona, T0151.008: Microblogging Platform, T0150.001: Newly Created Asset). |
|
||||
| [T0151.008 Microblogging Platform](../../generated_pages/techniques/T0151.008.md) | IT00000479 <i>Consumers who complain of poor customer service on X are being targeted by scammers after the social media platform formerly known as Twitter changed its account verification process.<br><br>Bank customers and airline passengers are among those at risk of phishing scams when they complain to companies via X. Fraudsters, masquerading as customer service agents, respond under fake X handles and trick victims into disclosing their bank details to get a promised refund.<br><br>They typically win the trust of victims by displaying the blue checkmark icon, which until this year denoted accounts that had been officially verified by X.<br><br>Changes introduced this year allow the icon to be bought by anyone who pays an £11 monthly fee for the site’s subscription service, renamed this month from Twitter Blue to X Premium. Businesses that pay £950 a month receive a gold tick. X’s terms and conditions do not state whether subscriber accounts are pre-vetted.<br><br>Andrew Thomas was contacted by a scam account after posting a complaint to the travel platform Booking.com. “I’d been trying since April to get a refund after our holiday flights were cancelled and finally resorted to X,” he said.<br><br>“I received a response asking me to follow them, and DM [direct message] them with a contact number. They then called me via WhatsApp asking for my reference number so they could investigate. Later they called back to say that I would be refunded via their payment partner for which I’d need to download an app.”<br><br>Thomas became suspicious and checked the X profile. 
“It looked like the real thing, but I noticed that there was an unexpected hyphen in the Twitter handle and that it had only joined X in July 2023,” he said.</i><br><br>In this example a newly created paid account was created on X, used to direct users to other platforms (T0146.002: Paid Account Asset, T0146.003: Verified Account Asset, T0146.005: Lookalike Account ID, T0097.205: Business Persona, T0122: Direct Users to Alternative Platforms, T0143.003: Impersonated Persona, T0151.008: Microblogging Platform, T0150.001: Newly Created Asset). |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -15,6 +15,7 @@
|
||||
|
||||
| Reference | Pub Date | Authors | Org | Archive |
|
||||
| --------- | -------- | ------- | --- | ------- |
|
||||
| [https://www.bellingcat.com/news/americas/2019/08/04/the-el-paso-shooting-and-the-gamification-of-terror/](https://www.bellingcat.com/news/americas/2019/08/04/the-el-paso-shooting-and-the-gamification-of-terror/) | 2019/08/04 | Robert Evans | bellingcat | [https://web.archive.org/web/20190804002224/https://www.bellingcat.com/news/americas/2019/08/04/the-el-paso-shooting-and-the-gamification-of-terror/](https://web.archive.org/web/20190804002224/https://www.bellingcat.com/news/americas/2019/08/04/the-el-paso-shooting-and-the-gamification-of-terror/) |
|
||||
|
||||
|
||||
|
||||
|
@ -15,17 +15,18 @@
|
||||
|
||||
| Reference | Pub Date | Authors | Org | Archive |
|
||||
| --------- | -------- | ------- | --- | ------- |
|
||||
| [https://taskandpurpose.com/news/war-thunder-forum-military-technology-leaks/](https://taskandpurpose.com/news/war-thunder-forum-military-technology-leaks/) | 2023/01/20 | Max Hauptman | Task & Purpose | [https://web.archive.org/web/20241116165206/https://taskandpurpose.com/news/war-thunder-forum-military-technology-leaks/](https://web.archive.org/web/20241116165206/https://taskandpurpose.com/news/war-thunder-forum-military-technology-leaks/) |
|
||||
|
||||
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0089.001 Obtain Authentic Documents](../../generated_pages/techniques/T0089.001.md) | IT00000481 <i>In an effort to prove that the developers behind a popular multiplayer vehicle combat game had made a mistake, a player went ahead and published classified British military documents about one of the real-life tanks featured in the game.<br><br>This truly bizarre turn of events recently occurred in the public forum for War Thunder, a free-to-player multiplayer combat sim featuring modern land, air, and sea craft. Getting a small detail wrong on a piece of equipment might not be a big deal for the average gamer, but for the War Thunder crowd it sure as hell is. With 25,000 devoted players, the game very much bills itself as the military vehicle combat simulator.<br><br>A player, who identified himself as a British tank commander, claimed that the game’s developers at Gaijin Entertainment had inaccurately represented the Challenger 2 main battle tank used by the British military.<br><br>The self-described tank commander’s bio listed his location as Tidworth Camp in Wiltshire, England, according to the UK Defense Journal, which reported that the base is home to the Royal Tank Regiment, which fields Challenger 2 tanks.<br><br>The player, who went by the handle Pyrophoric, reportedly shared an image on the War Thunder forum of the tank’s specs that were pulled from the Challenger 2’s Army Equipment Support Publication, which is essentially a technical manual. <br><br>[...]<br><br>A moderator for the forum, who’s handle is “Templar_”, explained that the developer had removed the material after they received confirmation from the Ministry of Defense that the document is still in fact classified.</i><br><br>A user of War Thunder’s forums posted confidential documents to win an argument (T0089.001: Obtain Authentic Documents, T0146: Account, T0097.105: Military Personnel Persona, T0115: Post Content, T0143.001: Authentic Persona, T0151.009: Legacy Online Forum Platform). |
|
||||
| [T0097.105 Military Personnel Persona](../../generated_pages/techniques/T0097.105.md) | IT00000483 <i>In an effort to prove that the developers behind a popular multiplayer vehicle combat game had made a mistake, a player went ahead and published classified British military documents about one of the real-life tanks featured in the game.<br><br>This truly bizarre turn of events recently occurred in the public forum for War Thunder, a free-to-player multiplayer combat sim featuring modern land, air, and sea craft. Getting a small detail wrong on a piece of equipment might not be a big deal for the average gamer, but for the War Thunder crowd it sure as hell is. With 25,000 devoted players, the game very much bills itself as the military vehicle combat simulator.<br><br>A player, who identified himself as a British tank commander, claimed that the game’s developers at Gaijin Entertainment had inaccurately represented the Challenger 2 main battle tank used by the British military.<br><br>The self-described tank commander’s bio listed his location as Tidworth Camp in Wiltshire, England, according to the UK Defense Journal, which reported that the base is home to the Royal Tank Regiment, which fields Challenger 2 tanks.<br><br>The player, who went by the handle Pyrophoric, reportedly shared an image on the War Thunder forum of the tank’s specs that were pulled from the Challenger 2’s Army Equipment Support Publication, which is essentially a technical manual. <br><br>[...]<br><br>A moderator for the forum, who’s handle is “Templar_”, explained that the developer had removed the material after they received confirmation from the Ministry of Defense that the document is still in fact classified.</i><br><br>A user of War Thunder’s forums posted confidential documents to win an argument (T0089.001: Obtain Authentic Documents, T0146: Account, T0097.105: Military Personnel Persona, T0115: Post Content, T0143.001: Authentic Persona, T0151.009: Legacy Online Forum Platform). |
|
||||
| [T0115 Post Content](../../generated_pages/techniques/T0115.md) | IT00000484 <i>In an effort to prove that the developers behind a popular multiplayer vehicle combat game had made a mistake, a player went ahead and published classified British military documents about one of the real-life tanks featured in the game.<br><br>This truly bizarre turn of events recently occurred in the public forum for War Thunder, a free-to-player multiplayer combat sim featuring modern land, air, and sea craft. Getting a small detail wrong on a piece of equipment might not be a big deal for the average gamer, but for the War Thunder crowd it sure as hell is. With 25,000 devoted players, the game very much bills itself as the military vehicle combat simulator.<br><br>A player, who identified himself as a British tank commander, claimed that the game’s developers at Gaijin Entertainment had inaccurately represented the Challenger 2 main battle tank used by the British military.<br><br>The self-described tank commander’s bio listed his location as Tidworth Camp in Wiltshire, England, according to the UK Defense Journal, which reported that the base is home to the Royal Tank Regiment, which fields Challenger 2 tanks.<br><br>The player, who went by the handle Pyrophoric, reportedly shared an image on the War Thunder forum of the tank’s specs that were pulled from the Challenger 2’s Army Equipment Support Publication, which is essentially a technical manual. <br><br>[...]<br><br>A moderator for the forum, who’s handle is “Templar_”, explained that the developer had removed the material after they received confirmation from the Ministry of Defense that the document is still in fact classified.</i><br><br>A user of War Thunder’s forums posted confidential documents to win an argument (T0089.001: Obtain Authentic Documents, T0146: Account, T0097.105: Military Personnel Persona, T0115: Post Content, T0143.001: Authentic Persona, T0151.009: Legacy Online Forum Platform). |
|
||||
| [T0143.001 Authentic Persona](../../generated_pages/techniques/T0143.001.md) | IT00000485 <i>In an effort to prove that the developers behind a popular multiplayer vehicle combat game had made a mistake, a player went ahead and published classified British military documents about one of the real-life tanks featured in the game.<br><br>This truly bizarre turn of events recently occurred in the public forum for War Thunder, a free-to-player multiplayer combat sim featuring modern land, air, and sea craft. Getting a small detail wrong on a piece of equipment might not be a big deal for the average gamer, but for the War Thunder crowd it sure as hell is. With 25,000 devoted players, the game very much bills itself as the military vehicle combat simulator.<br><br>A player, who identified himself as a British tank commander, claimed that the game’s developers at Gaijin Entertainment had inaccurately represented the Challenger 2 main battle tank used by the British military.<br><br>The self-described tank commander’s bio listed his location as Tidworth Camp in Wiltshire, England, according to the UK Defense Journal, which reported that the base is home to the Royal Tank Regiment, which fields Challenger 2 tanks.<br><br>The player, who went by the handle Pyrophoric, reportedly shared an image on the War Thunder forum of the tank’s specs that were pulled from the Challenger 2’s Army Equipment Support Publication, which is essentially a technical manual. <br><br>[...]<br><br>A moderator for the forum, who’s handle is “Templar_”, explained that the developer had removed the material after they received confirmation from the Ministry of Defense that the document is still in fact classified.</i><br><br>A user of War Thunder’s forums posted confidential documents to win an argument (T0089.001: Obtain Authentic Documents, T0146: Account, T0097.105: Military Personnel Persona, T0115: Post Content, T0143.001: Authentic Persona, T0151.009: Legacy Online Forum Platform). |
|
||||
| [T0146 Account](../../generated_pages/techniques/T0146.md) | IT00000482 <i>In an effort to prove that the developers behind a popular multiplayer vehicle combat game had made a mistake, a player went ahead and published classified British military documents about one of the real-life tanks featured in the game.<br><br>This truly bizarre turn of events recently occurred in the public forum for War Thunder, a free-to-player multiplayer combat sim featuring modern land, air, and sea craft. Getting a small detail wrong on a piece of equipment might not be a big deal for the average gamer, but for the War Thunder crowd it sure as hell is. With 25,000 devoted players, the game very much bills itself as the military vehicle combat simulator.<br><br>A player, who identified himself as a British tank commander, claimed that the game’s developers at Gaijin Entertainment had inaccurately represented the Challenger 2 main battle tank used by the British military.<br><br>The self-described tank commander’s bio listed his location as Tidworth Camp in Wiltshire, England, according to the UK Defense Journal, which reported that the base is home to the Royal Tank Regiment, which fields Challenger 2 tanks.<br><br>The player, who went by the handle Pyrophoric, reportedly shared an image on the War Thunder forum of the tank’s specs that were pulled from the Challenger 2’s Army Equipment Support Publication, which is essentially a technical manual. <br><br>[...]<br><br>A moderator for the forum, who’s handle is “Templar_”, explained that the developer had removed the material after they received confirmation from the Ministry of Defense that the document is still in fact classified.</i><br><br>A user of War Thunder’s forums posted confidential documents to win an argument (T0089.001: Obtain Authentic Documents, T0146: Account, T0097.105: Military Personnel Persona, T0115: Post Content, T0143.001: Authentic Persona, T0151.009: Legacy Online Forum Platform). |
|
||||
| [T0151.009 Legacy Online Forum Platform](../../generated_pages/techniques/T0151.009.md) | IT00000486 <i>In an effort to prove that the developers behind a popular multiplayer vehicle combat game had made a mistake, a player went ahead and published classified British military documents about one of the real-life tanks featured in the game.<br><br>This truly bizarre turn of events recently occurred in the public forum for War Thunder, a free-to-player multiplayer combat sim featuring modern land, air, and sea craft. Getting a small detail wrong on a piece of equipment might not be a big deal for the average gamer, but for the War Thunder crowd it sure as hell is. With 25,000 devoted players, the game very much bills itself as the military vehicle combat simulator.<br><br>A player, who identified himself as a British tank commander, claimed that the game’s developers at Gaijin Entertainment had inaccurately represented the Challenger 2 main battle tank used by the British military.<br><br>The self-described tank commander’s bio listed his location as Tidworth Camp in Wiltshire, England, according to the UK Defense Journal, which reported that the base is home to the Royal Tank Regiment, which fields Challenger 2 tanks.<br><br>The player, who went by the handle Pyrophoric, reportedly shared an image on the War Thunder forum of the tank’s specs that were pulled from the Challenger 2’s Army Equipment Support Publication, which is essentially a technical manual. 
<br><br>[...]<br><br>A moderator for the forum, who’s handle is “Templar_”, explained that the developer had removed the material after they received confirmation from the Ministry of Defense that the document is still in fact classified.</i><br><br>A user of War Thunder’s forums posted confidential documents to win an argument (T0089.001: Obtain Authentic Documents, T0146: Account, T0097.105: Military Personnel Persona, T0115: Post Content, T0143.001: Authentic Persona, T0151.009: Legacy Online Forum Platform). |
|
||||
| [T0089.001 Obtain Authentic Documents](../../generated_pages/techniques/T0089.001.md) | IT00000481 <i>In an effort to prove that the developers behind a popular multiplayer vehicle combat game had made a mistake, a player went ahead and published classified British military documents about one of the real-life tanks featured in the game.<br><br>This truly bizarre turn of events recently occurred in the public forum for War Thunder, a free-to-player multiplayer combat sim featuring modern land, air, and sea craft. Getting a small detail wrong on a piece of equipment might not be a big deal for the average gamer, but for the War Thunder crowd it sure as hell is. With 25,000 devoted players, the game very much bills itself as the military vehicle combat simulator.<br><br>A player, who identified himself as a British tank commander, claimed that the game’s developers at Gaijin Entertainment had inaccurately represented the Challenger 2 main battle tank used by the British military.<br><br>The self-described tank commander’s bio listed his location as Tidworth Camp in Wiltshire, England, according to the UK Defense Journal, which reported that the base is home to the Royal Tank Regiment, which fields Challenger 2 tanks.<br><br>The player, who went by the handle Pyrophoric, reportedly shared an image on the War Thunder forum of the tank’s specs that were pulled from the Challenger 2’s Army Equipment Support Publication, which is essentially a technical manual. 
<br><br>[...]<br><br>A moderator for the forum, who’s handle is “Templar_”, explained that the developer had removed the material after they received confirmation from the Ministry of Defense that the document is still in fact classified.</i><br><br>A user of War Thunder’s forums posted confidential documents to win an argument (T0089.001: Obtain Authentic Documents, T0146: Account Asset, T0097.105: Military Personnel Persona, T0115: Post Content, T0143.001: Authentic Persona, T0151.009: Legacy Online Forum Platform). |
|
||||
| [T0097.105 Military Personnel Persona](../../generated_pages/techniques/T0097.105.md) | IT00000483 <i>In an effort to prove that the developers behind a popular multiplayer vehicle combat game had made a mistake, a player went ahead and published classified British military documents about one of the real-life tanks featured in the game.<br><br>This truly bizarre turn of events recently occurred in the public forum for War Thunder, a free-to-player multiplayer combat sim featuring modern land, air, and sea craft. Getting a small detail wrong on a piece of equipment might not be a big deal for the average gamer, but for the War Thunder crowd it sure as hell is. With 25,000 devoted players, the game very much bills itself as the military vehicle combat simulator.<br><br>A player, who identified himself as a British tank commander, claimed that the game’s developers at Gaijin Entertainment had inaccurately represented the Challenger 2 main battle tank used by the British military.<br><br>The self-described tank commander’s bio listed his location as Tidworth Camp in Wiltshire, England, according to the UK Defense Journal, which reported that the base is home to the Royal Tank Regiment, which fields Challenger 2 tanks.<br><br>The player, who went by the handle Pyrophoric, reportedly shared an image on the War Thunder forum of the tank’s specs that were pulled from the Challenger 2’s Army Equipment Support Publication, which is essentially a technical manual. 
<br><br>[...]<br><br>A moderator for the forum, who’s handle is “Templar_”, explained that the developer had removed the material after they received confirmation from the Ministry of Defense that the document is still in fact classified.</i><br><br>A user of War Thunder’s forums posted confidential documents to win an argument (T0089.001: Obtain Authentic Documents, T0146: Account Asset, T0097.105: Military Personnel Persona, T0115: Post Content, T0143.001: Authentic Persona, T0151.009: Legacy Online Forum Platform). |
|
||||
| [T0115 Post Content](../../generated_pages/techniques/T0115.md) | IT00000484 <i>In an effort to prove that the developers behind a popular multiplayer vehicle combat game had made a mistake, a player went ahead and published classified British military documents about one of the real-life tanks featured in the game.<br><br>This truly bizarre turn of events recently occurred in the public forum for War Thunder, a free-to-player multiplayer combat sim featuring modern land, air, and sea craft. Getting a small detail wrong on a piece of equipment might not be a big deal for the average gamer, but for the War Thunder crowd it sure as hell is. With 25,000 devoted players, the game very much bills itself as the military vehicle combat simulator.<br><br>A player, who identified himself as a British tank commander, claimed that the game’s developers at Gaijin Entertainment had inaccurately represented the Challenger 2 main battle tank used by the British military.<br><br>The self-described tank commander’s bio listed his location as Tidworth Camp in Wiltshire, England, according to the UK Defense Journal, which reported that the base is home to the Royal Tank Regiment, which fields Challenger 2 tanks.<br><br>The player, who went by the handle Pyrophoric, reportedly shared an image on the War Thunder forum of the tank’s specs that were pulled from the Challenger 2’s Army Equipment Support Publication, which is essentially a technical manual. <br><br>[...]<br><br>A moderator for the forum, who’s handle is “Templar_”, explained that the developer had removed the material after they received confirmation from the Ministry of Defense that the document is still in fact classified.</i><br><br>A user of War Thunder’s forums posted confidential documents to win an argument (T0089.001: Obtain Authentic Documents, T0146: Account Asset, T0097.105: Military Personnel Persona, T0115: Post Content, T0143.001: Authentic Persona, T0151.009: Legacy Online Forum Platform). |
|
||||
| [T0143.001 Authentic Persona](../../generated_pages/techniques/T0143.001.md) | IT00000485 <i>In an effort to prove that the developers behind a popular multiplayer vehicle combat game had made a mistake, a player went ahead and published classified British military documents about one of the real-life tanks featured in the game.<br><br>This truly bizarre turn of events recently occurred in the public forum for War Thunder, a free-to-player multiplayer combat sim featuring modern land, air, and sea craft. Getting a small detail wrong on a piece of equipment might not be a big deal for the average gamer, but for the War Thunder crowd it sure as hell is. With 25,000 devoted players, the game very much bills itself as the military vehicle combat simulator.<br><br>A player, who identified himself as a British tank commander, claimed that the game’s developers at Gaijin Entertainment had inaccurately represented the Challenger 2 main battle tank used by the British military.<br><br>The self-described tank commander’s bio listed his location as Tidworth Camp in Wiltshire, England, according to the UK Defense Journal, which reported that the base is home to the Royal Tank Regiment, which fields Challenger 2 tanks.<br><br>The player, who went by the handle Pyrophoric, reportedly shared an image on the War Thunder forum of the tank’s specs that were pulled from the Challenger 2’s Army Equipment Support Publication, which is essentially a technical manual. <br><br>[...]<br><br>A moderator for the forum, who’s handle is “Templar_”, explained that the developer had removed the material after they received confirmation from the Ministry of Defense that the document is still in fact classified.</i><br><br>A user of War Thunder’s forums posted confidential documents to win an argument (T0089.001: Obtain Authentic Documents, T0146: Account Asset, T0097.105: Military Personnel Persona, T0115: Post Content, T0143.001: Authentic Persona, T0151.009: Legacy Online Forum Platform). |
|
||||
| [T0146 Account Asset](../../generated_pages/techniques/T0146.md) | IT00000482 <i>In an effort to prove that the developers behind a popular multiplayer vehicle combat game had made a mistake, a player went ahead and published classified British military documents about one of the real-life tanks featured in the game.<br><br>This truly bizarre turn of events recently occurred in the public forum for War Thunder, a free-to-player multiplayer combat sim featuring modern land, air, and sea craft. Getting a small detail wrong on a piece of equipment might not be a big deal for the average gamer, but for the War Thunder crowd it sure as hell is. With 25,000 devoted players, the game very much bills itself as the military vehicle combat simulator.<br><br>A player, who identified himself as a British tank commander, claimed that the game’s developers at Gaijin Entertainment had inaccurately represented the Challenger 2 main battle tank used by the British military.<br><br>The self-described tank commander’s bio listed his location as Tidworth Camp in Wiltshire, England, according to the UK Defense Journal, which reported that the base is home to the Royal Tank Regiment, which fields Challenger 2 tanks.<br><br>The player, who went by the handle Pyrophoric, reportedly shared an image on the War Thunder forum of the tank’s specs that were pulled from the Challenger 2’s Army Equipment Support Publication, which is essentially a technical manual. <br><br>[...]<br><br>A moderator for the forum, who’s handle is “Templar_”, explained that the developer had removed the material after they received confirmation from the Ministry of Defense that the document is still in fact classified.</i><br><br>A user of War Thunder’s forums posted confidential documents to win an argument (T0089.001: Obtain Authentic Documents, T0146: Account Asset, T0097.105: Military Personnel Persona, T0115: Post Content, T0143.001: Authentic Persona, T0151.009: Legacy Online Forum Platform). |
|
||||
| [T0151.009 Legacy Online Forum Platform](../../generated_pages/techniques/T0151.009.md) | IT00000486 <i>In an effort to prove that the developers behind a popular multiplayer vehicle combat game had made a mistake, a player went ahead and published classified British military documents about one of the real-life tanks featured in the game.<br><br>This truly bizarre turn of events recently occurred in the public forum for War Thunder, a free-to-player multiplayer combat sim featuring modern land, air, and sea craft. Getting a small detail wrong on a piece of equipment might not be a big deal for the average gamer, but for the War Thunder crowd it sure as hell is. With 25,000 devoted players, the game very much bills itself as the military vehicle combat simulator.<br><br>A player, who identified himself as a British tank commander, claimed that the game’s developers at Gaijin Entertainment had inaccurately represented the Challenger 2 main battle tank used by the British military.<br><br>The self-described tank commander’s bio listed his location as Tidworth Camp in Wiltshire, England, according to the UK Defense Journal, which reported that the base is home to the Royal Tank Regiment, which fields Challenger 2 tanks.<br><br>The player, who went by the handle Pyrophoric, reportedly shared an image on the War Thunder forum of the tank’s specs that were pulled from the Challenger 2’s Army Equipment Support Publication, which is essentially a technical manual. 
<br><br>[...]<br><br>A moderator for the forum, who’s handle is “Templar_”, explained that the developer had removed the material after they received confirmation from the Ministry of Defense that the document is still in fact classified.</i><br><br>A user of War Thunder’s forums posted confidential documents to win an argument (T0089.001: Obtain Authentic Documents, T0146: Account Asset, T0097.105: Military Personnel Persona, T0115: Post Content, T0143.001: Authentic Persona, T0151.009: Legacy Online Forum Platform). |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -15,19 +15,20 @@
|
||||
|
||||
| Reference | Pub Date | Authors | Org | Archive |
|
||||
| --------- | -------- | ------- | --- | ------- |
|
||||
| [https://www.nbcnews.com/tech/security/ken-klippenstein-publishes-irans-hacked-trump-campaign-document-substa-rcna172902](https://www.nbcnews.com/tech/security/ken-klippenstein-publishes-irans-hacked-trump-campaign-document-substa-rcna172902) | 2024/09/26 | Kevin Collier | NBC News | [https://web.archive.org/web/20240927013805/https://www.nbcnews.com/tech/security/ken-klippenstein-publishes-irans-hacked-trump-campaign-document-substa-rcna172902](https://web.archive.org/web/20240927013805/https://www.nbcnews.com/tech/security/ken-klippenstein-publishes-irans-hacked-trump-campaign-document-substa-rcna172902) |
|
||||
|
||||
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0089 Obtain Private Documents](../../generated_pages/techniques/T0089.md) | IT00000489 <i>An American journalist who runs an independent newsletter published a document [on 26 Sep 2024] that appears to have been stolen from Donald Trump’s presidential campaign — the first public posting of a file that is believed to be part of a dossier that federal officials say is part of an Iranian effort to manipulate the [2024] U.S. election.<br><br>The PDF document is a 271-page opposition research file on former President Donald Trump’s running mate, Sen. JD Vance, R-Ohio.<br><br>For more than two months, hackers who the U.S. says are tied to Iran have tried to persuade the American media to cover files they stole. No outlets took the bait.<br><br>But on Thursday, reporter Ken Klippenstein, who self-publishes on Substack after he left The Intercept this year, published one of the files.<br><br>[...]<br><br>Reporters who have received the documents describe the same pattern: An AOL account emails them files, signed by a person using the name “Robert,” who is reluctant to speak to their identity or reasons for wanting the documents to receive coverage.<br><br>NBC News was not part of the Robert persona’s direct outreach, but it has viewed its correspondence with a reporter at another publication.<br><br> One of the emails from the Robert persona previously viewed by NBC News included three large PDF files, each corresponding to Trump’s three reported finalists for vice president. 
The Vance file appears to be the one Klippenstein hosts on his site.</i><br><br>In this example hackers attributed to Iran used the Robert persona to email journalists hacked documents (T0146: Account, T0097.100: Individual Persona, T0153.001: Email Platform).<br><br>The journalist Ken Klippenstein used his existing blog on Substack to host a link to download the document (T0089: Obtain Private Documents, T0097.102: Journalist Persona, T0115: Post Content, T0143.001: Authentic Persona, T0152.001: Blogging Platform, T0152.002: Blog, T0150.003: Pre-Existing). |
|
||||
| [T0097.100 Individual Persona](../../generated_pages/techniques/T0097.100.md) | IT00000488 <i>An American journalist who runs an independent newsletter published a document [on 26 Sep 2024] that appears to have been stolen from Donald Trump’s presidential campaign — the first public posting of a file that is believed to be part of a dossier that federal officials say is part of an Iranian effort to manipulate the [2024] U.S. election.<br><br>The PDF document is a 271-page opposition research file on former President Donald Trump’s running mate, Sen. JD Vance, R-Ohio.<br><br>For more than two months, hackers who the U.S. says are tied to Iran have tried to persuade the American media to cover files they stole. No outlets took the bait.<br><br>But on Thursday, reporter Ken Klippenstein, who self-publishes on Substack after he left The Intercept this year, published one of the files.<br><br>[...]<br><br>Reporters who have received the documents describe the same pattern: An AOL account emails them files, signed by a person using the name “Robert,” who is reluctant to speak to their identity or reasons for wanting the documents to receive coverage.<br><br>NBC News was not part of the Robert persona’s direct outreach, but it has viewed its correspondence with a reporter at another publication.<br><br> One of the emails from the Robert persona previously viewed by NBC News included three large PDF files, each corresponding to Trump’s three reported finalists for vice president. 
The Vance file appears to be the one Klippenstein hosts on his site.</i><br><br>In this example hackers attributed to Iran used the Robert persona to email journalists hacked documents (T0146: Account, T0097.100: Individual Persona, T0153.001: Email Platform).<br><br>The journalist Ken Klippenstein used his existing blog on Substack to host a link to download the document (T0089: Obtain Private Documents, T0097.102: Journalist Persona, T0115: Post Content, T0143.001: Authentic Persona, T0152.001: Blogging Platform, T0152.002: Blog, T0150.003: Pre-Existing). |
|
||||
| [T0097.102 Journalist Persona](../../generated_pages/techniques/T0097.102.md) | IT00000490 <i>An American journalist who runs an independent newsletter published a document [on 26 Sep 2024] that appears to have been stolen from Donald Trump’s presidential campaign — the first public posting of a file that is believed to be part of a dossier that federal officials say is part of an Iranian effort to manipulate the [2024] U.S. election.<br><br>The PDF document is a 271-page opposition research file on former President Donald Trump’s running mate, Sen. JD Vance, R-Ohio.<br><br>For more than two months, hackers who the U.S. says are tied to Iran have tried to persuade the American media to cover files they stole. No outlets took the bait.<br><br>But on Thursday, reporter Ken Klippenstein, who self-publishes on Substack after he left The Intercept this year, published one of the files.<br><br>[...]<br><br>Reporters who have received the documents describe the same pattern: An AOL account emails them files, signed by a person using the name “Robert,” who is reluctant to speak to their identity or reasons for wanting the documents to receive coverage.<br><br>NBC News was not part of the Robert persona’s direct outreach, but it has viewed its correspondence with a reporter at another publication.<br><br> One of the emails from the Robert persona previously viewed by NBC News included three large PDF files, each corresponding to Trump’s three reported finalists for vice president. 
The Vance file appears to be the one Klippenstein hosts on his site.</i><br><br>In this example hackers attributed to Iran used the Robert persona to email journalists hacked documents (T0146: Account, T0097.100: Individual Persona, T0153.001: Email Platform).<br><br>The journalist Ken Klippenstein used his existing blog on Substack to host a link to download the document (T0089: Obtain Private Documents, T0097.102: Journalist Persona, T0115: Post Content, T0143.001: Authentic Persona, T0152.001: Blogging Platform, T0152.002: Blog, T0150.003: Pre-Existing). |
|
||||
| [T0143.001 Authentic Persona](../../generated_pages/techniques/T0143.001.md) | IT00000491 <i>An American journalist who runs an independent newsletter published a document [on 26 Sep 2024] that appears to have been stolen from Donald Trump’s presidential campaign — the first public posting of a file that is believed to be part of a dossier that federal officials say is part of an Iranian effort to manipulate the [2024] U.S. election.<br><br>The PDF document is a 271-page opposition research file on former President Donald Trump’s running mate, Sen. JD Vance, R-Ohio.<br><br>For more than two months, hackers who the U.S. says are tied to Iran have tried to persuade the American media to cover files they stole. No outlets took the bait.<br><br>But on Thursday, reporter Ken Klippenstein, who self-publishes on Substack after he left The Intercept this year, published one of the files.<br><br>[...]<br><br>Reporters who have received the documents describe the same pattern: An AOL account emails them files, signed by a person using the name “Robert,” who is reluctant to speak to their identity or reasons for wanting the documents to receive coverage.<br><br>NBC News was not part of the Robert persona’s direct outreach, but it has viewed its correspondence with a reporter at another publication.<br><br> One of the emails from the Robert persona previously viewed by NBC News included three large PDF files, each corresponding to Trump’s three reported finalists for vice president. 
The Vance file appears to be the one Klippenstein hosts on his site.</i><br><br>In this example hackers attributed to Iran used the Robert persona to email journalists hacked documents (T0146: Account, T0097.100: Individual Persona, T0153.001: Email Platform).<br><br>The journalist Ken Klippenstein used his existing blog on Substack to host a link to download the document (T0089: Obtain Private Documents, T0097.102: Journalist Persona, T0115: Post Content, T0143.001: Authentic Persona, T0152.001: Blogging Platform, T0152.002: Blog, T0150.003: Pre-Existing). |
|
||||
| [T0150.003 Pre-Existing](../../generated_pages/techniques/T0150.003.md) | IT00000494 <i>An American journalist who runs an independent newsletter published a document [on 26 Sep 2024] that appears to have been stolen from Donald Trump’s presidential campaign — the first public posting of a file that is believed to be part of a dossier that federal officials say is part of an Iranian effort to manipulate the [2024] U.S. election.<br><br>The PDF document is a 271-page opposition research file on former President Donald Trump’s running mate, Sen. JD Vance, R-Ohio.<br><br>For more than two months, hackers who the U.S. says are tied to Iran have tried to persuade the American media to cover files they stole. No outlets took the bait.<br><br>But on Thursday, reporter Ken Klippenstein, who self-publishes on Substack after he left The Intercept this year, published one of the files.<br><br>[...]<br><br>Reporters who have received the documents describe the same pattern: An AOL account emails them files, signed by a person using the name “Robert,” who is reluctant to speak to their identity or reasons for wanting the documents to receive coverage.<br><br>NBC News was not part of the Robert persona’s direct outreach, but it has viewed its correspondence with a reporter at another publication.<br><br> One of the emails from the Robert persona previously viewed by NBC News included three large PDF files, each corresponding to Trump’s three reported finalists for vice president. 
The Vance file appears to be the one Klippenstein hosts on his site.</i><br><br>In this example hackers attributed to Iran used the Robert persona to email journalists hacked documents (T0146: Account, T0097.100: Individual Persona, T0153.001: Email Platform).<br><br>The journalist Ken Klippenstein used his existing blog on Substack to host a link to download the document (T0089: Obtain Private Documents, T0097.102: Journalist Persona, T0115: Post Content, T0143.001: Authentic Persona, T0152.001: Blogging Platform, T0152.002: Blog, T0150.003: Pre-Existing). |
|
||||
| [T0152.001 Blogging Platform](../../generated_pages/techniques/T0152.001.md) | IT00000492 <i>An American journalist who runs an independent newsletter published a document [on 26 Sep 2024] that appears to have been stolen from Donald Trump’s presidential campaign — the first public posting of a file that is believed to be part of a dossier that federal officials say is part of an Iranian effort to manipulate the [2024] U.S. election.<br><br>The PDF document is a 271-page opposition research file on former President Donald Trump’s running mate, Sen. JD Vance, R-Ohio.<br><br>For more than two months, hackers who the U.S. says are tied to Iran have tried to persuade the American media to cover files they stole. No outlets took the bait.<br><br>But on Thursday, reporter Ken Klippenstein, who self-publishes on Substack after he left The Intercept this year, published one of the files.<br><br>[...]<br><br>Reporters who have received the documents describe the same pattern: An AOL account emails them files, signed by a person using the name “Robert,” who is reluctant to speak to their identity or reasons for wanting the documents to receive coverage.<br><br>NBC News was not part of the Robert persona’s direct outreach, but it has viewed its correspondence with a reporter at another publication.<br><br> One of the emails from the Robert persona previously viewed by NBC News included three large PDF files, each corresponding to Trump’s three reported finalists for vice president. 
The Vance file appears to be the one Klippenstein hosts on his site.</i><br><br>In this example hackers attributed to Iran used the Robert persona to email journalists hacked documents (T0146: Account, T0097.100: Individual Persona, T0153.001: Email Platform).<br><br>The journalist Ken Klippenstein used his existing blog on Substack to host a link to download the document (T0089: Obtain Private Documents, T0097.102: Journalist Persona, T0115: Post Content, T0143.001: Authentic Persona, T0152.001: Blogging Platform, T0152.002: Blog, T0150.003: Pre-Existing). |
|
||||
| [T0152.002 Blog](../../generated_pages/techniques/T0152.002.md) | IT00000493 <i>An American journalist who runs an independent newsletter published a document [on 26 Sep 2024] that appears to have been stolen from Donald Trump’s presidential campaign — the first public posting of a file that is believed to be part of a dossier that federal officials say is part of an Iranian effort to manipulate the [2024] U.S. election.<br><br>The PDF document is a 271-page opposition research file on former President Donald Trump’s running mate, Sen. JD Vance, R-Ohio.<br><br>For more than two months, hackers who the U.S. says are tied to Iran have tried to persuade the American media to cover files they stole. No outlets took the bait.<br><br>But on Thursday, reporter Ken Klippenstein, who self-publishes on Substack after he left The Intercept this year, published one of the files.<br><br>[...]<br><br>Reporters who have received the documents describe the same pattern: An AOL account emails them files, signed by a person using the name “Robert,” who is reluctant to speak to their identity or reasons for wanting the documents to receive coverage.<br><br>NBC News was not part of the Robert persona’s direct outreach, but it has viewed its correspondence with a reporter at another publication.<br><br> One of the emails from the Robert persona previously viewed by NBC News included three large PDF files, each corresponding to Trump’s three reported finalists for vice president. 
The Vance file appears to be the one Klippenstein hosts on his site.</i><br><br>In this example hackers attributed to Iran used the Robert persona to email journalists hacked documents (T0146: Account, T0097.100: Individual Persona, T0153.001: Email Platform).<br><br>The journalist Ken Klippenstein used his existing blog on Substack to host a link to download the document (T0089: Obtain Private Documents, T0097.102: Journalist Persona, T0115: Post Content, T0143.001: Authentic Persona, T0152.001: Blogging Platform, T0152.002: Blog, T0150.003: Pre-Existing). |
|
||||
| [T0153.001 Email Platform](../../generated_pages/techniques/T0153.001.md) | IT00000487 <i>An American journalist who runs an independent newsletter published a document [on 26 Sep 2024] that appears to have been stolen from Donald Trump’s presidential campaign — the first public posting of a file that is believed to be part of a dossier that federal officials say is part of an Iranian effort to manipulate the [2024] U.S. election.<br><br>The PDF document is a 271-page opposition research file on former President Donald Trump’s running mate, Sen. JD Vance, R-Ohio.<br><br>For more than two months, hackers who the U.S. says are tied to Iran have tried to persuade the American media to cover files they stole. No outlets took the bait.<br><br>But on Thursday, reporter Ken Klippenstein, who self-publishes on Substack after he left The Intercept this year, published one of the files.<br><br>[...]<br><br>Reporters who have received the documents describe the same pattern: An AOL account emails them files, signed by a person using the name “Robert,” who is reluctant to speak to their identity or reasons for wanting the documents to receive coverage.<br><br>NBC News was not part of the Robert persona’s direct outreach, but it has viewed its correspondence with a reporter at another publication.<br><br> One of the emails from the Robert persona previously viewed by NBC News included three large PDF files, each corresponding to Trump’s three reported finalists for vice president. 
The Vance file appears to be the one Klippenstein hosts on his site.</i><br><br>In this example hackers attributed to Iran used the Robert persona to email journalists hacked documents (T0146: Account, T0097.100: Individual Persona, T0153.001: Email Platform).<br><br>The journalist Ken Klippenstein used his existing blog on Substack to host a link to download the document (T0089: Obtain Private Documents, T0097.102: Journalist Persona, T0115: Post Content, T0143.001: Authentic Persona, T0152.001: Blogging Platform, T0152.002: Blog, T0150.003: Pre-Existing). |
|
||||
| [T0089 Obtain Private Documents](../../generated_pages/techniques/T0089.md) | IT00000489 <i>An American journalist who runs an independent newsletter published a document [on 26 Sep 2024] that appears to have been stolen from Donald Trump’s presidential campaign — the first public posting of a file that is believed to be part of a dossier that federal officials say is part of an Iranian effort to manipulate the [2024] U.S. election.<br><br>The PDF document is a 271-page opposition research file on former President Donald Trump’s running mate, Sen. JD Vance, R-Ohio.<br><br>For more than two months, hackers who the U.S. says are tied to Iran have tried to persuade the American media to cover files they stole. No outlets took the bait.<br><br>But on Thursday, reporter Ken Klippenstein, who self-publishes on Substack after he left The Intercept this year, published one of the files.<br><br>[...]<br><br>Reporters who have received the documents describe the same pattern: An AOL account emails them files, signed by a person using the name “Robert,” who is reluctant to speak to their identity or reasons for wanting the documents to receive coverage.<br><br>NBC News was not part of the Robert persona’s direct outreach, but it has viewed its correspondence with a reporter at another publication.<br><br> One of the emails from the Robert persona previously viewed by NBC News included three large PDF files, each corresponding to Trump’s three reported finalists for vice president. 
The Vance file appears to be the one Klippenstein hosts on his site.</i><br><br>In this example hackers attributed to Iran used the Robert persona to email journalists hacked documents (T0146: Account Asset, T0097.100: Individual Persona, T0153.001: Email Platform).<br><br>The journalist Ken Klippenstein used his existing blog on Substack to host a link to download the document (T0089: Obtain Private Documents, T0097.102: Journalist Persona, T0115: Post Content, T0143.001: Authentic Persona, T0152.001: Blogging Platform, T0152.002: Blog Asset, T0150.003: Pre-Existing Asset). |
|
||||
| [T0097.100 Individual Persona](../../generated_pages/techniques/T0097.100.md) | IT00000488 <i>An American journalist who runs an independent newsletter published a document [on 26 Sep 2024] that appears to have been stolen from Donald Trump’s presidential campaign — the first public posting of a file that is believed to be part of a dossier that federal officials say is part of an Iranian effort to manipulate the [2024] U.S. election.<br><br>The PDF document is a 271-page opposition research file on former President Donald Trump’s running mate, Sen. JD Vance, R-Ohio.<br><br>For more than two months, hackers who the U.S. says are tied to Iran have tried to persuade the American media to cover files they stole. No outlets took the bait.<br><br>But on Thursday, reporter Ken Klippenstein, who self-publishes on Substack after he left The Intercept this year, published one of the files.<br><br>[...]<br><br>Reporters who have received the documents describe the same pattern: An AOL account emails them files, signed by a person using the name “Robert,” who is reluctant to speak to their identity or reasons for wanting the documents to receive coverage.<br><br>NBC News was not part of the Robert persona’s direct outreach, but it has viewed its correspondence with a reporter at another publication.<br><br> One of the emails from the Robert persona previously viewed by NBC News included three large PDF files, each corresponding to Trump’s three reported finalists for vice president. 
The Vance file appears to be the one Klippenstein hosts on his site.</i><br><br>In this example hackers attributed to Iran used the Robert persona to email journalists hacked documents (T0146: Account Asset, T0097.100: Individual Persona, T0153.001: Email Platform).<br><br>The journalist Ken Klippenstein used his existing blog on Substack to host a link to download the document (T0089: Obtain Private Documents, T0097.102: Journalist Persona, T0115: Post Content, T0143.001: Authentic Persona, T0152.001: Blogging Platform, T0152.002: Blog Asset, T0150.003: Pre-Existing Asset). |
|
||||
| [T0097.102 Journalist Persona](../../generated_pages/techniques/T0097.102.md) | IT00000490 <i>An American journalist who runs an independent newsletter published a document [on 26 Sep 2024] that appears to have been stolen from Donald Trump’s presidential campaign — the first public posting of a file that is believed to be part of a dossier that federal officials say is part of an Iranian effort to manipulate the [2024] U.S. election.<br><br>The PDF document is a 271-page opposition research file on former President Donald Trump’s running mate, Sen. JD Vance, R-Ohio.<br><br>For more than two months, hackers who the U.S. says are tied to Iran have tried to persuade the American media to cover files they stole. No outlets took the bait.<br><br>But on Thursday, reporter Ken Klippenstein, who self-publishes on Substack after he left The Intercept this year, published one of the files.<br><br>[...]<br><br>Reporters who have received the documents describe the same pattern: An AOL account emails them files, signed by a person using the name “Robert,” who is reluctant to speak to their identity or reasons for wanting the documents to receive coverage.<br><br>NBC News was not part of the Robert persona’s direct outreach, but it has viewed its correspondence with a reporter at another publication.<br><br> One of the emails from the Robert persona previously viewed by NBC News included three large PDF files, each corresponding to Trump’s three reported finalists for vice president. 
The Vance file appears to be the one Klippenstein hosts on his site.</i><br><br>In this example hackers attributed to Iran used the Robert persona to email journalists hacked documents (T0146: Account Asset, T0097.100: Individual Persona, T0153.001: Email Platform).<br><br>The journalist Ken Klippenstein used his existing blog on Substack to host a link to download the document (T0089: Obtain Private Documents, T0097.102: Journalist Persona, T0115: Post Content, T0143.001: Authentic Persona, T0152.001: Blogging Platform, T0152.002: Blog Asset, T0150.003: Pre-Existing Asset). |
|
||||
| [T0143.001 Authentic Persona](../../generated_pages/techniques/T0143.001.md) | IT00000491 <i>An American journalist who runs an independent newsletter published a document [on 26 Sep 2024] that appears to have been stolen from Donald Trump’s presidential campaign — the first public posting of a file that is believed to be part of a dossier that federal officials say is part of an Iranian effort to manipulate the [2024] U.S. election.<br><br>The PDF document is a 271-page opposition research file on former President Donald Trump’s running mate, Sen. JD Vance, R-Ohio.<br><br>For more than two months, hackers who the U.S. says are tied to Iran have tried to persuade the American media to cover files they stole. No outlets took the bait.<br><br>But on Thursday, reporter Ken Klippenstein, who self-publishes on Substack after he left The Intercept this year, published one of the files.<br><br>[...]<br><br>Reporters who have received the documents describe the same pattern: An AOL account emails them files, signed by a person using the name “Robert,” who is reluctant to speak to their identity or reasons for wanting the documents to receive coverage.<br><br>NBC News was not part of the Robert persona’s direct outreach, but it has viewed its correspondence with a reporter at another publication.<br><br> One of the emails from the Robert persona previously viewed by NBC News included three large PDF files, each corresponding to Trump’s three reported finalists for vice president. 
The Vance file appears to be the one Klippenstein hosts on his site.</i><br><br>In this example hackers attributed to Iran used the Robert persona to email journalists hacked documents (T0146: Account Asset, T0097.100: Individual Persona, T0153.001: Email Platform).<br><br>The journalist Ken Klippenstein used his existing blog on Substack to host a link to download the document (T0089: Obtain Private Documents, T0097.102: Journalist Persona, T0115: Post Content, T0143.001: Authentic Persona, T0152.001: Blogging Platform, T0152.002: Blog Asset, T0150.003: Pre-Existing Asset). |
|
||||
| [T0150.003 Pre-Existing Asset](../../generated_pages/techniques/T0150.003.md) | IT00000494 <i>An American journalist who runs an independent newsletter published a document [on 26 Sep 2024] that appears to have been stolen from Donald Trump’s presidential campaign — the first public posting of a file that is believed to be part of a dossier that federal officials say is part of an Iranian effort to manipulate the [2024] U.S. election.<br><br>The PDF document is a 271-page opposition research file on former President Donald Trump’s running mate, Sen. JD Vance, R-Ohio.<br><br>For more than two months, hackers who the U.S. says are tied to Iran have tried to persuade the American media to cover files they stole. No outlets took the bait.<br><br>But on Thursday, reporter Ken Klippenstein, who self-publishes on Substack after he left The Intercept this year, published one of the files.<br><br>[...]<br><br>Reporters who have received the documents describe the same pattern: An AOL account emails them files, signed by a person using the name “Robert,” who is reluctant to speak to their identity or reasons for wanting the documents to receive coverage.<br><br>NBC News was not part of the Robert persona’s direct outreach, but it has viewed its correspondence with a reporter at another publication.<br><br> One of the emails from the Robert persona previously viewed by NBC News included three large PDF files, each corresponding to Trump’s three reported finalists for vice president. 
The Vance file appears to be the one Klippenstein hosts on his site.</i><br><br>In this example hackers attributed to Iran used the Robert persona to email journalists hacked documents (T0146: Account Asset, T0097.100: Individual Persona, T0153.001: Email Platform).<br><br>The journalist Ken Klippenstein used his existing blog on Substack to host a link to download the document (T0089: Obtain Private Documents, T0097.102: Journalist Persona, T0115: Post Content, T0143.001: Authentic Persona, T0152.001: Blogging Platform, T0152.002: Blog Asset, T0150.003: Pre-Existing Asset). |
|
||||
| [T0152.001 Blogging Platform](../../generated_pages/techniques/T0152.001.md) | IT00000492 <i>An American journalist who runs an independent newsletter published a document [on 26 Sep 2024] that appears to have been stolen from Donald Trump’s presidential campaign — the first public posting of a file that is believed to be part of a dossier that federal officials say is part of an Iranian effort to manipulate the [2024] U.S. election.<br><br>The PDF document is a 271-page opposition research file on former President Donald Trump’s running mate, Sen. JD Vance, R-Ohio.<br><br>For more than two months, hackers who the U.S. says are tied to Iran have tried to persuade the American media to cover files they stole. No outlets took the bait.<br><br>But on Thursday, reporter Ken Klippenstein, who self-publishes on Substack after he left The Intercept this year, published one of the files.<br><br>[...]<br><br>Reporters who have received the documents describe the same pattern: An AOL account emails them files, signed by a person using the name “Robert,” who is reluctant to speak to their identity or reasons for wanting the documents to receive coverage.<br><br>NBC News was not part of the Robert persona’s direct outreach, but it has viewed its correspondence with a reporter at another publication.<br><br> One of the emails from the Robert persona previously viewed by NBC News included three large PDF files, each corresponding to Trump’s three reported finalists for vice president. 
The Vance file appears to be the one Klippenstein hosts on his site.</i><br><br>In this example hackers attributed to Iran used the Robert persona to email journalists hacked documents (T0146: Account Asset, T0097.100: Individual Persona, T0153.001: Email Platform).<br><br>The journalist Ken Klippenstein used his existing blog on Substack to host a link to download the document (T0089: Obtain Private Documents, T0097.102: Journalist Persona, T0115: Post Content, T0143.001: Authentic Persona, T0152.001: Blogging Platform, T0152.002: Blog Asset, T0150.003: Pre-Existing Asset). |
|
||||
| [T0152.002 Blog Asset](../../generated_pages/techniques/T0152.002.md) | IT00000493 <i>An American journalist who runs an independent newsletter published a document [on 26 Sep 2024] that appears to have been stolen from Donald Trump’s presidential campaign — the first public posting of a file that is believed to be part of a dossier that federal officials say is part of an Iranian effort to manipulate the [2024] U.S. election.<br><br>The PDF document is a 271-page opposition research file on former President Donald Trump’s running mate, Sen. JD Vance, R-Ohio.<br><br>For more than two months, hackers who the U.S. says are tied to Iran have tried to persuade the American media to cover files they stole. No outlets took the bait.<br><br>But on Thursday, reporter Ken Klippenstein, who self-publishes on Substack after he left The Intercept this year, published one of the files.<br><br>[...]<br><br>Reporters who have received the documents describe the same pattern: An AOL account emails them files, signed by a person using the name “Robert,” who is reluctant to speak to their identity or reasons for wanting the documents to receive coverage.<br><br>NBC News was not part of the Robert persona’s direct outreach, but it has viewed its correspondence with a reporter at another publication.<br><br> One of the emails from the Robert persona previously viewed by NBC News included three large PDF files, each corresponding to Trump’s three reported finalists for vice president. 
The Vance file appears to be the one Klippenstein hosts on his site.</i><br><br>In this example hackers attributed to Iran used the Robert persona to email journalists hacked documents (T0146: Account Asset, T0097.100: Individual Persona, T0153.001: Email Platform).<br><br>The journalist Ken Klippenstein used his existing blog on Substack to host a link to download the document (T0089: Obtain Private Documents, T0097.102: Journalist Persona, T0115: Post Content, T0143.001: Authentic Persona, T0152.001: Blogging Platform, T0152.002: Blog Asset, T0150.003: Pre-Existing Asset). |
|
||||
| [T0153.001 Email Platform](../../generated_pages/techniques/T0153.001.md) | IT00000487 <i>An American journalist who runs an independent newsletter published a document [on 26 Sep 2024] that appears to have been stolen from Donald Trump’s presidential campaign — the first public posting of a file that is believed to be part of a dossier that federal officials say is part of an Iranian effort to manipulate the [2024] U.S. election.<br><br>The PDF document is a 271-page opposition research file on former President Donald Trump’s running mate, Sen. JD Vance, R-Ohio.<br><br>For more than two months, hackers who the U.S. says are tied to Iran have tried to persuade the American media to cover files they stole. No outlets took the bait.<br><br>But on Thursday, reporter Ken Klippenstein, who self-publishes on Substack after he left The Intercept this year, published one of the files.<br><br>[...]<br><br>Reporters who have received the documents describe the same pattern: An AOL account emails them files, signed by a person using the name “Robert,” who is reluctant to speak to their identity or reasons for wanting the documents to receive coverage.<br><br>NBC News was not part of the Robert persona’s direct outreach, but it has viewed its correspondence with a reporter at another publication.<br><br> One of the emails from the Robert persona previously viewed by NBC News included three large PDF files, each corresponding to Trump’s three reported finalists for vice president. 
The Vance file appears to be the one Klippenstein hosts on his site.</i><br><br>In this example hackers attributed to Iran used the Robert persona to email journalists hacked documents (T0146: Account Asset, T0097.100: Individual Persona, T0153.001: Email Platform).<br><br>The journalist Ken Klippenstein used his existing blog on Substack to host a link to download the document (T0089: Obtain Private Documents, T0097.102: Journalist Persona, T0115: Post Content, T0143.001: Authentic Persona, T0152.001: Blogging Platform, T0152.002: Blog Asset, T0150.003: Pre-Existing Asset). |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -15,16 +15,17 @@
|
||||
|
||||
| Reference | Pub Date | Authors | Org | Archive |
|
||||
| --------- | -------- | ------- | --- | ------- |
|
||||
| [-](-) | 2019/11/21 | Dr Claire Hardaker | - | [https://web.archive.org/web/20220208165928/https://wp.lancs.ac.uk/drclaireh/2019/11/21/factcheckuk-or-fakecheckuk-reinventing-the-political-faction-as-the-impartial-factchecker/](https://web.archive.org/web/20220208165928/https://wp.lancs.ac.uk/drclaireh/2019/11/21/factcheckuk-or-fakecheckuk-reinventing-the-political-faction-as-the-impartial-factchecker/) |
|
||||
|
||||
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0097.203 Fact Checking Organisation Persona](../../generated_pages/techniques/T0097.203.md) | IT00000497 Ahead of the 2019 UK Election during a leader’s debate, the Conservative party rebranded their “Conservative Campaign Headquarters Press” account to “FactCheckUK”:<br><br><i>The evening of the 19th November 2019 saw the first of three Leaders’ Debates on ITV, starting at 8pm and lasting for an hour. Current Prime Minister and leader of the Conservatives, Boris Johnson faced off against Labour party leader, Jeremy Corbyn. Plenty of people will have been watching the debate live, but a good proportion were “watching” (er, “twitching”?) via Twitter. This is something I’ve done in the past for certain shows. In some cases I just can’t watch or listen, but I can read, and in other cases, the commentary is far more interesting and entertaining than the show itself will ever be. This, for me, is just such a case. But very quickly, all eyes turned upon a modestly sized account with the handle @CCHQPress. That’s short for Conservative Campaign Headquarters Press. According to their (current!) Twitter bio, they are based in Westminster and they provide “snippets of news and commentary from CCHQ” to their 75k followers.<br><br>That is, until a few minutes into the debate.<br><br>All at once, like a person throwing off their street clothes to reveal some sinister new identity underneath, @CCHQPress abruptly shed its name, blue Conservative logo, Boris Johnson banner, and bio description. Moments later, it had entirely reinvented itself.<br><br>The purple banner was emblazoned with white font that read “✓ factcheckUK [with a “FROM CCQH” subheading]”.<br><br>The matching profile picture was a white tick in a purple circle. The bio was updated to: “Fact checking Labour from CCHQ”. And the name now read factcheckUK, with the customary Twitter blue (or white depending on your phone settings!) 
validation tick still after it</i><br><br>In this example an existing verified social media account on Twitter was repurposed to inauthentically present itself as a Fact Checking service (T0151.008: Microblogging Platform, T0150.003: Pre-Existing, T0146.003: Verified Account, T0097.203: Fact Checking Organisation Persona, T0143.002: Fabricated Persona). |
|
||||
| [T0143.002 Fabricated Persona](../../generated_pages/techniques/T0143.002.md) | IT00000498 Ahead of the 2019 UK Election during a leader’s debate, the Conservative party rebranded their “Conservative Campaign Headquarters Press” account to “FactCheckUK”:<br><br><i>The evening of the 19th November 2019 saw the first of three Leaders’ Debates on ITV, starting at 8pm and lasting for an hour. Current Prime Minister and leader of the Conservatives, Boris Johnson faced off against Labour party leader, Jeremy Corbyn. Plenty of people will have been watching the debate live, but a good proportion were “watching” (er, “twitching”?) via Twitter. This is something I’ve done in the past for certain shows. In some cases I just can’t watch or listen, but I can read, and in other cases, the commentary is far more interesting and entertaining than the show itself will ever be. This, for me, is just such a case. But very quickly, all eyes turned upon a modestly sized account with the handle @CCHQPress. That’s short for Conservative Campaign Headquarters Press. According to their (current!) Twitter bio, they are based in Westminster and they provide “snippets of news and commentary from CCHQ” to their 75k followers.<br><br>That is, until a few minutes into the debate.<br><br>All at once, like a person throwing off their street clothes to reveal some sinister new identity underneath, @CCHQPress abruptly shed its name, blue Conservative logo, Boris Johnson banner, and bio description. Moments later, it had entirely reinvented itself.<br><br>The purple banner was emblazoned with white font that read “✓ factcheckUK [with a “FROM CCQH” subheading]”.<br><br>The matching profile picture was a white tick in a purple circle. The bio was updated to: “Fact checking Labour from CCHQ”. And the name now read factcheckUK, with the customary Twitter blue (or white depending on your phone settings!) 
validation tick still after it</i><br><br>In this example an existing verified social media account on Twitter was repurposed to inauthentically present itself as a Fact Checking service (T0151.008: Microblogging Platform, T0150.003: Pre-Existing, T0146.003: Verified Account, T0097.203: Fact Checking Organisation Persona, T0143.002: Fabricated Persona). |
|
||||
| [T0146.003 Verified Account](../../generated_pages/techniques/T0146.003.md) | IT00000499 Ahead of the 2019 UK Election during a leader’s debate, the Conservative party rebranded their “Conservative Campaign Headquarters Press” account to “FactCheckUK”:<br><br><i>The evening of the 19th November 2019 saw the first of three Leaders’ Debates on ITV, starting at 8pm and lasting for an hour. Current Prime Minister and leader of the Conservatives, Boris Johnson faced off against Labour party leader, Jeremy Corbyn. Plenty of people will have been watching the debate live, but a good proportion were “watching” (er, “twitching”?) via Twitter. This is something I’ve done in the past for certain shows. In some cases I just can’t watch or listen, but I can read, and in other cases, the commentary is far more interesting and entertaining than the show itself will ever be. This, for me, is just such a case. But very quickly, all eyes turned upon a modestly sized account with the handle @CCHQPress. That’s short for Conservative Campaign Headquarters Press. According to their (current!) Twitter bio, they are based in Westminster and they provide “snippets of news and commentary from CCHQ” to their 75k followers.<br><br>That is, until a few minutes into the debate.<br><br>All at once, like a person throwing off their street clothes to reveal some sinister new identity underneath, @CCHQPress abruptly shed its name, blue Conservative logo, Boris Johnson banner, and bio description. Moments later, it had entirely reinvented itself.<br><br>The purple banner was emblazoned with white font that read “✓ factcheckUK [with a “FROM CCQH” subheading]”.<br><br>The matching profile picture was a white tick in a purple circle. The bio was updated to: “Fact checking Labour from CCHQ”. And the name now read factcheckUK, with the customary Twitter blue (or white depending on your phone settings!) 
validation tick still after it</i><br><br>In this example an existing verified social media account on Twitter was repurposed to inauthentically present itself as a Fact Checking service (T0151.008: Microblogging Platform, T0150.003: Pre-Existing, T0146.003: Verified Account, T0097.203: Fact Checking Organisation Persona, T0143.002: Fabricated Persona). |
|
||||
| [T0150.003 Pre-Existing](../../generated_pages/techniques/T0150.003.md) | IT00000496 Ahead of the 2019 UK Election during a leader’s debate, the Conservative party rebranded their “Conservative Campaign Headquarters Press” account to “FactCheckUK”:<br><br><i>The evening of the 19th November 2019 saw the first of three Leaders’ Debates on ITV, starting at 8pm and lasting for an hour. Current Prime Minister and leader of the Conservatives, Boris Johnson faced off against Labour party leader, Jeremy Corbyn. Plenty of people will have been watching the debate live, but a good proportion were “watching” (er, “twitching”?) via Twitter. This is something I’ve done in the past for certain shows. In some cases I just can’t watch or listen, but I can read, and in other cases, the commentary is far more interesting and entertaining than the show itself will ever be. This, for me, is just such a case. But very quickly, all eyes turned upon a modestly sized account with the handle @CCHQPress. That’s short for Conservative Campaign Headquarters Press. According to their (current!) Twitter bio, they are based in Westminster and they provide “snippets of news and commentary from CCHQ” to their 75k followers.<br><br>That is, until a few minutes into the debate.<br><br>All at once, like a person throwing off their street clothes to reveal some sinister new identity underneath, @CCHQPress abruptly shed its name, blue Conservative logo, Boris Johnson banner, and bio description. Moments later, it had entirely reinvented itself.<br><br>The purple banner was emblazoned with white font that read “✓ factcheckUK [with a “FROM CCQH” subheading]”.<br><br>The matching profile picture was a white tick in a purple circle. The bio was updated to: “Fact checking Labour from CCHQ”. And the name now read factcheckUK, with the customary Twitter blue (or white depending on your phone settings!) 
validation tick still after it</i><br><br>In this example an existing verified social media account on Twitter was repurposed to inauthentically present itself as a Fact Checking service (T0151.008: Microblogging Platform, T0150.003: Pre-Existing, T0146.003: Verified Account, T0097.203: Fact Checking Organisation Persona, T0143.002: Fabricated Persona). |
|
||||
| [T0151.008 Microblogging Platform](../../generated_pages/techniques/T0151.008.md) | IT00000495 Ahead of the 2019 UK Election during a leader’s debate, the Conservative party rebranded their “Conservative Campaign Headquarters Press” account to “FactCheckUK”:<br><br><i>The evening of the 19th November 2019 saw the first of three Leaders’ Debates on ITV, starting at 8pm and lasting for an hour. Current Prime Minister and leader of the Conservatives, Boris Johnson faced off against Labour party leader, Jeremy Corbyn. Plenty of people will have been watching the debate live, but a good proportion were “watching” (er, “twitching”?) via Twitter. This is something I’ve done in the past for certain shows. In some cases I just can’t watch or listen, but I can read, and in other cases, the commentary is far more interesting and entertaining than the show itself will ever be. This, for me, is just such a case. But very quickly, all eyes turned upon a modestly sized account with the handle @CCHQPress. That’s short for Conservative Campaign Headquarters Press. According to their (current!) Twitter bio, they are based in Westminster and they provide “snippets of news and commentary from CCHQ” to their 75k followers.<br><br>That is, until a few minutes into the debate.<br><br>All at once, like a person throwing off their street clothes to reveal some sinister new identity underneath, @CCHQPress abruptly shed its name, blue Conservative logo, Boris Johnson banner, and bio description. Moments later, it had entirely reinvented itself.<br><br>The purple banner was emblazoned with white font that read “✓ factcheckUK [with a “FROM CCQH” subheading]”.<br><br>The matching profile picture was a white tick in a purple circle. The bio was updated to: “Fact checking Labour from CCHQ”. And the name now read factcheckUK, with the customary Twitter blue (or white depending on your phone settings!) 
validation tick still after it</i><br><br>In this example an existing verified social media account on Twitter was repurposed to inauthentically present itself as a Fact Checking service (T0151.008: Microblogging Platform, T0150.003: Pre-Existing, T0146.003: Verified Account, T0097.203: Fact Checking Organisation Persona, T0143.002: Fabricated Persona). |
|
||||
| [T0097.203 Fact Checking Organisation Persona](../../generated_pages/techniques/T0097.203.md) | IT00000497 Ahead of the 2019 UK Election during a leader’s debate, the Conservative party rebranded their “Conservative Campaign Headquarters Press” account to “FactCheckUK”:<br><br><i>The evening of the 19th November 2019 saw the first of three Leaders’ Debates on ITV, starting at 8pm and lasting for an hour. Current Prime Minister and leader of the Conservatives, Boris Johnson faced off against Labour party leader, Jeremy Corbyn. Plenty of people will have been watching the debate live, but a good proportion were “watching” (er, “twitching”?) via Twitter. This is something I’ve done in the past for certain shows. In some cases I just can’t watch or listen, but I can read, and in other cases, the commentary is far more interesting and entertaining than the show itself will ever be. This, for me, is just such a case. But very quickly, all eyes turned upon a modestly sized account with the handle @CCHQPress. That’s short for Conservative Campaign Headquarters Press. According to their (current!) Twitter bio, they are based in Westminster and they provide “snippets of news and commentary from CCHQ” to their 75k followers.<br><br>That is, until a few minutes into the debate.<br><br>All at once, like a person throwing off their street clothes to reveal some sinister new identity underneath, @CCHQPress abruptly shed its name, blue Conservative logo, Boris Johnson banner, and bio description. Moments later, it had entirely reinvented itself.<br><br>The purple banner was emblazoned with white font that read “✓ factcheckUK [with a “FROM CCQH” subheading]”.<br><br>The matching profile picture was a white tick in a purple circle. The bio was updated to: “Fact checking Labour from CCHQ”. And the name now read factcheckUK, with the customary Twitter blue (or white depending on your phone settings!) 
validation tick still after it</i><br><br>In this example an existing verified social media account on Twitter was repurposed to inauthentically present itself as a Fact Checking service (T0151.008: Microblogging Platform, T0150.003: Pre-Existing Asset, T0146.003: Verified Account Asset, T0097.203: Fact Checking Organisation Persona, T0143.002: Fabricated Persona). |
|
||||
| [T0143.002 Fabricated Persona](../../generated_pages/techniques/T0143.002.md) | IT00000498 Ahead of the 2019 UK Election during a leader’s debate, the Conservative party rebranded their “Conservative Campaign Headquarters Press” account to “FactCheckUK”:<br><br><i>The evening of the 19th November 2019 saw the first of three Leaders’ Debates on ITV, starting at 8pm and lasting for an hour. Current Prime Minister and leader of the Conservatives, Boris Johnson faced off against Labour party leader, Jeremy Corbyn. Plenty of people will have been watching the debate live, but a good proportion were “watching” (er, “twitching”?) via Twitter. This is something I’ve done in the past for certain shows. In some cases I just can’t watch or listen, but I can read, and in other cases, the commentary is far more interesting and entertaining than the show itself will ever be. This, for me, is just such a case. But very quickly, all eyes turned upon a modestly sized account with the handle @CCHQPress. That’s short for Conservative Campaign Headquarters Press. According to their (current!) Twitter bio, they are based in Westminster and they provide “snippets of news and commentary from CCHQ” to their 75k followers.<br><br>That is, until a few minutes into the debate.<br><br>All at once, like a person throwing off their street clothes to reveal some sinister new identity underneath, @CCHQPress abruptly shed its name, blue Conservative logo, Boris Johnson banner, and bio description. Moments later, it had entirely reinvented itself.<br><br>The purple banner was emblazoned with white font that read “✓ factcheckUK [with a “FROM CCQH” subheading]”.<br><br>The matching profile picture was a white tick in a purple circle. The bio was updated to: “Fact checking Labour from CCHQ”. And the name now read factcheckUK, with the customary Twitter blue (or white depending on your phone settings!) 
validation tick still after it</i><br><br>In this example an existing verified social media account on Twitter was repurposed to inauthentically present itself as a Fact Checking service (T0151.008: Microblogging Platform, T0150.003: Pre-Existing Asset, T0146.003: Verified Account Asset, T0097.203: Fact Checking Organisation Persona, T0143.002: Fabricated Persona). |
|
||||
| [T0146.003 Verified Account Asset](../../generated_pages/techniques/T0146.003.md) | IT00000499 Ahead of the 2019 UK Election during a leader’s debate, the Conservative party rebranded their “Conservative Campaign Headquarters Press” account to “FactCheckUK”:<br><br><i>The evening of the 19th November 2019 saw the first of three Leaders’ Debates on ITV, starting at 8pm and lasting for an hour. Current Prime Minister and leader of the Conservatives, Boris Johnson faced off against Labour party leader, Jeremy Corbyn. Plenty of people will have been watching the debate live, but a good proportion were “watching” (er, “twitching”?) via Twitter. This is something I’ve done in the past for certain shows. In some cases I just can’t watch or listen, but I can read, and in other cases, the commentary is far more interesting and entertaining than the show itself will ever be. This, for me, is just such a case. But very quickly, all eyes turned upon a modestly sized account with the handle @CCHQPress. That’s short for Conservative Campaign Headquarters Press. According to their (current!) Twitter bio, they are based in Westminster and they provide “snippets of news and commentary from CCHQ” to their 75k followers.<br><br>That is, until a few minutes into the debate.<br><br>All at once, like a person throwing off their street clothes to reveal some sinister new identity underneath, @CCHQPress abruptly shed its name, blue Conservative logo, Boris Johnson banner, and bio description. Moments later, it had entirely reinvented itself.<br><br>The purple banner was emblazoned with white font that read “✓ factcheckUK [with a “FROM CCQH” subheading]”.<br><br>The matching profile picture was a white tick in a purple circle. The bio was updated to: “Fact checking Labour from CCHQ”. And the name now read factcheckUK, with the customary Twitter blue (or white depending on your phone settings!) 
validation tick still after it</i><br><br>In this example an existing verified social media account on Twitter was repurposed to inauthentically present itself as a Fact Checking service (T0151.008: Microblogging Platform, T0150.003: Pre-Existing Asset, T0146.003: Verified Account Asset, T0097.203: Fact Checking Organisation Persona, T0143.002: Fabricated Persona). |
|
||||
| [T0150.003 Pre-Existing Asset](../../generated_pages/techniques/T0150.003.md) | IT00000496 Ahead of the 2019 UK Election during a leader’s debate, the Conservative party rebranded their “Conservative Campaign Headquarters Press” account to “FactCheckUK”:<br><br><i>The evening of the 19th November 2019 saw the first of three Leaders’ Debates on ITV, starting at 8pm and lasting for an hour. Current Prime Minister and leader of the Conservatives, Boris Johnson faced off against Labour party leader, Jeremy Corbyn. Plenty of people will have been watching the debate live, but a good proportion were “watching” (er, “twitching”?) via Twitter. This is something I’ve done in the past for certain shows. In some cases I just can’t watch or listen, but I can read, and in other cases, the commentary is far more interesting and entertaining than the show itself will ever be. This, for me, is just such a case. But very quickly, all eyes turned upon a modestly sized account with the handle @CCHQPress. That’s short for Conservative Campaign Headquarters Press. According to their (current!) Twitter bio, they are based in Westminster and they provide “snippets of news and commentary from CCHQ” to their 75k followers.<br><br>That is, until a few minutes into the debate.<br><br>All at once, like a person throwing off their street clothes to reveal some sinister new identity underneath, @CCHQPress abruptly shed its name, blue Conservative logo, Boris Johnson banner, and bio description. Moments later, it had entirely reinvented itself.<br><br>The purple banner was emblazoned with white font that read “✓ factcheckUK [with a “FROM CCQH” subheading]”.<br><br>The matching profile picture was a white tick in a purple circle. The bio was updated to: “Fact checking Labour from CCHQ”. And the name now read factcheckUK, with the customary Twitter blue (or white depending on your phone settings!) 
validation tick still after it</i><br><br>In this example an existing verified social media account on Twitter was repurposed to inauthentically present itself as a Fact Checking service (T0151.008: Microblogging Platform, T0150.003: Pre-Existing Asset, T0146.003: Verified Account Asset, T0097.203: Fact Checking Organisation Persona, T0143.002: Fabricated Persona). |
|
||||
| [T0151.008 Microblogging Platform](../../generated_pages/techniques/T0151.008.md) | IT00000495 Ahead of the 2019 UK Election during a leader’s debate, the Conservative party rebranded their “Conservative Campaign Headquarters Press” account to “FactCheckUK”:<br><br><i>The evening of the 19th November 2019 saw the first of three Leaders’ Debates on ITV, starting at 8pm and lasting for an hour. Current Prime Minister and leader of the Conservatives, Boris Johnson faced off against Labour party leader, Jeremy Corbyn. Plenty of people will have been watching the debate live, but a good proportion were “watching” (er, “twitching”?) via Twitter. This is something I’ve done in the past for certain shows. In some cases I just can’t watch or listen, but I can read, and in other cases, the commentary is far more interesting and entertaining than the show itself will ever be. This, for me, is just such a case. But very quickly, all eyes turned upon a modestly sized account with the handle @CCHQPress. That’s short for Conservative Campaign Headquarters Press. According to their (current!) Twitter bio, they are based in Westminster and they provide “snippets of news and commentary from CCHQ” to their 75k followers.<br><br>That is, until a few minutes into the debate.<br><br>All at once, like a person throwing off their street clothes to reveal some sinister new identity underneath, @CCHQPress abruptly shed its name, blue Conservative logo, Boris Johnson banner, and bio description. Moments later, it had entirely reinvented itself.<br><br>The purple banner was emblazoned with white font that read “✓ factcheckUK [with a “FROM CCQH” subheading]”.<br><br>The matching profile picture was a white tick in a purple circle. The bio was updated to: “Fact checking Labour from CCHQ”. And the name now read factcheckUK, with the customary Twitter blue (or white depending on your phone settings!) 
validation tick still after it</i><br><br>In this example an existing verified social media account on Twitter was repurposed to inauthentically present itself as a Fact Checking service (T0151.008: Microblogging Platform, T0150.003: Pre-Existing Asset, T0146.003: Verified Account Asset, T0097.203: Fact Checking Organisation Persona, T0143.002: Fabricated Persona). |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -15,14 +15,15 @@
|
||||
|
||||
| Reference | Pub Date | Authors | Org | Archive |
|
||||
| --------- | -------- | ------- | --- | ------- |
|
||||
| [https://checkfirst.network/wp-content/uploads/2024/06/Operation_Overload_WEB.pdf](https://checkfirst.network/wp-content/uploads/2024/06/Operation_Overload_WEB.pdf) | 2024/06/01 | Aleksandra Atanasova, Amaury Lesplingart, Francesco Poldi, Guillaume Kuster | CheckFirst | [https://web.archive.org/web/20240928205427/https://checkfirst.network/wp-content/uploads/2024/06/Operation_Overload_WEB.pdf](https://web.archive.org/web/20240928205427/https://checkfirst.network/wp-content/uploads/2024/06/Operation_Overload_WEB.pdf) |
|
||||
|
||||
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0143.002 Fabricated Persona](../../generated_pages/techniques/T0143.002.md) | IT00000501 <i>The unique aspect of Operation Overload is a barrage of emails sent to newsrooms and fact-checkers across Europe. The authors of these messages urge recipients to verify content allegedly found online. The email subject lines often include an incitement to verify the claims briefly described in the message body. This is followed by a short list of links directing recipients to posts on Telegram, X, or known pro-Russian websites, including Pravda and Sputnik. <br><br>We have collected 221 emails sent to 20 organisations. The organisations mostly received identical emails urging them to fact-check specific false stories, which demonstrates that the emails were sent as part of a larger coordinated campaign.<br><br>[...]<br><br>The authors of the emails do not hide their intention to see the fake content widely spread. In February 2024, a journalist at the German outlet CORRECTIV engaged with the sender of one of the emails, providing feedback on the narratives which were originally sent. CORRECTIV received a response from the same Gmail address, initially expressing respect and trust in CORRECTIV’s assessment, while asking: “is it possible for your work to be seen by as many people as possible?”, thereby clearly stating the goal of the operation.<br><br>[...]<br><br>All the emails come from authors posing as concerned citizens. All emails are sent with Gmail accounts, which is typical for personal use. This makes it challenging to identify the individuals behind these emails, as anyone can open a Gmail account for free. 
The email headers indicate that the messages were sent from the Gmail interface, not from a personal client which would disclose the sender’s IP address.</i><br><br>In this example, threat actors used gmail accounts (T0146.001: Free Account, T0097.100: Individual Persona, T0143.002: Fabricated Persona, T0153.001: Email Platform) to target journalists and fact-checkers, with the apparent goal of having them amplify operation narratives through fact checks. |
|
||||
| [T0146.001 Free Account](../../generated_pages/techniques/T0146.001.md) | IT00000500 <i>The unique aspect of Operation Overload is a barrage of emails sent to newsrooms and fact-checkers across Europe. The authors of these messages urge recipients to verify content allegedly found online. The email subject lines often include an incitement to verify the claims briefly described in the message body. This is followed by a short list of links directing recipients to posts on Telegram, X, or known pro-Russian websites, including Pravda and Sputnik. <br><br>We have collected 221 emails sent to 20 organisations. The organisations mostly received identical emails urging them to fact-check specific false stories, which demonstrates that the emails were sent as part of a larger coordinated campaign.<br><br>[...]<br><br>The authors of the emails do not hide their intention to see the fake content widely spread. In February 2024, a journalist at the German outlet CORRECTIV engaged with the sender of one of the emails, providing feedback on the narratives which were originally sent. CORRECTIV received a response from the same Gmail address, initially expressing respect and trust in CORRECTIV’s assessment, while asking: “is it possible for your work to be seen by as many people as possible?”, thereby clearly stating the goal of the operation.<br><br>[...]<br><br>All the emails come from authors posing as concerned citizens. All emails are sent with Gmail accounts, which is typical for personal use. This makes it challenging to identify the individuals behind these emails, as anyone can open a Gmail account for free. 
The email headers indicate that the messages were sent from the Gmail interface, not from a personal client which would disclose the sender’s IP address.</i><br><br>In this example, threat actors used gmail accounts (T0146.001: Free Account, T0097.100: Individual Persona, T0143.002: Fabricated Persona, T0153.001: Email Platform) to target journalists and fact-checkers, with the apparent goal of having them amplify operation narratives through fact checks. |
|
||||
| [T0153.001 Email Platform](../../generated_pages/techniques/T0153.001.md) | IT00000502 <i>The unique aspect of Operation Overload is a barrage of emails sent to newsrooms and fact-checkers across Europe. The authors of these messages urge recipients to verify content allegedly found online. The email subject lines often include an incitement to verify the claims briefly described in the message body. This is followed by a short list of links directing recipients to posts on Telegram, X, or known pro-Russian websites, including Pravda and Sputnik. <br><br>We have collected 221 emails sent to 20 organisations. The organisations mostly received identical emails urging them to fact-check specific false stories, which demonstrates that the emails were sent as part of a larger coordinated campaign.<br><br>[...]<br><br>The authors of the emails do not hide their intention to see the fake content widely spread. In February 2024, a journalist at the German outlet CORRECTIV engaged with the sender of one of the emails, providing feedback on the narratives which were originally sent. CORRECTIV received a response from the same Gmail address, initially expressing respect and trust in CORRECTIV’s assessment, while asking: “is it possible for your work to be seen by as many people as possible?”, thereby clearly stating the goal of the operation.<br><br>[...]<br><br>All the emails come from authors posing as concerned citizens. All emails are sent with Gmail accounts, which is typical for personal use. This makes it challenging to identify the individuals behind these emails, as anyone can open a Gmail account for free. 
The email headers indicate that the messages were sent from the Gmail interface, not from a personal client which would disclose the sender’s IP address.</i><br><br>In this example, threat actors used gmail accounts (T0146.001: Free Account, T0097.100: Individual Persona, T0143.002: Fabricated Persona, T0153.001: Email Platform) to target journalists and fact-checkers, with the apparent goal of having them amplify operation narratives through fact checks. |
|
||||
| [T0143.002 Fabricated Persona](../../generated_pages/techniques/T0143.002.md) | IT00000501 <i>The unique aspect of Operation Overload is a barrage of emails sent to newsrooms and fact-checkers across Europe. The authors of these messages urge recipients to verify content allegedly found online. The email subject lines often include an incitement to verify the claims briefly described in the message body. This is followed by a short list of links directing recipients to posts on Telegram, X, or known pro-Russian websites, including Pravda and Sputnik. <br><br>We have collected 221 emails sent to 20 organisations. The organisations mostly received identical emails urging them to fact-check specific false stories, which demonstrates that the emails were sent as part of a larger coordinated campaign.<br><br>[...]<br><br>The authors of the emails do not hide their intention to see the fake content widely spread. In February 2024, a journalist at the German outlet CORRECTIV engaged with the sender of one of the emails, providing feedback on the narratives which were originally sent. CORRECTIV received a response from the same Gmail address, initially expressing respect and trust in CORRECTIV’s assessment, while asking: “is it possible for your work to be seen by as many people as possible?”, thereby clearly stating the goal of the operation.<br><br>[...]<br><br>All the emails come from authors posing as concerned citizens. All emails are sent with Gmail accounts, which is typical for personal use. This makes it challenging to identify the individuals behind these emails, as anyone can open a Gmail account for free. 
The email headers indicate that the messages were sent from the Gmail interface, not from a personal client which would disclose the sender’s IP address.</i><br><br>In this example, threat actors used Gmail accounts (T0146.001: Free Account Asset, T0097.100: Individual Persona, T0143.002: Fabricated Persona, T0153.001: Email Platform) to target journalists and fact-checkers, with the apparent goal of having them amplify operation narratives through fact checks. |
|
||||
| [T0146.001 Free Account Asset](../../generated_pages/techniques/T0146.001.md) | IT00000500 <i>The unique aspect of Operation Overload is a barrage of emails sent to newsrooms and fact-checkers across Europe. The authors of these messages urge recipients to verify content allegedly found online. The email subject lines often include an incitement to verify the claims briefly described in the message body. This is followed by a short list of links directing recipients to posts on Telegram, X, or known pro-Russian websites, including Pravda and Sputnik. <br><br>We have collected 221 emails sent to 20 organisations. The organisations mostly received identical emails urging them to fact-check specific false stories, which demonstrates that the emails were sent as part of a larger coordinated campaign.<br><br>[...]<br><br>The authors of the emails do not hide their intention to see the fake content widely spread. In February 2024, a journalist at the German outlet CORRECTIV engaged with the sender of one of the emails, providing feedback on the narratives which were originally sent. CORRECTIV received a response from the same Gmail address, initially expressing respect and trust in CORRECTIV’s assessment, while asking: “is it possible for your work to be seen by as many people as possible?”, thereby clearly stating the goal of the operation.<br><br>[...]<br><br>All the emails come from authors posing as concerned citizens. All emails are sent with Gmail accounts, which is typical for personal use. This makes it challenging to identify the individuals behind these emails, as anyone can open a Gmail account for free. 
The email headers indicate that the messages were sent from the Gmail interface, not from a personal client which would disclose the sender’s IP address.</i><br><br>In this example, threat actors used Gmail accounts (T0146.001: Free Account Asset, T0097.100: Individual Persona, T0143.002: Fabricated Persona, T0153.001: Email Platform) to target journalists and fact-checkers, with the apparent goal of having them amplify operation narratives through fact checks. |
|
||||
| [T0153.001 Email Platform](../../generated_pages/techniques/T0153.001.md) | IT00000502 <i>The unique aspect of Operation Overload is a barrage of emails sent to newsrooms and fact-checkers across Europe. The authors of these messages urge recipients to verify content allegedly found online. The email subject lines often include an incitement to verify the claims briefly described in the message body. This is followed by a short list of links directing recipients to posts on Telegram, X, or known pro-Russian websites, including Pravda and Sputnik. <br><br>We have collected 221 emails sent to 20 organisations. The organisations mostly received identical emails urging them to fact-check specific false stories, which demonstrates that the emails were sent as part of a larger coordinated campaign.<br><br>[...]<br><br>The authors of the emails do not hide their intention to see the fake content widely spread. In February 2024, a journalist at the German outlet CORRECTIV engaged with the sender of one of the emails, providing feedback on the narratives which were originally sent. CORRECTIV received a response from the same Gmail address, initially expressing respect and trust in CORRECTIV’s assessment, while asking: “is it possible for your work to be seen by as many people as possible?”, thereby clearly stating the goal of the operation.<br><br>[...]<br><br>All the emails come from authors posing as concerned citizens. All emails are sent with Gmail accounts, which is typical for personal use. This makes it challenging to identify the individuals behind these emails, as anyone can open a Gmail account for free. 
The email headers indicate that the messages were sent from the Gmail interface, not from a personal client which would disclose the sender’s IP address.</i><br><br>In this example, threat actors used Gmail accounts (T0146.001: Free Account Asset, T0097.100: Individual Persona, T0143.002: Fabricated Persona, T0153.001: Email Platform) to target journalists and fact-checkers, with the apparent goal of having them amplify operation narratives through fact checks. |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -15,6 +15,7 @@
|
||||
|
||||
| Reference | Pub Date | Authors | Org | Archive |
|
||||
| --------- | -------- | ------- | --- | ------- |
|
||||
| [https://www.isdglobal.org/wp-content/uploads/2021/08/04-gaming-report-discord.pdf](https://www.isdglobal.org/wp-content/uploads/2021/08/04-gaming-report-discord.pdf) | 2021/08/04 | Aoife Gallagher, Ciaran O’Connor, Pierre Vaux, Elise Thomas, Jacob Davey | ISD | [https://web.archive.org/web/20240528231254/https://www.isdglobal.org/wp-content/uploads/2021/08/04-gaming-report-discord.pdf](https://web.archive.org/web/20240528231254/https://www.isdglobal.org/wp-content/uploads/2021/08/04-gaming-report-discord.pdf) |
|
||||
|
||||
|
||||
|
||||
|
@ -15,6 +15,7 @@
|
||||
|
||||
| Reference | Pub Date | Authors | Org | Archive |
|
||||
| --------- | -------- | ------- | --- | ------- |
|
||||
| [https://www.isdglobal.org/wp-content/uploads/2021/08/02-revised-gaming-report-steam.pdf](https://www.isdglobal.org/wp-content/uploads/2021/08/02-revised-gaming-report-steam.pdf) | 2021/08/02 | Pierre Vaux, Aoife Gallagher, Jacob Davey | ISD | [https://web.archive.org/web/20240912223614/https://www.isdglobal.org/wp-content/uploads/2021/08/02-revised-gaming-report-steam.pdf](https://web.archive.org/web/20240912223614/https://www.isdglobal.org/wp-content/uploads/2021/08/02-revised-gaming-report-steam.pdf) |
|
||||
|
||||
|
||||
|
||||
|
@ -15,6 +15,7 @@
|
||||
|
||||
| Reference | Pub Date | Authors | Org | Archive |
|
||||
| --------- | -------- | ------- | --- | ------- |
|
||||
| [https://www.mpf.se/publikationer/publikationer/2023-12-01-malign-foreign-interference-and-information-influence-on-video-game-platforms-understanding-the-adversarial-playbook](https://www.mpf.se/publikationer/publikationer/2023-12-01-malign-foreign-interference-and-information-influence-on-video-game-platforms-understanding-the-adversarial-playbook) | 2023/12/01 | Jesper Falkheimer, Elsa Isaksson, James Pamment | Myndigheten för psykologiskt försvar | [https://web.archive.org/web/20240520215840/https://www.mpf.se/publikationer/publikationer/2023-12-01-malign-foreign-interference-and-information-influence-on-video-game-platforms-understanding-the-adversarial-playbook](https://web.archive.org/web/20240520215840/https://www.mpf.se/publikationer/publikationer/2023-12-01-malign-foreign-interference-and-information-influence-on-video-game-platforms-understanding-the-adversarial-playbook) |
|
||||
|
||||
|
||||
|
||||
|
@ -15,17 +15,17 @@
|
||||
|
||||
| Reference | Pub Date | Authors | Org | Archive |
|
||||
| --------- | -------- | ------- | --- | ------- |
|
||||
| [https://www.nytimes.com/2015/06/07/magazine/the-agency.html?](https://www.nytimes.com/2015/06/07/magazine/the-agency.html?) | 2015/06/07 | Adrian Chen | The New York Times | [https://web.archive.org/web/20241007172237/https://www.nytimes.com/2015/06/07/magazine/the-agency.html](https://web.archive.org/web/20241007172237/https://www.nytimes.com/2015/06/07/magazine/the-agency.html) |
|
||||
|
||||
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0087 Develop Video-Based Content](../../generated_pages/techniques/T0087.md) | IT00000524 In 2014 threat actors attributed to Russia spread the false narrative that a local chemical plant had leaked toxic fumes. This report discusses aspects of the operation:<br><br><i>[The chemical plant leak] hoax was just one in a wave of similar attacks during the second half of last year. On Dec. 13, two months after a handful of Ebola cases in the United States touched off a minor media panic, many of the same Twitter accounts used to spread the Columbian Chemicals hoax began to post about an outbreak of Ebola in Atlanta. [...] Again, the attention to detail was remarkable, suggesting a tremendous amount of effort. A YouTube video showed a team of hazmat-suited medical workers transporting a victim from the airport. Beyoncé’s recent single “7/11” played in the background, an apparent attempt to establish the video’s contemporaneity. A truck in the parking lot sported the logo of the Hartsfield-Jackson Atlanta International Airport.</i><br><br>Accounts which previously presented as Louisiana locals were repurposed for use in a different campaign, this time presenting as locals to Atlanta, a place over 500 miles away from Louisiana and in a different timezone (T0146: Account, T0097.101: Local Persona, T0143.002: Fabricated Persona, T0151.008: Microblogging Platform, T0150.004: Repurposed). <br><br>A video was created which appeared to support the campaign’s narrative (T0087: Develop Video-Based Content), with great attention given to small details which made the video appear more legitimate. |
|
||||
| [T0097.101 Local Persona](../../generated_pages/techniques/T0097.101.md) | In 2014 threat actors attributed to Russia spread the false narrative that a local chemical plant had leaked toxic fumes. This report discusses aspects of the operation:<br><br><i>[The chemical plant leak] hoax was just one in a wave of similar attacks during the second half of last year. On Dec. 13, two months after a handful of Ebola cases in the United States touched off a minor media panic, many of the same Twitter accounts used to spread the Columbian Chemicals hoax began to post about an outbreak of Ebola in Atlanta. [...] Again, the attention to detail was remarkable, suggesting a tremendous amount of effort. A YouTube video showed a team of hazmat-suited medical workers transporting a victim from the airport. Beyoncé’s recent single “7/11” played in the background, an apparent attempt to establish the video’s contemporaneity. A truck in the parking lot sported the logo of the Hartsfield-Jackson Atlanta International Airport.</i><br><br>Accounts which previously presented as Louisiana locals were repurposed for use in a different campaign, this time presenting as locals to Atlanta, a place over 500 miles away from Louisiana and in a different timezone (T0146: Account, T0097.101: Local Persona, T0143.002: Fabricated Persona, T0151.008: Microblogging Platform, T0150.004: Repurposed). <br><br>A video was created which appeared to support the campaign’s narrative (T0087: Develop Video-Based Content), with great attention given to small details which made the video appear more legitimate. |
|
||||
| [T0143.002 Fabricated Persona](../../generated_pages/techniques/T0143.002.md) | IT00000521 In 2014 threat actors attributed to Russia spread the false narrative that a local chemical plant had leaked toxic fumes. This report discusses aspects of the operation:<br><br><i>[The chemical plant leak] hoax was just one in a wave of similar attacks during the second half of last year. On Dec. 13, two months after a handful of Ebola cases in the United States touched off a minor media panic, many of the same Twitter accounts used to spread the Columbian Chemicals hoax began to post about an outbreak of Ebola in Atlanta. [...] Again, the attention to detail was remarkable, suggesting a tremendous amount of effort. A YouTube video showed a team of hazmat-suited medical workers transporting a victim from the airport. Beyoncé’s recent single “7/11” played in the background, an apparent attempt to establish the video’s contemporaneity. A truck in the parking lot sported the logo of the Hartsfield-Jackson Atlanta International Airport.</i><br><br>Accounts which previously presented as Louisiana locals were repurposed for use in a different campaign, this time presenting as locals to Atlanta, a place over 500 miles away from Louisiana and in a different timezone (T0146: Account, T0097.101: Local Persona, T0143.002: Fabricated Persona, T0151.008: Microblogging Platform, T0150.004: Repurposed). <br><br>A video was created which appeared to support the campaign’s narrative (T0087: Develop Video-Based Content), with great attention given to small details which made the video appear more legitimate. |
|
||||
| [T0146 Account](../../generated_pages/techniques/T0146.md) | IT00000520 In 2014 threat actors attributed to Russia spread the false narrative that a local chemical plant had leaked toxic fumes. This report discusses aspects of the operation:<br><br><i>[The chemical plant leak] hoax was just one in a wave of similar attacks during the second half of last year. On Dec. 13, two months after a handful of Ebola cases in the United States touched off a minor media panic, many of the same Twitter accounts used to spread the Columbian Chemicals hoax began to post about an outbreak of Ebola in Atlanta. [...] Again, the attention to detail was remarkable, suggesting a tremendous amount of effort. A YouTube video showed a team of hazmat-suited medical workers transporting a victim from the airport. Beyoncé’s recent single “7/11” played in the background, an apparent attempt to establish the video’s contemporaneity. A truck in the parking lot sported the logo of the Hartsfield-Jackson Atlanta International Airport.</i><br><br>Accounts which previously presented as Louisiana locals were repurposed for use in a different campaign, this time presenting as locals to Atlanta, a place over 500 miles away from Louisiana and in a different timezone (T0146: Account, T0097.101: Local Persona, T0143.002: Fabricated Persona, T0151.008: Microblogging Platform, T0150.004: Repurposed). <br><br>A video was created which appeared to support the campaign’s narrative (T0087: Develop Video-Based Content), with great attention given to small details which made the video appear more legitimate. |
|
||||
| [T0150.004 Repurposed](../../generated_pages/techniques/T0150.004.md) | IT00000523 In 2014 threat actors attributed to Russia spread the false narrative that a local chemical plant had leaked toxic fumes. This report discusses aspects of the operation:<br><br><i>[The chemical plant leak] hoax was just one in a wave of similar attacks during the second half of last year. On Dec. 13, two months after a handful of Ebola cases in the United States touched off a minor media panic, many of the same Twitter accounts used to spread the Columbian Chemicals hoax began to post about an outbreak of Ebola in Atlanta. [...] Again, the attention to detail was remarkable, suggesting a tremendous amount of effort. A YouTube video showed a team of hazmat-suited medical workers transporting a victim from the airport. Beyoncé’s recent single “7/11” played in the background, an apparent attempt to establish the video’s contemporaneity. A truck in the parking lot sported the logo of the Hartsfield-Jackson Atlanta International Airport.</i><br><br>Accounts which previously presented as Louisiana locals were repurposed for use in a different campaign, this time presenting as locals to Atlanta, a place over 500 miles away from Louisiana and in a different timezone (T0146: Account, T0097.101: Local Persona, T0143.002: Fabricated Persona, T0151.008: Microblogging Platform, T0150.004: Repurposed). <br><br>A video was created which appeared to support the campaign’s narrative (T0087: Develop Video-Based Content), with great attention given to small details which made the video appear more legitimate. |
|
||||
| [T0151.008 Microblogging Platform](../../generated_pages/techniques/T0151.008.md) | IT00000522 In 2014 threat actors attributed to Russia spread the false narrative that a local chemical plant had leaked toxic fumes. This report discusses aspects of the operation:<br><br><i>[The chemical plant leak] hoax was just one in a wave of similar attacks during the second half of last year. On Dec. 13, two months after a handful of Ebola cases in the United States touched off a minor media panic, many of the same Twitter accounts used to spread the Columbian Chemicals hoax began to post about an outbreak of Ebola in Atlanta. [...] Again, the attention to detail was remarkable, suggesting a tremendous amount of effort. A YouTube video showed a team of hazmat-suited medical workers transporting a victim from the airport. Beyoncé’s recent single “7/11” played in the background, an apparent attempt to establish the video’s contemporaneity. A truck in the parking lot sported the logo of the Hartsfield-Jackson Atlanta International Airport.</i><br><br>Accounts which previously presented as Louisiana locals were repurposed for use in a different campaign, this time presenting as locals to Atlanta, a place over 500 miles away from Louisiana and in a different timezone (T0146: Account, T0097.101: Local Persona, T0143.002: Fabricated Persona, T0151.008: Microblogging Platform, T0150.004: Repurposed). <br><br>A video was created which appeared to support the campaign’s narrative (T0087: Develop Video-Based Content), with great attention given to small details which made the video appear more legitimate. |
|
||||
| [T0087 Develop Video-Based Content](../../generated_pages/techniques/T0087.md) | IT00000524 In 2014 threat actors attributed to Russia spread the false narrative that a local chemical plant had leaked toxic fumes. This report discusses aspects of the operation:<br><br><i>[The chemical plant leak] hoax was just one in a wave of similar attacks during the second half of last year. On Dec. 13, two months after a handful of Ebola cases in the United States touched off a minor media panic, many of the same Twitter accounts used to spread the Columbian Chemicals hoax began to post about an outbreak of Ebola in Atlanta. [...] Again, the attention to detail was remarkable, suggesting a tremendous amount of effort. A YouTube video showed a team of hazmat-suited medical workers transporting a victim from the airport. Beyoncé’s recent single “7/11” played in the background, an apparent attempt to establish the video’s contemporaneity. A truck in the parking lot sported the logo of the Hartsfield-Jackson Atlanta International Airport.</i><br><br>Accounts which previously presented as Louisiana locals were repurposed for use in a different campaign, this time presenting as locals to Atlanta, a place over 500 miles away from Louisiana and in a different timezone (T0146: Account Asset, T0097.101: Local Persona, T0143.002: Fabricated Persona, T0151.008: Microblogging Platform, T0150.004: Repurposed Asset). <br><br>A video was created which appeared to support the campaign’s narrative (T0087: Develop Video-Based Content), with great attention given to small details which made the video appear more legitimate. |
|
||||
| [T0143.002 Fabricated Persona](../../generated_pages/techniques/T0143.002.md) | IT00000521 In 2014 threat actors attributed to Russia spread the false narrative that a local chemical plant had leaked toxic fumes. This report discusses aspects of the operation:<br><br><i>[The chemical plant leak] hoax was just one in a wave of similar attacks during the second half of last year. On Dec. 13, two months after a handful of Ebola cases in the United States touched off a minor media panic, many of the same Twitter accounts used to spread the Columbian Chemicals hoax began to post about an outbreak of Ebola in Atlanta. [...] Again, the attention to detail was remarkable, suggesting a tremendous amount of effort. A YouTube video showed a team of hazmat-suited medical workers transporting a victim from the airport. Beyoncé’s recent single “7/11” played in the background, an apparent attempt to establish the video’s contemporaneity. A truck in the parking lot sported the logo of the Hartsfield-Jackson Atlanta International Airport.</i><br><br>Accounts which previously presented as Louisiana locals were repurposed for use in a different campaign, this time presenting as locals to Atlanta, a place over 500 miles away from Louisiana and in a different timezone (T0146: Account Asset, T0097.101: Local Persona, T0143.002: Fabricated Persona, T0151.008: Microblogging Platform, T0150.004: Repurposed Asset). <br><br>A video was created which appeared to support the campaign’s narrative (T0087: Develop Video-Based Content), with great attention given to small details which made the video appear more legitimate. |
|
||||
| [T0146 Account Asset](../../generated_pages/techniques/T0146.md) | IT00000520 In 2014 threat actors attributed to Russia spread the false narrative that a local chemical plant had leaked toxic fumes. This report discusses aspects of the operation:<br><br><i>[The chemical plant leak] hoax was just one in a wave of similar attacks during the second half of last year. On Dec. 13, two months after a handful of Ebola cases in the United States touched off a minor media panic, many of the same Twitter accounts used to spread the Columbian Chemicals hoax began to post about an outbreak of Ebola in Atlanta. [...] Again, the attention to detail was remarkable, suggesting a tremendous amount of effort. A YouTube video showed a team of hazmat-suited medical workers transporting a victim from the airport. Beyoncé’s recent single “7/11” played in the background, an apparent attempt to establish the video’s contemporaneity. A truck in the parking lot sported the logo of the Hartsfield-Jackson Atlanta International Airport.</i><br><br>Accounts which previously presented as Louisiana locals were repurposed for use in a different campaign, this time presenting as locals to Atlanta, a place over 500 miles away from Louisiana and in a different timezone (T0146: Account Asset, T0097.101: Local Persona, T0143.002: Fabricated Persona, T0151.008: Microblogging Platform, T0150.004: Repurposed Asset). <br><br>A video was created which appeared to support the campaign’s narrative (T0087: Develop Video-Based Content), with great attention given to small details which made the video appear more legitimate. |
|
||||
| [T0150.004 Repurposed Asset](../../generated_pages/techniques/T0150.004.md) | IT00000523 In 2014 threat actors attributed to Russia spread the false narrative that a local chemical plant had leaked toxic fumes. This report discusses aspects of the operation:<br><br><i>[The chemical plant leak] hoax was just one in a wave of similar attacks during the second half of last year. On Dec. 13, two months after a handful of Ebola cases in the United States touched off a minor media panic, many of the same Twitter accounts used to spread the Columbian Chemicals hoax began to post about an outbreak of Ebola in Atlanta. [...] Again, the attention to detail was remarkable, suggesting a tremendous amount of effort. A YouTube video showed a team of hazmat-suited medical workers transporting a victim from the airport. Beyoncé’s recent single “7/11” played in the background, an apparent attempt to establish the video’s contemporaneity. A truck in the parking lot sported the logo of the Hartsfield-Jackson Atlanta International Airport.</i><br><br>Accounts which previously presented as Louisiana locals were repurposed for use in a different campaign, this time presenting as locals to Atlanta, a place over 500 miles away from Louisiana and in a different timezone (T0146: Account Asset, T0097.101: Local Persona, T0143.002: Fabricated Persona, T0151.008: Microblogging Platform, T0150.004: Repurposed Asset). <br><br>A video was created which appeared to support the campaign’s narrative (T0087: Develop Video-Based Content), with great attention given to small details which made the video appear more legitimate. |
|
||||
| [T0151.008 Microblogging Platform](../../generated_pages/techniques/T0151.008.md) | IT00000522 In 2014 threat actors attributed to Russia spread the false narrative that a local chemical plant had leaked toxic fumes. This report discusses aspects of the operation:<br><br><i>[The chemical plant leak] hoax was just one in a wave of similar attacks during the second half of last year. On Dec. 13, two months after a handful of Ebola cases in the United States touched off a minor media panic, many of the same Twitter accounts used to spread the Columbian Chemicals hoax began to post about an outbreak of Ebola in Atlanta. [...] Again, the attention to detail was remarkable, suggesting a tremendous amount of effort. A YouTube video showed a team of hazmat-suited medical workers transporting a victim from the airport. Beyoncé’s recent single “7/11” played in the background, an apparent attempt to establish the video’s contemporaneity. A truck in the parking lot sported the logo of the Hartsfield-Jackson Atlanta International Airport.</i><br><br>Accounts which previously presented as Louisiana locals were repurposed for use in a different campaign, this time presenting as locals to Atlanta, a place over 500 miles away from Louisiana and in a different timezone (T0146: Account Asset, T0097.101: Local Persona, T0143.002: Fabricated Persona, T0151.008: Microblogging Platform, T0150.004: Repurposed Asset). <br><br>A video was created which appeared to support the campaign’s narrative (T0087: Develop Video-Based Content), with great attention given to small details which made the video appear more legitimate. |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -15,18 +15,19 @@
|
||||
|
||||
| Reference | Pub Date | Authors | Org | Archive |
|
||||
| --------- | -------- | ------- | --- | ------- |
|
||||
| [https://www.volexity.com/blog/2023/06/28/charming-kitten-updates-powerstar-with-an-interplanetary-twist/](https://www.volexity.com/blog/2023/06/28/charming-kitten-updates-powerstar-with-an-interplanetary-twist/) | 2023/06/28 | Ankur Saini, Charlie Gardner | Volexity | [https://web.archive.org/web/20241009175500/https://www.volexity.com/blog/2023/06/28/charming-kitten-updates-powerstar-with-an-interplanetary-twist/](https://web.archive.org/web/20241009175500/https://www.volexity.com/blog/2023/06/28/charming-kitten-updates-powerstar-with-an-interplanetary-twist/) |
|
||||
|
||||
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0085.004 Develop Document](../../generated_pages/techniques/T0085.004.md) | IT00000530 <i>The target of the recently observed [highly targeted spearphishing attack by “Charming Kitten”, a hacker group attributed to Iran] had published an article related to Iran. The publicity appears to have garnered the attention of Charming Kitten, who subsequently created an email address to impersonate a reporter of an Israeli media organization in order to send the target an email. Prior to sending malware to the target, the attacker simply asked if the target would be open to reviewing a document they had written related to US foreign policy. The target agreed to do so, since this was not an unusual request; they are frequently asked by journalists to review opinion pieces relating to their field of work.<br><br>In an effort to further gain the target’s confidence, Charming Kitten continued the interaction with another benign email containing a list of questions, to which the target then responded with answers. After multiple days of benign and seemingly legitimate interaction, Charming Kitten finally sent a “draft report”; this was the first time anything opaquely malicious occurred. The “draft report” was, in fact, a password-protected RAR file containing a malicious LNK file. The password for the RAR file was provided in a subsequent email.</i><br><br>In this example, threat actors created an email address on a domain which impersonated an existing Israeli news organisation impersonating a reporter who worked there (T0097.102: Journalist Persona, T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0149.003: Lookalike Domain, T0149.002: Email Domain) in order to convince the target to download a document containing malware (T0085.004: Develop Document, T0147.003: Malware). |
|
||||
| [T0097.102 Journalist Persona](../../generated_pages/techniques/T0097.102.md) | IT00000525 <i>The target of the recently observed [highly targeted spearphishing attack by “Charming Kitten”, a hacker group attributed to Iran] had published an article related to Iran. The publicity appears to have garnered the attention of Charming Kitten, who subsequently created an email address to impersonate a reporter of an Israeli media organization in order to send the target an email. Prior to sending malware to the target, the attacker simply asked if the target would be open to reviewing a document they had written related to US foreign policy. The target agreed to do so, since this was not an unusual request; they are frequently asked by journalists to review opinion pieces relating to their field of work.<br><br>In an effort to further gain the target’s confidence, Charming Kitten continued the interaction with another benign email containing a list of questions, to which the target then responded with answers. After multiple days of benign and seemingly legitimate interaction, Charming Kitten finally sent a “draft report”; this was the first time anything opaquely malicious occurred. The “draft report” was, in fact, a password-protected RAR file containing a malicious LNK file. The password for the RAR file was provided in a subsequent email.</i><br><br>In this example, threat actors created an email address on a domain which impersonated an existing Israeli news organisation impersonating a reporter who worked there (T0097.102: Journalist Persona, T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0149.003: Lookalike Domain, T0149.002: Email Domain) in order to convince the target to download a document containing malware (T0085.004: Develop Document, T0147.003: Malware). |
|
||||
| [T0097.202 News Outlet Persona](../../generated_pages/techniques/T0097.202.md) | IT00000526 <i>The target of the recently observed [highly targeted spearphishing attack by “Charming Kitten”, a hacker group attributed to Iran] had published an article related to Iran. The publicity appears to have garnered the attention of Charming Kitten, who subsequently created an email address to impersonate a reporter of an Israeli media organization in order to send the target an email. Prior to sending malware to the target, the attacker simply asked if the target would be open to reviewing a document they had written related to US foreign policy. The target agreed to do so, since this was not an unusual request; they are frequently asked by journalists to review opinion pieces relating to their field of work.<br><br>In an effort to further gain the target’s confidence, Charming Kitten continued the interaction with another benign email containing a list of questions, to which the target then responded with answers. After multiple days of benign and seemingly legitimate interaction, Charming Kitten finally sent a “draft report”; this was the first time anything opaquely malicious occurred. The “draft report” was, in fact, a password-protected RAR file containing a malicious LNK file. The password for the RAR file was provided in a subsequent email.</i><br><br>In this example, threat actors created an email address on a domain which impersonated an existing Israeli news organisation impersonating a reporter who worked there (T0097.102: Journalist Persona, T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0149.003: Lookalike Domain, T0149.002: Email Domain) in order to convince the target to download a document containing malware (T0085.004: Develop Document, T0147.003: Malware). |
|
||||
| [T0143.003 Impersonated Persona](../../generated_pages/techniques/T0143.003.md) | IT00000527 <i>The target of the recently observed [highly targeted spearphishing attack by “Charming Kitten”, a hacker group attributed to Iran] had published an article related to Iran. The publicity appears to have garnered the attention of Charming Kitten, who subsequently created an email address to impersonate a reporter of an Israeli media organization in order to send the target an email. Prior to sending malware to the target, the attacker simply asked if the target would be open to reviewing a document they had written related to US foreign policy. The target agreed to do so, since this was not an unusual request; they are frequently asked by journalists to review opinion pieces relating to their field of work.<br><br>In an effort to further gain the target’s confidence, Charming Kitten continued the interaction with another benign email containing a list of questions, to which the target then responded with answers. After multiple days of benign and seemingly legitimate interaction, Charming Kitten finally sent a “draft report”; this was the first time anything opaquely malicious occurred. The “draft report” was, in fact, a password-protected RAR file containing a malicious LNK file. The password for the RAR file was provided in a subsequent email.</i><br><br>In this example, threat actors created an email address on a domain which impersonated an existing Israeli news organisation impersonating a reporter who worked there (T0097.102: Journalist Persona, T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0149.003: Lookalike Domain, T0149.002: Email Domain) in order to convince the target to download a document containing malware (T0085.004: Develop Document, T0147.003: Malware). |
|
||||
| [T0147.003 Malware](../../generated_pages/techniques/T0147.003.md) | IT00000531 <i>The target of the recently observed [highly targeted spearphishing attack by “Charming Kitten”, a hacker group attributed to Iran] had published an article related to Iran. The publicity appears to have garnered the attention of Charming Kitten, who subsequently created an email address to impersonate a reporter of an Israeli media organization in order to send the target an email. Prior to sending malware to the target, the attacker simply asked if the target would be open to reviewing a document they had written related to US foreign policy. The target agreed to do so, since this was not an unusual request; they are frequently asked by journalists to review opinion pieces relating to their field of work.<br><br>In an effort to further gain the target’s confidence, Charming Kitten continued the interaction with another benign email containing a list of questions, to which the target then responded with answers. After multiple days of benign and seemingly legitimate interaction, Charming Kitten finally sent a “draft report”; this was the first time anything opaquely malicious occurred. The “draft report” was, in fact, a password-protected RAR file containing a malicious LNK file. The password for the RAR file was provided in a subsequent email.</i><br><br>In this example, threat actors created an email address on a domain which impersonated an existing Israeli news organisation impersonating a reporter who worked there (T0097.102: Journalist Persona, T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0149.003: Lookalike Domain, T0149.002: Email Domain) in order to convince the target to download a document containing malware (T0085.004: Develop Document, T0147.003: Malware). |
|
||||
| [T0149.002 Email Domain](../../generated_pages/techniques/T0149.002.md) | IT00000529 <i>The target of the recently observed [highly targeted spearphishing attack by “Charming Kitten”, a hacker group attributed to Iran] had published an article related to Iran. The publicity appears to have garnered the attention of Charming Kitten, who subsequently created an email address to impersonate a reporter of an Israeli media organization in order to send the target an email. Prior to sending malware to the target, the attacker simply asked if the target would be open to reviewing a document they had written related to US foreign policy. The target agreed to do so, since this was not an unusual request; they are frequently asked by journalists to review opinion pieces relating to their field of work.<br><br>In an effort to further gain the target’s confidence, Charming Kitten continued the interaction with another benign email containing a list of questions, to which the target then responded with answers. After multiple days of benign and seemingly legitimate interaction, Charming Kitten finally sent a “draft report”; this was the first time anything opaquely malicious occurred. The “draft report” was, in fact, a password-protected RAR file containing a malicious LNK file. The password for the RAR file was provided in a subsequent email.</i><br><br>In this example, threat actors created an email address on a domain which impersonated an existing Israeli news organisation impersonating a reporter who worked there (T0097.102: Journalist Persona, T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0149.003: Lookalike Domain, T0149.002: Email Domain) in order to convince the target to download a document containing malware (T0085.004: Develop Document, T0147.003: Malware). |
|
||||
| [T0149.003 Lookalike Domain](../../generated_pages/techniques/T0149.003.md) | IT00000528 <i>The target of the recently observed [highly targeted spearphishing attack by “Charming Kitten”, a hacker group attributed to Iran] had published an article related to Iran. The publicity appears to have garnered the attention of Charming Kitten, who subsequently created an email address to impersonate a reporter of an Israeli media organization in order to send the target an email. Prior to sending malware to the target, the attacker simply asked if the target would be open to reviewing a document they had written related to US foreign policy. The target agreed to do so, since this was not an unusual request; they are frequently asked by journalists to review opinion pieces relating to their field of work.<br><br>In an effort to further gain the target’s confidence, Charming Kitten continued the interaction with another benign email containing a list of questions, to which the target then responded with answers. After multiple days of benign and seemingly legitimate interaction, Charming Kitten finally sent a “draft report”; this was the first time anything opaquely malicious occurred. The “draft report” was, in fact, a password-protected RAR file containing a malicious LNK file. The password for the RAR file was provided in a subsequent email.</i><br><br>In this example, threat actors created an email address on a domain which impersonated an existing Israeli news organisation impersonating a reporter who worked there (T0097.102: Journalist Persona, T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0149.003: Lookalike Domain, T0149.002: Email Domain) in order to convince the target to download a document containing malware (T0085.004: Develop Document, T0147.003: Malware). |
|
||||
| [T0085.004 Develop Document](../../generated_pages/techniques/T0085.004.md) | IT00000530 <i>The target of the recently observed [highly targeted spearphishing attack by “Charming Kitten”, a hacker group attributed to Iran] had published an article related to Iran. The publicity appears to have garnered the attention of Charming Kitten, who subsequently created an email address to impersonate a reporter of an Israeli media organization in order to send the target an email. Prior to sending malware to the target, the attacker simply asked if the target would be open to reviewing a document they had written related to US foreign policy. The target agreed to do so, since this was not an unusual request; they are frequently asked by journalists to review opinion pieces relating to their field of work.<br><br>In an effort to further gain the target’s confidence, Charming Kitten continued the interaction with another benign email containing a list of questions, to which the target then responded with answers. After multiple days of benign and seemingly legitimate interaction, Charming Kitten finally sent a “draft report”; this was the first time anything opaquely malicious occurred. The “draft report” was, in fact, a password-protected RAR file containing a malicious LNK file. The password for the RAR file was provided in a subsequent email.</i><br><br>In this example, threat actors created an email address on a domain which impersonated an existing Israeli news organisation impersonating a reporter who worked there (T0097.102: Journalist Persona, T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0149.003: Lookalike Domain, T0149.002: Email Domain Asset) in order to convince the target to download a document containing malware (T0085.004: Develop Document, T0147.003: Malware Asset). |
|
||||
| [T0097.102 Journalist Persona](../../generated_pages/techniques/T0097.102.md) | IT00000525 <i>The target of the recently observed [highly targeted spearphishing attack by “Charming Kitten”, a hacker group attributed to Iran] had published an article related to Iran. The publicity appears to have garnered the attention of Charming Kitten, who subsequently created an email address to impersonate a reporter of an Israeli media organization in order to send the target an email. Prior to sending malware to the target, the attacker simply asked if the target would be open to reviewing a document they had written related to US foreign policy. The target agreed to do so, since this was not an unusual request; they are frequently asked by journalists to review opinion pieces relating to their field of work.<br><br>In an effort to further gain the target’s confidence, Charming Kitten continued the interaction with another benign email containing a list of questions, to which the target then responded with answers. After multiple days of benign and seemingly legitimate interaction, Charming Kitten finally sent a “draft report”; this was the first time anything opaquely malicious occurred. The “draft report” was, in fact, a password-protected RAR file containing a malicious LNK file. The password for the RAR file was provided in a subsequent email.</i><br><br>In this example, threat actors created an email address on a domain which impersonated an existing Israeli news organisation impersonating a reporter who worked there (T0097.102: Journalist Persona, T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0149.003: Lookalike Domain, T0149.002: Email Domain Asset) in order to convince the target to download a document containing malware (T0085.004: Develop Document, T0147.003: Malware Asset). |
|
||||
| [T0097.202 News Outlet Persona](../../generated_pages/techniques/T0097.202.md) | IT00000526 <i>The target of the recently observed [highly targeted spearphishing attack by “Charming Kitten”, a hacker group attributed to Iran] had published an article related to Iran. The publicity appears to have garnered the attention of Charming Kitten, who subsequently created an email address to impersonate a reporter of an Israeli media organization in order to send the target an email. Prior to sending malware to the target, the attacker simply asked if the target would be open to reviewing a document they had written related to US foreign policy. The target agreed to do so, since this was not an unusual request; they are frequently asked by journalists to review opinion pieces relating to their field of work.<br><br>In an effort to further gain the target’s confidence, Charming Kitten continued the interaction with another benign email containing a list of questions, to which the target then responded with answers. After multiple days of benign and seemingly legitimate interaction, Charming Kitten finally sent a “draft report”; this was the first time anything opaquely malicious occurred. The “draft report” was, in fact, a password-protected RAR file containing a malicious LNK file. The password for the RAR file was provided in a subsequent email.</i><br><br>In this example, threat actors created an email address on a domain which impersonated an existing Israeli news organisation impersonating a reporter who worked there (T0097.102: Journalist Persona, T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0149.003: Lookalike Domain, T0149.002: Email Domain Asset) in order to convince the target to download a document containing malware (T0085.004: Develop Document, T0147.003: Malware Asset). |
|
||||
| [T0143.003 Impersonated Persona](../../generated_pages/techniques/T0143.003.md) | IT00000527 <i>The target of the recently observed [highly targeted spearphishing attack by “Charming Kitten”, a hacker group attributed to Iran] had published an article related to Iran. The publicity appears to have garnered the attention of Charming Kitten, who subsequently created an email address to impersonate a reporter of an Israeli media organization in order to send the target an email. Prior to sending malware to the target, the attacker simply asked if the target would be open to reviewing a document they had written related to US foreign policy. The target agreed to do so, since this was not an unusual request; they are frequently asked by journalists to review opinion pieces relating to their field of work.<br><br>In an effort to further gain the target’s confidence, Charming Kitten continued the interaction with another benign email containing a list of questions, to which the target then responded with answers. After multiple days of benign and seemingly legitimate interaction, Charming Kitten finally sent a “draft report”; this was the first time anything opaquely malicious occurred. The “draft report” was, in fact, a password-protected RAR file containing a malicious LNK file. The password for the RAR file was provided in a subsequent email.</i><br><br>In this example, threat actors created an email address on a domain which impersonated an existing Israeli news organisation impersonating a reporter who worked there (T0097.102: Journalist Persona, T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0149.003: Lookalike Domain, T0149.002: Email Domain Asset) in order to convince the target to download a document containing malware (T0085.004: Develop Document, T0147.003: Malware Asset). |
|
||||
| [T0147.003 Malware Asset](../../generated_pages/techniques/T0147.003.md) | IT00000531 <i>The target of the recently observed [highly targeted spearphishing attack by “Charming Kitten”, a hacker group attributed to Iran] had published an article related to Iran. The publicity appears to have garnered the attention of Charming Kitten, who subsequently created an email address to impersonate a reporter of an Israeli media organization in order to send the target an email. Prior to sending malware to the target, the attacker simply asked if the target would be open to reviewing a document they had written related to US foreign policy. The target agreed to do so, since this was not an unusual request; they are frequently asked by journalists to review opinion pieces relating to their field of work.<br><br>In an effort to further gain the target’s confidence, Charming Kitten continued the interaction with another benign email containing a list of questions, to which the target then responded with answers. After multiple days of benign and seemingly legitimate interaction, Charming Kitten finally sent a “draft report”; this was the first time anything opaquely malicious occurred. The “draft report” was, in fact, a password-protected RAR file containing a malicious LNK file. The password for the RAR file was provided in a subsequent email.</i><br><br>In this example, threat actors created an email address on a domain which impersonated an existing Israeli news organisation impersonating a reporter who worked there (T0097.102: Journalist Persona, T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0149.003: Lookalike Domain, T0149.002: Email Domain Asset) in order to convince the target to download a document containing malware (T0085.004: Develop Document, T0147.003: Malware Asset). |
|
||||
| [T0149.002 Email Domain Asset](../../generated_pages/techniques/T0149.002.md) | IT00000529 <i>The target of the recently observed [highly targeted spearphishing attack by “Charming Kitten”, a hacker group attributed to Iran] had published an article related to Iran. The publicity appears to have garnered the attention of Charming Kitten, who subsequently created an email address to impersonate a reporter of an Israeli media organization in order to send the target an email. Prior to sending malware to the target, the attacker simply asked if the target would be open to reviewing a document they had written related to US foreign policy. The target agreed to do so, since this was not an unusual request; they are frequently asked by journalists to review opinion pieces relating to their field of work.<br><br>In an effort to further gain the target’s confidence, Charming Kitten continued the interaction with another benign email containing a list of questions, to which the target then responded with answers. After multiple days of benign and seemingly legitimate interaction, Charming Kitten finally sent a “draft report”; this was the first time anything opaquely malicious occurred. The “draft report” was, in fact, a password-protected RAR file containing a malicious LNK file. The password for the RAR file was provided in a subsequent email.</i><br><br>In this example, threat actors created an email address on a domain which impersonated an existing Israeli news organisation impersonating a reporter who worked there (T0097.102: Journalist Persona, T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0149.003: Lookalike Domain, T0149.002: Email Domain Asset) in order to convince the target to download a document containing malware (T0085.004: Develop Document, T0147.003: Malware Asset). |
|
||||
| [T0149.003 Lookalike Domain](../../generated_pages/techniques/T0149.003.md) | IT00000528 <i>The target of the recently observed [highly targeted spearphishing attack by “Charming Kitten”, a hacker group attributed to Iran] had published an article related to Iran. The publicity appears to have garnered the attention of Charming Kitten, who subsequently created an email address to impersonate a reporter of an Israeli media organization in order to send the target an email. Prior to sending malware to the target, the attacker simply asked if the target would be open to reviewing a document they had written related to US foreign policy. The target agreed to do so, since this was not an unusual request; they are frequently asked by journalists to review opinion pieces relating to their field of work.<br><br>In an effort to further gain the target’s confidence, Charming Kitten continued the interaction with another benign email containing a list of questions, to which the target then responded with answers. After multiple days of benign and seemingly legitimate interaction, Charming Kitten finally sent a “draft report”; this was the first time anything opaquely malicious occurred. The “draft report” was, in fact, a password-protected RAR file containing a malicious LNK file. The password for the RAR file was provided in a subsequent email.</i><br><br>In this example, threat actors created an email address on a domain which impersonated an existing Israeli news organisation impersonating a reporter who worked there (T0097.102: Journalist Persona, T0097.202: News Outlet Persona, T0143.003: Impersonated Persona, T0149.003: Lookalike Domain, T0149.002: Email Domain Asset) in order to convince the target to download a document containing malware (T0085.004: Develop Document, T0147.003: Malware Asset). |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -15,6 +15,7 @@
|
||||
|
||||
| Reference | Pub Date | Authors | Org | Archive |
|
||||
| --------- | -------- | ------- | --- | ------- |
|
||||
| [https://www.darkreading.com/threat-intelligence/iranian-apts-dress-up-as-hacktivists-for-disruption-influence-ops](https://www.darkreading.com/threat-intelligence/iranian-apts-dress-up-as-hacktivists-for-disruption-influence-ops) | 2024/02/21 | Nate Nelson | DarkReading | [https://web.archive.org/web/20240221113558/https://www.darkreading.com/threat-intelligence/iranian-apts-dress-up-as-hacktivists-for-disruption-influence-ops](https://web.archive.org/web/20240221113558/https://www.darkreading.com/threat-intelligence/iranian-apts-dress-up-as-hacktivists-for-disruption-influence-ops) |
|
||||
|
||||
|
||||
|
||||
|
@ -15,17 +15,18 @@
|
||||
|
||||
| Reference | Pub Date | Authors | Org | Archive |
|
||||
| --------- | -------- | ------- | --- | ------- |
|
||||
| [https://medium.com/dfrlab/trolltracker-outward-influence-operation-from-iran-cc4539684c8d](https://medium.com/dfrlab/trolltracker-outward-influence-operation-from-iran-cc4539684c8d) | 2019/01/31 | Kanishk Karan, Donara Barojan, Melissa Hall, Graham Brookie | DFRLab | [https://web.archive.org/web/20240618014243/https://medium.com/dfrlab/trolltracker-outward-influence-operation-from-iran-cc4539684c8d](https://web.archive.org/web/20240618014243/https://medium.com/dfrlab/trolltracker-outward-influence-operation-from-iran-cc4539684c8d) |
|
||||
|
||||
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0097.208 Social Cause Persona](../../generated_pages/techniques/T0097.208.md) | IT00000534 <i>[Meta removed a network of assets for coordinated inauthentic behaviour. One page] in the network, @StopMEK, was promoting views against the People’s Mujahedin of Iran (MEK), the largest and most active political opposition group against the Islamic Republic of Iran Leadership.<br><br>The content on the page drew narratives showing parallels between the Islamic State of Iraq and Syria (ISIS) and the MEK.<br><br>Apart from images and memes, the @StopMEK page shared a link to an archived report on how the United States was monitoring the MEK’s movement in Iran in the mid-1990’s. The file was embedded as a QR code on one of the page’s images.</i><br><br>In this example a Facebook page presented itself as focusing on a political cause (T0097.208: Social Cause Persona, T0151.001: Social Media Platform, T0151.002: Online Community Group). Within the page it embedded a QR code (T0122: Direct Users to Alternative Platforms, T0153.004: QR Code), which took users to a document hosted on another website (T0152.004: Website). |
|
||||
| [T0122 Direct Users to Alternative Platforms](../../generated_pages/techniques/T0122.md) | IT00000537 <i>[Meta removed a network of assets for coordinated inauthentic behaviour. One page] in the network, @StopMEK, was promoting views against the People’s Mujahedin of Iran (MEK), the largest and most active political opposition group against the Islamic Republic of Iran Leadership.<br><br>The content on the page drew narratives showing parallels between the Islamic State of Iraq and Syria (ISIS) and the MEK.<br><br>Apart from images and memes, the @StopMEK page shared a link to an archived report on how the United States was monitoring the MEK’s movement in Iran in the mid-1990’s. The file was embedded as a QR code on one of the page’s images.</i><br><br>In this example a Facebook page presented itself as focusing on a political cause (T0097.208: Social Cause Persona, T0151.001: Social Media Platform, T0151.002: Online Community Group). Within the page it embedded a QR code (T0122: Direct Users to Alternative Platforms, T0153.004: QR Code), which took users to a document hosted on another website (T0152.004: Website). |
|
||||
| [T0151.001 Social Media Platform](../../generated_pages/techniques/T0151.001.md) | IT00000535 <i>[Meta removed a network of assets for coordinated inauthentic behaviour. One page] in the network, @StopMEK, was promoting views against the People’s Mujahedin of Iran (MEK), the largest and most active political opposition group against the Islamic Republic of Iran Leadership.<br><br>The content on the page drew narratives showing parallels between the Islamic State of Iraq and Syria (ISIS) and the MEK.<br><br>Apart from images and memes, the @StopMEK page shared a link to an archived report on how the United States was monitoring the MEK’s movement in Iran in the mid-1990’s. The file was embedded as a QR code on one of the page’s images.</i><br><br>In this example a Facebook page presented itself as focusing on a political cause (T0097.208: Social Cause Persona, T0151.001: Social Media Platform, T0151.002: Online Community Group). Within the page it embedded a QR code (T0122: Direct Users to Alternative Platforms, T0153.004: QR Code), which took users to a document hosted on another website (T0152.004: Website). |
|
||||
| [T0151.002 Online Community Group](../../generated_pages/techniques/T0151.002.md) | IT00000536 <i>[Meta removed a network of assets for coordinated inauthentic behaviour. One page] in the network, @StopMEK, was promoting views against the People’s Mujahedin of Iran (MEK), the largest and most active political opposition group against the Islamic Republic of Iran Leadership.<br><br>The content on the page drew narratives showing parallels between the Islamic State of Iraq and Syria (ISIS) and the MEK.<br><br>Apart from images and memes, the @StopMEK page shared a link to an archived report on how the United States was monitoring the MEK’s movement in Iran in the mid-1990’s. The file was embedded as a QR code on one of the page’s images.</i><br><br>In this example a Facebook page presented itself as focusing on a political cause (T0097.208: Social Cause Persona, T0151.001: Social Media Platform, T0151.002: Online Community Group). Within the page it embedded a QR code (T0122: Direct Users to Alternative Platforms, T0153.004: QR Code), which took users to a document hosted on another website (T0152.004: Website). |
|
||||
| [T0152.004 Website](../../generated_pages/techniques/T0152.004.md) | IT00000539 <i>[Meta removed a network of assets for coordinated inauthentic behaviour. One page] in the network, @StopMEK, was promoting views against the People’s Mujahedin of Iran (MEK), the largest and most active political opposition group against the Islamic Republic of Iran Leadership.<br><br>The content on the page drew narratives showing parallels between the Islamic State of Iraq and Syria (ISIS) and the MEK.<br><br>Apart from images and memes, the @StopMEK page shared a link to an archived report on how the United States was monitoring the MEK’s movement in Iran in the mid-1990’s. The file was embedded as a QR code on one of the page’s images.</i><br><br>In this example a Facebook page presented itself as focusing on a political cause (T0097.208: Social Cause Persona, T0151.001: Social Media Platform, T0151.002: Online Community Group). Within the page it embedded a QR code (T0122: Direct Users to Alternative Platforms, T0153.004: QR Code), which took users to a document hosted on another website (T0152.004: Website). |
|
||||
| [T0153.004 QR Code](../../generated_pages/techniques/T0153.004.md) | IT00000538 <i>[Meta removed a network of assets for coordinated inauthentic behaviour. One page] in the network, @StopMEK, was promoting views against the People’s Mujahedin of Iran (MEK), the largest and most active political opposition group against the Islamic Republic of Iran Leadership.<br><br>The content on the page drew narratives showing parallels between the Islamic State of Iraq and Syria (ISIS) and the MEK.<br><br>Apart from images and memes, the @StopMEK page shared a link to an archived report on how the United States was monitoring the MEK’s movement in Iran in the mid-1990’s. The file was embedded as a QR code on one of the page’s images.</i><br><br>In this example a Facebook page presented itself as focusing on a political cause (T0097.208: Social Cause Persona, T0151.001: Social Media Platform, T0151.002: Online Community Group). Within the page it embedded a QR code (T0122: Direct Users to Alternative Platforms, T0153.004: QR Code), which took users to a document hosted on another website (T0152.004: Website). |
|
||||
| [T0097.208 Social Cause Persona](../../generated_pages/techniques/T0097.208.md) | IT00000534 <i>[Meta removed a network of assets for coordinated inauthentic behaviour. One page] in the network, @StopMEK, was promoting views against the People’s Mujahedin of Iran (MEK), the largest and most active political opposition group against the Islamic Republic of Iran Leadership.<br><br>The content on the page drew narratives showing parallels between the Islamic State of Iraq and Syria (ISIS) and the MEK.<br><br>Apart from images and memes, the @StopMEK page shared a link to an archived report on how the United States was monitoring the MEK’s movement in Iran in the mid-1990’s. The file was embedded as a QR code on one of the page’s images.</i><br><br>In this example a Facebook page presented itself as focusing on a political cause (T0097.208: Social Cause Persona, T0151.001: Social Media Platform, T0151.002: Online Community Group). Within the page it embedded a QR code (T0122: Direct Users to Alternative Platforms, T0153.004: QR Code Asset), which took users to a document hosted on another website (T0152.004: Website Asset). |
|
||||
| [T0122 Direct Users to Alternative Platforms](../../generated_pages/techniques/T0122.md) | IT00000537 <i>[Meta removed a network of assets for coordinated inauthentic behaviour. One page] in the network, @StopMEK, was promoting views against the People’s Mujahedin of Iran (MEK), the largest and most active political opposition group against the Islamic Republic of Iran Leadership.<br><br>The content on the page drew narratives showing parallels between the Islamic State of Iraq and Syria (ISIS) and the MEK.<br><br>Apart from images and memes, the @StopMEK page shared a link to an archived report on how the United States was monitoring the MEK’s movement in Iran in the mid-1990’s. The file was embedded as a QR code on one of the page’s images.</i><br><br>In this example a Facebook page presented itself as focusing on a political cause (T0097.208: Social Cause Persona, T0151.001: Social Media Platform, T0151.002: Online Community Group). Within the page it embedded a QR code (T0122: Direct Users to Alternative Platforms, T0153.004: QR Code Asset), which took users to a document hosted on another website (T0152.004: Website Asset). |
|
||||
| [T0151.001 Social Media Platform](../../generated_pages/techniques/T0151.001.md) | IT00000535 <i>[Meta removed a network of assets for coordinated inauthentic behaviour. One page] in the network, @StopMEK, was promoting views against the People’s Mujahedin of Iran (MEK), the largest and most active political opposition group against the Islamic Republic of Iran Leadership.<br><br>The content on the page drew narratives showing parallels between the Islamic State of Iraq and Syria (ISIS) and the MEK.<br><br>Apart from images and memes, the @StopMEK page shared a link to an archived report on how the United States was monitoring the MEK’s movement in Iran in the mid-1990’s. The file was embedded as a QR code on one of the page’s images.</i><br><br>In this example a Facebook page presented itself as focusing on a political cause (T0097.208: Social Cause Persona, T0151.001: Social Media Platform, T0151.002: Online Community Group). Within the page it embedded a QR code (T0122: Direct Users to Alternative Platforms, T0153.004: QR Code Asset), which took users to a document hosted on another website (T0152.004: Website Asset). |
|
||||
| [T0151.002 Online Community Group](../../generated_pages/techniques/T0151.002.md) | IT00000536 <i>[Meta removed a network of assets for coordinated inauthentic behaviour. One page] in the network, @StopMEK, was promoting views against the People’s Mujahedin of Iran (MEK), the largest and most active political opposition group against the Islamic Republic of Iran Leadership.<br><br>The content on the page drew narratives showing parallels between the Islamic State of Iraq and Syria (ISIS) and the MEK.<br><br>Apart from images and memes, the @StopMEK page shared a link to an archived report on how the United States was monitoring the MEK’s movement in Iran in the mid-1990’s. The file was embedded as a QR code on one of the page’s images.</i><br><br>In this example a Facebook page presented itself as focusing on a political cause (T0097.208: Social Cause Persona, T0151.001: Social Media Platform, T0151.002: Online Community Group). Within the page it embedded a QR code (T0122: Direct Users to Alternative Platforms, T0153.004: QR Code Asset), which took users to a document hosted on another website (T0152.004: Website Asset). |
|
||||
| [T0152.004 Website Asset](../../generated_pages/techniques/T0152.004.md) | IT00000539 <i>[Meta removed a network of assets for coordinated inauthentic behaviour. One page] in the network, @StopMEK, was promoting views against the People’s Mujahedin of Iran (MEK), the largest and most active political opposition group against the Islamic Republic of Iran Leadership.<br><br>The content on the page drew narratives showing parallels between the Islamic State of Iraq and Syria (ISIS) and the MEK.<br><br>Apart from images and memes, the @StopMEK page shared a link to an archived report on how the United States was monitoring the MEK’s movement in Iran in the mid-1990’s. The file was embedded as a QR code on one of the page’s images.</i><br><br>In this example a Facebook page presented itself as focusing on a political cause (T0097.208: Social Cause Persona, T0151.001: Social Media Platform, T0151.002: Online Community Group). Within the page it embedded a QR code (T0122: Direct Users to Alternative Platforms, T0153.004: QR Code Asset), which took users to a document hosted on another website (T0152.004: Website Asset). |
|
||||
| [T0153.004 QR Code Asset](../../generated_pages/techniques/T0153.004.md) | IT00000538 <i>[Meta removed a network of assets for coordinated inauthentic behaviour. One page] in the network, @StopMEK, was promoting views against the People’s Mujahedin of Iran (MEK), the largest and most active political opposition group against the Islamic Republic of Iran Leadership.<br><br>The content on the page drew narratives showing parallels between the Islamic State of Iraq and Syria (ISIS) and the MEK.<br><br>Apart from images and memes, the @StopMEK page shared a link to an archived report on how the United States was monitoring the MEK’s movement in Iran in the mid-1990’s. The file was embedded as a QR code on one of the page’s images.</i><br><br>In this example a Facebook page presented itself as focusing on a political cause (T0097.208: Social Cause Persona, T0151.001: Social Media Platform, T0151.002: Online Community Group). Within the page it embedded a QR code (T0122: Direct Users to Alternative Platforms, T0153.004: QR Code Asset), which took users to a document hosted on another website (T0152.004: Website Asset). |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -15,18 +15,18 @@
|
||||
|
||||
| Reference | Pub Date | Authors | Org | Archive |
|
||||
| --------- | -------- | ------- | --- | ------- |
|
||||
| [https://www.theguardian.com/technology/2021/mar/16/florida-teen-sentenced-twitter-bitcoin-hack](https://www.theguardian.com/technology/2021/mar/16/florida-teen-sentenced-twitter-bitcoin-hack) | 2021/03/16 | Kari Paul | The Guardian | [https://web.archive.org/web/20240920230915/https://www.theguardian.com/technology/2021/mar/16/florida-teen-sentenced-twitter-bitcoin-hack](https://web.archive.org/web/20240920230915/https://www.theguardian.com/technology/2021/mar/16/florida-teen-sentenced-twitter-bitcoin-hack) |
|
||||
|
||||
|
||||
|
||||
| Technique | Description given for this incident |
|
||||
| --------- | ------------------------- |
|
||||
| [T0143.003 Impersonated Persona](../../generated_pages/techniques/T0143.003.md) | IT00000544 <i>An 18-year-old hacker who pulled off a huge breach in 2020, infiltrating several high profile Twitter accounts to solicit bitcoin transactions, has agreed to serve three years in prison for his actions.<br><br>Graham Ivan Clark, of Florida, was 17 years old at the time of the hack in July, during which he took over a number of major accounts including those of Joe Biden, Bill Gates and Kim Kardashian West.<br><br>Once he accessed them, Clark tweeted a link to a bitcoin address and wrote “all bitcoin sent to our address below will be sent back to you doubled!” According to court documents, Clark made more than $100,000 from the scheme, which his lawyers say he has since returned.<br><br>Clark was able to access the accounts after convincing an employee at Twitter he worked in the company’s information technology department, according to the Tampa Bay Times.</i><br><br>In this example a threat actor gained access to Twitter’s customer service portal through social engineering (T0146.004: Administrator Account, T0150.005: Compromised, T0151.008: Microblogging Platform), which they used to take over accounts of public figures (T0146.003: Verified Account, T0143.003: Impersonated Persona, T0150.005: Compromised, T0151.008: Microblogging Platform).<br><br>The threat actor used these compromised accounts to trick their followers into sending bitcoin to their wallet (T0148.009: Cryptocurrency Wallet). |
|
||||
| [T0146.004 Administrator Account](../../generated_pages/techniques/T0146.004.md) | IT00000543 <i>An 18-year-old hacker who pulled off a huge breach in 2020, infiltrating several high profile Twitter accounts to solicit bitcoin transactions, has agreed to serve three years in prison for his actions.<br><br>Graham Ivan Clark, of Florida, was 17 years old at the time of the hack in July, during which he took over a number of major accounts including those of Joe Biden, Bill Gates and Kim Kardashian West.<br><br>Once he accessed them, Clark tweeted a link to a bitcoin address and wrote “all bitcoin sent to our address below will be sent back to you doubled!” According to court documents, Clark made more than $100,000 from the scheme, which his lawyers say he has since returned.<br><br>Clark was able to access the accounts after convincing an employee at Twitter he worked in the company’s information technology department, according to the Tampa Bay Times.</i><br><br>In this example a threat actor gained access to Twitter’s customer service portal through social engineering (T0146.004: Administrator Account, T0150.005: Compromised, T0151.008: Microblogging Platform), which they used to take over accounts of public figures (T0146.003: Verified Account, T0143.003: Impersonated Persona, T0150.005: Compromised, T0151.008: Microblogging Platform).<br><br>The threat actor used these compromised accounts to trick their followers into sending bitcoin to their wallet (T0148.009: Cryptocurrency Wallet). |
|
||||
| [T0146.005 Lookalike Account ID](../../generated_pages/techniques/T0146.005.md) | IT00000540 <i>An 18-year-old hacker who pulled off a huge breach in 2020, infiltrating several high profile Twitter accounts to solicit bitcoin transactions, has agreed to serve three years in prison for his actions.<br><br>Graham Ivan Clark, of Florida, was 17 years old at the time of the hack in July, during which he took over a number of major accounts including those of Joe Biden, Bill Gates and Kim Kardashian West.<br><br>Once he accessed them, Clark tweeted a link to a bitcoin address and wrote “all bitcoin sent to our address below will be sent back to you doubled!” According to court documents, Clark made more than $100,000 from the scheme, which his lawyers say he has since returned.<br><br>Clark was able to access the accounts after convincing an employee at Twitter he worked in the company’s information technology department, according to the Tampa Bay Times.</i><br><br>In this example a threat actor gained access to Twitter’s customer service portal through social engineering (T0146.004: Administrator Account, T0150.005: Compromised, T0151.008: Microblogging Platform), which they used to take over accounts of public figures (T0146.003: Verified Account, T0143.003: Impersonated Persona, T0150.005: Compromised, T0151.008: Microblogging Platform).<br><br>The threat actor used these compromised accounts to trick their followers into sending bitcoin to their wallet (T0148.009: Cryptocurrency Wallet). |
|
||||
| [T0148.009 Cryptocurrency Wallet](../../generated_pages/techniques/T0148.009.md) | IT00000546 <i>An 18-year-old hacker who pulled off a huge breach in 2020, infiltrating several high profile Twitter accounts to solicit bitcoin transactions, has agreed to serve three years in prison for his actions.<br><br>Graham Ivan Clark, of Florida, was 17 years old at the time of the hack in July, during which he took over a number of major accounts including those of Joe Biden, Bill Gates and Kim Kardashian West.<br><br>Once he accessed them, Clark tweeted a link to a bitcoin address and wrote “all bitcoin sent to our address below will be sent back to you doubled!” According to court documents, Clark made more than $100,000 from the scheme, which his lawyers say he has since returned.<br><br>Clark was able to access the accounts after convincing an employee at Twitter he worked in the company’s information technology department, according to the Tampa Bay Times.</i><br><br>In this example a threat actor gained access to Twitter’s customer service portal through social engineering (T0146.004: Administrator Account, T0150.005: Compromised, T0151.008: Microblogging Platform), which they used to take over accounts of public figures (T0146.003: Verified Account, T0143.003: Impersonated Persona, T0150.005: Compromised, T0151.008: Microblogging Platform).<br><br>The threat actor used these compromised accounts to trick their followers into sending bitcoin to their wallet (T0148.009: Cryptocurrency Wallet). |
|
||||
| [T0150.005 Compromised](../../generated_pages/techniques/T0150.005.md) | IT00000541 <i>An 18-year-old hacker who pulled off a huge breach in 2020, infiltrating several high profile Twitter accounts to solicit bitcoin transactions, has agreed to serve three years in prison for his actions.<br><br>Graham Ivan Clark, of Florida, was 17 years old at the time of the hack in July, during which he took over a number of major accounts including those of Joe Biden, Bill Gates and Kim Kardashian West.<br><br>Once he accessed them, Clark tweeted a link to a bitcoin address and wrote “all bitcoin sent to our address below will be sent back to you doubled!” According to court documents, Clark made more than $100,000 from the scheme, which his lawyers say he has since returned.<br><br>Clark was able to access the accounts after convincing an employee at Twitter he worked in the company’s information technology department, according to the Tampa Bay Times.</i><br><br>In this example a threat actor gained access to Twitter’s customer service portal through social engineering (T0146.004: Administrator Account, T0150.005: Compromised, T0151.008: Microblogging Platform), which they used to take over accounts of public figures (T0146.003: Verified Account, T0143.003: Impersonated Persona, T0150.005: Compromised, T0151.008: Microblogging Platform).<br><br>The threat actor used these compromised accounts to trick their followers into sending bitcoin to their wallet (T0148.009: Cryptocurrency Wallet). |
|
||||
| [T0150.005 Compromised](../../generated_pages/techniques/T0150.005.md) | IT00000545 <i>An 18-year-old hacker who pulled off a huge breach in 2020, infiltrating several high profile Twitter accounts to solicit bitcoin transactions, has agreed to serve three years in prison for his actions.<br><br>Graham Ivan Clark, of Florida, was 17 years old at the time of the hack in July, during which he took over a number of major accounts including those of Joe Biden, Bill Gates and Kim Kardashian West.<br><br>Once he accessed them, Clark tweeted a link to a bitcoin address and wrote “all bitcoin sent to our address below will be sent back to you doubled!” According to court documents, Clark made more than $100,000 from the scheme, which his lawyers say he has since returned.<br><br>Clark was able to access the accounts after convincing an employee at Twitter he worked in the company’s information technology department, according to the Tampa Bay Times.</i><br><br>In this example a threat actor gained access to Twitter’s customer service portal through social engineering (T0146.004: Administrator Account, T0150.005: Compromised, T0151.008: Microblogging Platform), which they used to take over accounts of public figures (T0146.003: Verified Account, T0143.003: Impersonated Persona, T0150.005: Compromised, T0151.008: Microblogging Platform).<br><br>The threat actor used these compromised accounts to trick their followers into sending bitcoin to their wallet (T0148.009: Cryptocurrency Wallet). |
|
||||
| [T0151.008 Microblogging Platform](../../generated_pages/techniques/T0151.008.md) | IT00000542 <i>An 18-year-old hacker who pulled off a huge breach in 2020, infiltrating several high profile Twitter accounts to solicit bitcoin transactions, has agreed to serve three years in prison for his actions.<br><br>Graham Ivan Clark, of Florida, was 17 years old at the time of the hack in July, during which he took over a number of major accounts including those of Joe Biden, Bill Gates and Kim Kardashian West.<br><br>Once he accessed them, Clark tweeted a link to a bitcoin address and wrote “all bitcoin sent to our address below will be sent back to you doubled!” According to court documents, Clark made more than $100,000 from the scheme, which his lawyers say he has since returned.<br><br>Clark was able to access the accounts after convincing an employee at Twitter he worked in the company’s information technology department, according to the Tampa Bay Times.</i><br><br>In this example a threat actor gained access to Twitter’s customer service portal through social engineering (T0146.004: Administrator Account, T0150.005: Compromised, T0151.008: Microblogging Platform), which they used to take over accounts of public figures (T0146.003: Verified Account, T0143.003: Impersonated Persona, T0150.005: Compromised, T0151.008: Microblogging Platform).<br><br>The threat actor used these compromised accounts to trick their followers into sending bitcoin to their wallet (T0148.009: Cryptocurrency Wallet). |
|
||||
| [T0143.003 Impersonated Persona](../../generated_pages/techniques/T0143.003.md) | IT00000544 <i>An 18-year-old hacker who pulled off a huge breach in 2020, infiltrating several high profile Twitter accounts to solicit bitcoin transactions, has agreed to serve three years in prison for his actions.<br><br>Graham Ivan Clark, of Florida, was 17 years old at the time of the hack in July, during which he took over a number of major accounts including those of Joe Biden, Bill Gates and Kim Kardashian West.<br><br>Once he accessed them, Clark tweeted a link to a bitcoin address and wrote “all bitcoin sent to our address below will be sent back to you doubled!” According to court documents, Clark made more than $100,000 from the scheme, which his lawyers say he has since returned.<br><br>Clark was able to access the accounts after convincing an employee at Twitter he worked in the company’s information technology department, according to the Tampa Bay Times.</i><br><br>In this example a threat actor gained access to Twitter’s customer service portal through social engineering (T0146.004: Administrator Account Asset, T0150.005: Compromised Asset, T0151.008: Microblogging Platform), which they used to take over accounts of public figures (T0146.003: Verified Account Asset, T0143.003: Impersonated Persona, T0150.005: Compromised Asset, T0151.008: Microblogging Platform).<br><br>The threat actor used these compromised accounts to trick their followers into sending bitcoin to their wallet (T0148.009: Cryptocurrency Wallet). |
|
||||
| [T0146.004 Administrator Account Asset](../../generated_pages/techniques/T0146.004.md) | IT00000543 <i>An 18-year-old hacker who pulled off a huge breach in 2020, infiltrating several high profile Twitter accounts to solicit bitcoin transactions, has agreed to serve three years in prison for his actions.<br><br>Graham Ivan Clark, of Florida, was 17 years old at the time of the hack in July, during which he took over a number of major accounts including those of Joe Biden, Bill Gates and Kim Kardashian West.<br><br>Once he accessed them, Clark tweeted a link to a bitcoin address and wrote “all bitcoin sent to our address below will be sent back to you doubled!” According to court documents, Clark made more than $100,000 from the scheme, which his lawyers say he has since returned.<br><br>Clark was able to access the accounts after convincing an employee at Twitter he worked in the company’s information technology department, according to the Tampa Bay Times.</i><br><br>In this example a threat actor gained access to Twitter’s customer service portal through social engineering (T0146.004: Administrator Account Asset, T0150.005: Compromised Asset, T0151.008: Microblogging Platform), which they used to take over accounts of public figures (T0146.003: Verified Account Asset, T0143.003: Impersonated Persona, T0150.005: Compromised Asset, T0151.008: Microblogging Platform).<br><br>The threat actor used these compromised accounts to trick their followers into sending bitcoin to their wallet (T0148.009: Cryptocurrency Wallet). |
|
||||
| [T0148.009 Cryptocurrency Wallet](../../generated_pages/techniques/T0148.009.md) | IT00000546 <i>An 18-year-old hacker who pulled off a huge breach in 2020, infiltrating several high profile Twitter accounts to solicit bitcoin transactions, has agreed to serve three years in prison for his actions.<br><br>Graham Ivan Clark, of Florida, was 17 years old at the time of the hack in July, during which he took over a number of major accounts including those of Joe Biden, Bill Gates and Kim Kardashian West.<br><br>Once he accessed them, Clark tweeted a link to a bitcoin address and wrote “all bitcoin sent to our address below will be sent back to you doubled!” According to court documents, Clark made more than $100,000 from the scheme, which his lawyers say he has since returned.<br><br>Clark was able to access the accounts after convincing an employee at Twitter he worked in the company’s information technology department, according to the Tampa Bay Times.</i><br><br>In this example a threat actor gained access to Twitter’s customer service portal through social engineering (T0146.004: Administrator Account Asset, T0150.005: Compromised Asset, T0151.008: Microblogging Platform), which they used to take over accounts of public figures (T0146.003: Verified Account Asset, T0143.003: Impersonated Persona, T0150.005: Compromised Asset, T0151.008: Microblogging Platform).<br><br>The threat actor used these compromised accounts to trick their followers into sending bitcoin to their wallet (T0148.009: Cryptocurrency Wallet). |
|
||||
| [T0150.005 Compromised Asset](../../generated_pages/techniques/T0150.005.md) | IT00000541 <i>An 18-year-old hacker who pulled off a huge breach in 2020, infiltrating several high profile Twitter accounts to solicit bitcoin transactions, has agreed to serve three years in prison for his actions.<br><br>Graham Ivan Clark, of Florida, was 17 years old at the time of the hack in July, during which he took over a number of major accounts including those of Joe Biden, Bill Gates and Kim Kardashian West.<br><br>Once he accessed them, Clark tweeted a link to a bitcoin address and wrote “all bitcoin sent to our address below will be sent back to you doubled!” According to court documents, Clark made more than $100,000 from the scheme, which his lawyers say he has since returned.<br><br>Clark was able to access the accounts after convincing an employee at Twitter he worked in the company’s information technology department, according to the Tampa Bay Times.</i><br><br>In this example a threat actor gained access to Twitter’s customer service portal through social engineering (T0146.004: Administrator Account Asset, T0150.005: Compromised Asset, T0151.008: Microblogging Platform), which they used to take over accounts of public figures (T0146.003: Verified Account Asset, T0143.003: Impersonated Persona, T0150.005: Compromised Asset, T0151.008: Microblogging Platform).<br><br>The threat actor used these compromised accounts to trick their followers into sending bitcoin to their wallet (T0148.009: Cryptocurrency Wallet). |
|
||||
| [T0150.005 Compromised Asset](../../generated_pages/techniques/T0150.005.md) | IT00000545 <i>An 18-year-old hacker who pulled off a huge breach in 2020, infiltrating several high profile Twitter accounts to solicit bitcoin transactions, has agreed to serve three years in prison for his actions.<br><br>Graham Ivan Clark, of Florida, was 17 years old at the time of the hack in July, during which he took over a number of major accounts including those of Joe Biden, Bill Gates and Kim Kardashian West.<br><br>Once he accessed them, Clark tweeted a link to a bitcoin address and wrote “all bitcoin sent to our address below will be sent back to you doubled!” According to court documents, Clark made more than $100,000 from the scheme, which his lawyers say he has since returned.<br><br>Clark was able to access the accounts after convincing an employee at Twitter he worked in the company’s information technology department, according to the Tampa Bay Times.</i><br><br>In this example a threat actor gained access to Twitter’s customer service portal through social engineering (T0146.004: Administrator Account Asset, T0150.005: Compromised Asset, T0151.008: Microblogging Platform), which they used to take over accounts of public figures (T0146.003: Verified Account Asset, T0143.003: Impersonated Persona, T0150.005: Compromised Asset, T0151.008: Microblogging Platform).<br><br>The threat actor used these compromised accounts to trick their followers into sending bitcoin to their wallet (T0148.009: Cryptocurrency Wallet). |
|
||||
| [T0151.008 Microblogging Platform](../../generated_pages/techniques/T0151.008.md) | IT00000542 <i>An 18-year-old hacker who pulled off a huge breach in 2020, infiltrating several high profile Twitter accounts to solicit bitcoin transactions, has agreed to serve three years in prison for his actions.<br><br>Graham Ivan Clark, of Florida, was 17 years old at the time of the hack in July, during which he took over a number of major accounts including those of Joe Biden, Bill Gates and Kim Kardashian West.<br><br>Once he accessed them, Clark tweeted a link to a bitcoin address and wrote “all bitcoin sent to our address below will be sent back to you doubled!” According to court documents, Clark made more than $100,000 from the scheme, which his lawyers say he has since returned.<br><br>Clark was able to access the accounts after convincing an employee at Twitter he worked in the company’s information technology department, according to the Tampa Bay Times.</i><br><br>In this example a threat actor gained access to Twitter’s customer service portal through social engineering (T0146.004: Administrator Account Asset, T0150.005: Compromised Asset, T0151.008: Microblogging Platform), which they used to take over accounts of public figures (T0146.003: Verified Account Asset, T0143.003: Impersonated Persona, T0150.005: Compromised Asset, T0151.008: Microblogging Platform).<br><br>The threat actor used these compromised accounts to trick their followers into sending bitcoin to their wallet (T0148.009: Cryptocurrency Wallet). |
|
||||
|
||||
|
||||
DO NOT EDIT ABOVE THIS LINE - PLEASE ADD NOTES BELOW
|
@ -49,22 +49,22 @@
|
||||
| [T0089 Obtain Private Documents](../../generated_pages/techniques/T0089.md) |
|
||||
| [T0089.001 Obtain Authentic Documents](../../generated_pages/techniques/T0089.001.md) |
|
||||
| [T0089.003 Alter Authentic Documents](../../generated_pages/techniques/T0089.003.md) |
|
||||
| [T0146 Account](../../generated_pages/techniques/T0146.md) |
|
||||
| [T0146.001 Free Account](../../generated_pages/techniques/T0146.001.md) |
|
||||
| [T0146.002 Paid Account](../../generated_pages/techniques/T0146.002.md) |
|
||||
| [T0146.003 Verified Account](../../generated_pages/techniques/T0146.003.md) |
|
||||
| [T0146.004 Administrator Account](../../generated_pages/techniques/T0146.004.md) |
|
||||
| [T0146 Account Asset](../../generated_pages/techniques/T0146.md) |
|
||||
| [T0146.001 Free Account Asset](../../generated_pages/techniques/T0146.001.md) |
|
||||
| [T0146.002 Paid Account Asset](../../generated_pages/techniques/T0146.002.md) |
|
||||
| [T0146.003 Verified Account Asset](../../generated_pages/techniques/T0146.003.md) |
|
||||
| [T0146.004 Administrator Account Asset](../../generated_pages/techniques/T0146.004.md) |
|
||||
| [T0146.005 Lookalike Account ID](../../generated_pages/techniques/T0146.005.md) |
|
||||
| [T0146.006 Open Access Platform](../../generated_pages/techniques/T0146.006.md) |
|
||||
| [T0146.007 Automated Account](../../generated_pages/techniques/T0146.007.md) |
|
||||
| [T0147 Software](../../generated_pages/techniques/T0147.md) |
|
||||
| [T0147.001 Game](../../generated_pages/techniques/T0147.001.md) |
|
||||
| [T0147.002 Game Mod](../../generated_pages/techniques/T0147.002.md) |
|
||||
| [T0147.003 Malware](../../generated_pages/techniques/T0147.003.md) |
|
||||
| [T0147.004 Mobile App](../../generated_pages/techniques/T0147.004.md) |
|
||||
| [T0146.007 Automated Account Asset](../../generated_pages/techniques/T0146.007.md) |
|
||||
| [T0147 Software Asset](../../generated_pages/techniques/T0147.md) |
|
||||
| [T0147.001 Game Asset](../../generated_pages/techniques/T0147.001.md) |
|
||||
| [T0147.002 Game Mod Asset](../../generated_pages/techniques/T0147.002.md) |
|
||||
| [T0147.003 Malware Asset](../../generated_pages/techniques/T0147.003.md) |
|
||||
| [T0147.004 Mobile App Asset](../../generated_pages/techniques/T0147.004.md) |
|
||||
| [T0148 Financial Instrument](../../generated_pages/techniques/T0148.md) |
|
||||
| [T0148.001 Online Banking Platform](../../generated_pages/techniques/T0148.001.md) |
|
||||
| [T0148.002 Bank Account](../../generated_pages/techniques/T0148.002.md) |
|
||||
| [T0148.002 Bank Account Asset](../../generated_pages/techniques/T0148.002.md) |
|
||||
| [T0148.003 Payment Processing Platform](../../generated_pages/techniques/T0148.003.md) |
|
||||
| [T0148.004 Payment Processing Capability](../../generated_pages/techniques/T0148.004.md) |
|
||||
| [T0148.005 Subscription Processing Capability](../../generated_pages/techniques/T0148.005.md) |
|
||||
@ -73,24 +73,24 @@
|
||||
| [T0148.008 Cryptocurrency Exchange Platform](../../generated_pages/techniques/T0148.008.md) |
|
||||
| [T0148.009 Cryptocurrency Wallet](../../generated_pages/techniques/T0148.009.md) |
|
||||
| [T0149 Online Infrastructure](../../generated_pages/techniques/T0149.md) |
|
||||
| [T0149.001 Domain](../../generated_pages/techniques/T0149.001.md) |
|
||||
| [T0149.002 Email Domain](../../generated_pages/techniques/T0149.002.md) |
|
||||
| [T0149.001 Domain Asset](../../generated_pages/techniques/T0149.001.md) |
|
||||
| [T0149.002 Email Domain Asset](../../generated_pages/techniques/T0149.002.md) |
|
||||
| [T0149.003 Lookalike Domain](../../generated_pages/techniques/T0149.003.md) |
|
||||
| [T0149.004 Redirecting Domain](../../generated_pages/techniques/T0149.004.md) |
|
||||
| [T0149.005 Server](../../generated_pages/techniques/T0149.005.md) |
|
||||
| [T0149.006 IP Address](../../generated_pages/techniques/T0149.006.md) |
|
||||
| [T0149.007 VPN](../../generated_pages/techniques/T0149.007.md) |
|
||||
| [T0149.008 Proxy IP Address](../../generated_pages/techniques/T0149.008.md) |
|
||||
| [T0149.004 Redirecting Domain Asset](../../generated_pages/techniques/T0149.004.md) |
|
||||
| [T0149.005 Server Asset](../../generated_pages/techniques/T0149.005.md) |
|
||||
| [T0149.006 IP Address Asset](../../generated_pages/techniques/T0149.006.md) |
|
||||
| [T0149.007 VPN Asset](../../generated_pages/techniques/T0149.007.md) |
|
||||
| [T0149.008 Proxy IP Address Asset](../../generated_pages/techniques/T0149.008.md) |
|
||||
| [T0149.009 Internet Connected Physical Asset](../../generated_pages/techniques/T0149.009.md) |
|
||||
| [T0150 Asset Origin](../../generated_pages/techniques/T0150.md) |
|
||||
| [T0150.001 Newly Created](../../generated_pages/techniques/T0150.001.md) |
|
||||
| [T0150.002 Dormant](../../generated_pages/techniques/T0150.002.md) |
|
||||
| [T0150.003 Pre-Existing](../../generated_pages/techniques/T0150.003.md) |
|
||||
| [T0150.004 Repurposed](../../generated_pages/techniques/T0150.004.md) |
|
||||
| [T0150.005 Compromised](../../generated_pages/techniques/T0150.005.md) |
|
||||
| [T0150.006 Purchased](../../generated_pages/techniques/T0150.006.md) |
|
||||
| [T0150.007 Rented](../../generated_pages/techniques/T0150.007.md) |
|
||||
| [T0150.008 Bulk Created](../../generated_pages/techniques/T0150.008.md) |
|
||||
| [T0150.001 Newly Created Asset](../../generated_pages/techniques/T0150.001.md) |
|
||||
| [T0150.002 Dormant Asset](../../generated_pages/techniques/T0150.002.md) |
|
||||
| [T0150.003 Pre-Existing Asset](../../generated_pages/techniques/T0150.003.md) |
|
||||
| [T0150.004 Repurposed Asset](../../generated_pages/techniques/T0150.004.md) |
|
||||
| [T0150.005 Compromised Asset](../../generated_pages/techniques/T0150.005.md) |
|
||||
| [T0150.006 Purchased Asset](../../generated_pages/techniques/T0150.006.md) |
|
||||
| [T0150.007 Rented Asset](../../generated_pages/techniques/T0150.007.md) |
|
||||
| [T0150.008 Bulk Created Asset](../../generated_pages/techniques/T0150.008.md) |
|
||||
|
||||
|
||||
|
||||
|
@ -42,9 +42,9 @@
|
||||
| [T0151.017 Dating Platform](../../generated_pages/techniques/T0151.017.md) |
|
||||
| [T0152 Digital Content Hosting Asset](../../generated_pages/techniques/T0152.md) |
|
||||
| [T0152.001 Blogging Platform](../../generated_pages/techniques/T0152.001.md) |
|
||||
| [T0152.002 Blog](../../generated_pages/techniques/T0152.002.md) |
|
||||
| [T0152.002 Blog Asset](../../generated_pages/techniques/T0152.002.md) |
|
||||
| [T0152.003 Website Hosting Platform](../../generated_pages/techniques/T0152.003.md) |
|
||||
| [T0152.004 Website](../../generated_pages/techniques/T0152.004.md) |
|
||||
| [T0152.004 Website Asset](../../generated_pages/techniques/T0152.004.md) |
|
||||
| [T0152.005 Paste Platform](../../generated_pages/techniques/T0152.005.md) |
|
||||
| [T0152.006 Video Platform](../../generated_pages/techniques/T0152.006.md) |
|
||||
| [T0152.007 Audio Platform](../../generated_pages/techniques/T0152.007.md) |
|
||||
@ -56,8 +56,8 @@
|
||||
| [T0153 Digital Content Delivery Asset](../../generated_pages/techniques/T0153.md) |
|
||||
| [T0153.001 Email Platform](../../generated_pages/techniques/T0153.001.md) |
|
||||
| [T0153.002 Link Shortening Platform](../../generated_pages/techniques/T0153.002.md) |
|
||||
| [T0153.003 Shortened Link](../../generated_pages/techniques/T0153.003.md) |
|
||||
| [T0153.004 QR Code](../../generated_pages/techniques/T0153.004.md) |
|
||||
| [T0153.003 Shortened Link Asset](../../generated_pages/techniques/T0153.003.md) |
|
||||
| [T0153.004 QR Code Asset](../../generated_pages/techniques/T0153.004.md) |
|
||||
| [T0153.005 Online Advertising Platform](../../generated_pages/techniques/T0153.005.md) |
|
||||
| [T0153.006 Content Recommendation Algorithm](../../generated_pages/techniques/T0153.006.md) |
|
||||
| [T0153.007 Direct Messaging](../../generated_pages/techniques/T0153.007.md) |
|
||||
@ -65,12 +65,12 @@
|
||||
| [T0154.001 AI LLM Platform](../../generated_pages/techniques/T0154.001.md) |
|
||||
| [T0154.002 AI Media Platform](../../generated_pages/techniques/T0154.002.md) |
|
||||
| [T0155 Gated Asset](../../generated_pages/techniques/T0155.md) |
|
||||
| [T0155.001 Password Gated](../../generated_pages/techniques/T0155.001.md) |
|
||||
| [T0155.002 Invite Gated](../../generated_pages/techniques/T0155.002.md) |
|
||||
| [T0155.003 Approval Gated](../../generated_pages/techniques/T0155.003.md) |
|
||||
| [T0155.004 Geoblocked](../../generated_pages/techniques/T0155.004.md) |
|
||||
| [T0155.005 Paid Access](../../generated_pages/techniques/T0155.005.md) |
|
||||
| [T0155.006 Subscription Access](../../generated_pages/techniques/T0155.006.md) |
|
||||
| [T0155.001 Password Gated Asset](../../generated_pages/techniques/T0155.001.md) |
|
||||
| [T0155.002 Invite Gated Asset](../../generated_pages/techniques/T0155.002.md) |
|
||||
| [T0155.003 Approval Gated Asset](../../generated_pages/techniques/T0155.003.md) |
|
||||
| [T0155.004 Geoblocked Asset](../../generated_pages/techniques/T0155.004.md) |
|
||||
| [T0155.005 Paid Access Asset](../../generated_pages/techniques/T0155.005.md) |
|
||||
| [T0155.006 Subscription Access Asset](../../generated_pages/techniques/T0155.006.md) |
|
||||
| [T0155.007 Encrypted Communication Channel](../../generated_pages/techniques/T0155.007.md) |
|
||||
|
||||
|
||||
|
@ -7,8 +7,6 @@
|
||||
|
||||
| Incident | Descriptions given for this incident |
|
||||
| -------- | -------------------- |
|
||||
| [I00033 China 50cent Army](../../generated_pages/incidents/I00033.md) | facilitate state propaganda and defuse crises |
|
||||
| [I00034 DibaFacebookExpedition](../../generated_pages/incidents/I00034.md) | Netizens from one of the largest discussion forums in China, known as Diba, coordinated to overcome China’s Great Firewall |
|
||||
|
||||
|
||||
|
||||
|
@ -7,17 +7,6 @@
|
||||
|
||||
| Incident | Descriptions given for this incident |
|
||||
| -------- | -------------------- |
|
||||
| [I00005 Brexit vote](../../generated_pages/incidents/I00005.md) | cultivate, manipulate, exploit useful idiots |
|
||||
| [I00007 Incirlik terrorists](../../generated_pages/incidents/I00007.md) | cultivate, manipulate, exploit useful idiots (in the case Paul Manafort) |
|
||||
| [I00010 ParklandTeens](../../generated_pages/incidents/I00010.md) | cultivate, manipulate, exploit useful idiots (Alex Jones... drives conspiracy theories; false flags, crisis actors) |
|
||||
| [I00017 US presidential elections](../../generated_pages/incidents/I00017.md) | cultivate, manipulate, exploit useful idiots |
|
||||
| [I00029 MH17 investigation](../../generated_pages/incidents/I00029.md) | cultivate, manipulate, exploit useful idiots |
|
||||
| [I00032 Kavanaugh](../../generated_pages/incidents/I00032.md) | cultivate, manipulate, exploit useful idiots (Alex Jones... drives conspiracy theories) |
|
||||
| [I00044 JadeHelm exercise](../../generated_pages/incidents/I00044.md) | cultivate, manipulate, exploit useful idiots (Alex Jones... drives conspiracy theories) |
|
||||
| [I00049 White Helmets: Chemical Weapons](../../generated_pages/incidents/I00049.md) | cultivate, manipulate, exploit useful idiots (Roger Waters; Vanessa Beeley...) |
|
||||
| [I00050 #HandsOffVenezuela](../../generated_pages/incidents/I00050.md) | cultivate, manipulate, exploit useful idiots (Roger Waters) |
|
||||
| [I00051 Integrity Initiative](../../generated_pages/incidents/I00051.md) | cultivate, manipulate, exploit useful idiots |
|
||||
| [I00063 Olympic Doping Scandal](../../generated_pages/incidents/I00063.md) | cultivate, manipulate, exploit useful idiots |
|
||||
|
||||
|
||||
|
||||
|
@ -7,7 +7,6 @@
|
||||
|
||||
| Incident | Descriptions given for this incident |
|
||||
| -------- | -------------------- |
|
||||
| [I00006 Columbian Chemicals](../../generated_pages/incidents/I00006.md) | Create and use hashtag |
|
||||
| [I00086 #WeAreNotSafe – Exposing How a Post-October 7th Disinformation Network Operates on Israeli Social Media](../../generated_pages/incidents/I00086.md) | In this report accounts were identified as part of “a sophisticated and extensive coordinated network orchestrating a disinformation campaign targeting Israeli digital spaces since October 7th, 2023”, which posted hashtags alongside campaign content (T0015: Create Hashtags and Search Artefacts):<br><br><i>“The accounts post generic images to fill their account feed to make the account seem real. They then employ a hidden hashtag in their posts, consisting of a seemingly random string of numbers and letters.<br><br>“The hypothesis regarding this tactic is that the group orchestrating these accounts utilizes these hashtags as a means of indexing them. This system likely serves a dual purpose: firstly, to keep track of the network’s expansive network of accounts and unique posts, and secondly, to streamline the process of boosting engagement among these accounts. By searching for these specific, unique hashtags, the group can quickly locate posts from their network and engage with them using other fake accounts, thereby artificially inflating the visibility and perceived authenticity of the fake account.”</i> |
|
||||
|
||||
|
||||
|
@ -7,7 +7,6 @@
|
||||
|
||||
| Incident | Descriptions given for this incident |
|
||||
| -------- | -------------------- |
|
||||
| [I00017 US presidential elections](../../generated_pages/incidents/I00017.md) | Click-bait (economic actors) fake news sites (ie: Denver Guardian; Macedonian teens) |
|
||||
| [I00079 Three thousand fake tanks](../../generated_pages/incidents/I00079.md) | <i>“On January 4 [2017], however, the Donbas News International (DNI) agency, based in Donetsk, Ukraine, and (since September 2016) an official state media outlet of the unrecognized separatist Donetsk People’s Republic, ran an article under the sensational headline, “US sends 3,600 tanks against Russia — massive NATO deployment under way.” DNI is run by Finnish exile Janus Putkonen, described by the Finnish national broadcaster, YLE, as a “Finnish info warrior”, and the first foreigner to be granted a Donetsk passport.<br><br>“The equally sensational opening paragraph ran, “The NATO war preparation against Russia, ‘Operation Atlantic Resolve’, is in full swing. 2,000 US tanks will be sent in coming days from Germany to Eastern Europe, and 1,600 US tanks is deployed to storage facilities in the Netherlands. At the same time, NATO countries are sending thousands of soldiers in to Russian borders.”<br><br>“The report is based around an obvious factual error, conflating the total number of vehicles with the actual number of tanks, and therefore multiplying the actual tank force 20 times over. For context, military website globalfirepower.com puts the total US tank force at 8,848. If the DNI story had been true, it would have meant sending 40% of all the US’ main battle tanks to Europe in one go.<br><br>“Could this have been an innocent mistake? The simple answer is “no”. The journalist who penned the story had a sufficient command of the details to be able to write, later in the same article, “In January, 26 tanks, 100 other vehicles and 120 containers will be transported by train to Lithuania. Germany will send the 122nd Infantry Battalion.” Yet the same author apparently believed, in the headline and first paragraph, that every single vehicle in Atlantic Resolve is a tank. 
To call this an innocent mistake is simply not plausible.<br><br>“The DNI story can only realistically be considered a deliberate fake designed to caricaturize and demonize NATO, the United States and Germany (tactfully referred to in the report as having “rolled over Eastern Europe in its war of extermination 75 years ago”) by grossly overstating the number of MBTs involved.”</i><br><br>This behaviour matches T0016: Create Clickbait because the person who wrote the story is shown to be aware of the fact that there were non-tank vehicles later in their story, but still chose to give the article a sensationalist headline claiming that all vehicles being sent were tanks. |
|
||||
|
||||
|
||||
|
@ -7,8 +7,7 @@
|
||||
|
||||
| Incident | Descriptions given for this incident |
|
||||
| -------- | -------------------- |
|
||||
| [I00002 #VaccinateUS](../../generated_pages/incidents/I00002.md) | Promote "funding" campaign |
|
||||
| [I00110 How COVID-19 conspiracists and extremists use crowdfunding platforms to fund their activities](../../generated_pages/incidents/I00110.md) | The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:<br><br><br><br><i>More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.</i><br><br>A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account, T0148.006: Crowdfunding Platform).<br><br><i>On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.<br><br>We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”</i><br><br>The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account, T0097.202: News Outlet Persona, T0121.001: Bypass Content Bocking, T0152.012: Subscription Service Platform).<br><br><i>On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133.903 for a book called Revolution Q. 
This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”</i><br><br>An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). |
|
||||
| [I00110 How COVID-19 conspiracists and extremists use crowdfunding platforms to fund their activities](../../generated_pages/incidents/I00110.md) | The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:<br><br><i>More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.</i><br><br>A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account Asset, T0148.006: Crowdfunding Platform).<br><br><i>On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.<br><br>We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”</i><br><br>The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account Asset, T0097.202: News Outlet Persona, T0121.001: Bypass Content Bocking, T0152.012: Subscription Service Platform).<br><br><i>On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133.903 for a book called Revolution Q. 
This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”</i><br><br>An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account Asset, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). |
|
||||
|
||||
|
||||
|
||||
|
@ -7,9 +7,6 @@
|
||||
|
||||
| Incident | Descriptions given for this incident |
|
||||
| -------- | -------------------- |
|
||||
| [I00002 #VaccinateUS](../../generated_pages/incidents/I00002.md) | buy FB targeted ads |
|
||||
| [I00005 Brexit vote](../../generated_pages/incidents/I00005.md) | Targeted FB paid ads |
|
||||
| [I00017 US presidential elections](../../generated_pages/incidents/I00017.md) | Targeted FB paid ads |
|
||||
| [I00097 Report: Not Just Algorithms](../../generated_pages/incidents/I00097.md) | <i>This report explores the role of four systems (recommender systems, content moderation systems, ad approval systems and ad management systems) in creating risks around eating disorders.<br><br>[...]<br><br>Ad approval systems can create risks. We created 12 ‘fake’ ads that promoted dangerous weight loss techniques and behaviours. We tested to see if these ads would be approved to run, and they were. This means dangerous behaviours can be promoted in paid-for advertising. (Requests to run ads were withdrawn after approval or rejection, so no dangerous advertising was published as a result of this experiment.)<br><br>Specifically: On TikTok, 100% of the ads were approved to run; On Facebook, 83% of the ads were approved to run; On Google, 75% of the ads were approved to run.<br><br>Ad management systems can create risks. We investigated how platforms allow advertisers to target users, and found that it is possible to target people who may be interested in pro-eating disorder content.<br><br>Specifically: On TikTok: End-users who interact with pro-eating disorder content on TikTok, download advertisers’ eating disorder apps or visit their websites can be targeted; On Meta: End-users who interact with pro-eating disorder content on Meta, download advertisers’ eating disorder apps or visit their websites can be targeted; On X: End-users who follow pro- eating disorder accounts, or ‘look’ like them, can be targeted; On Google: End-users who search specific words or combinations of words (including pro-eating disorder words), watch pro-eating disorder YouTube channels and probably those who download eating disorder and mental health apps can be targeted.</i><br><br>Advertising platforms managed by TikTok, Facebook, and Google approved adverts to be displayed on their platforms. 
These platforms enabled users to deliver targeted advertising to potentially vulnerable platform users (T0018: Purchase Targeted Advertisements, T0153.005: Online Advertising Platform). |
|
||||
|
||||
|
||||
|
@ -7,10 +7,6 @@
|
||||
|
||||
| Incident | Descriptions given for this incident |
|
||||
| -------- | -------------------- |
|
||||
| [I00010 ParklandTeens](../../generated_pages/incidents/I00010.md) | 4Chan/8Chan - trial content |
|
||||
| [I00017 US presidential elections](../../generated_pages/incidents/I00017.md) | 4Chan/8Chan - trial content |
|
||||
| [I00032 Kavanaugh](../../generated_pages/incidents/I00032.md) | 4Chan/8Chan - trial content |
|
||||
| [I00044 JadeHelm exercise](../../generated_pages/incidents/I00044.md) | 4Chan/8Chan - trial content |
|
||||
|
||||
|
||||
|
||||
|
@ -7,7 +7,6 @@
|
||||
|
||||
| Incident | Descriptions given for this incident |
|
||||
| -------- | -------------------- |
|
||||
| [I00056 Iran Influence Operations](../../generated_pages/incidents/I00056.md) | Memes... anti-Israel/USA/West, conspiracy narratives |
|
||||
|
||||
|
||||
|
||||
|
@ -7,8 +7,6 @@
|
||||
|
||||
| Incident | Descriptions given for this incident |
|
||||
| -------- | -------------------- |
|
||||
| [I00047 Sea of Azov](../../generated_pages/incidents/I00047.md) | (Distort) Kremlin-controlled RT cited Russian Minister of Foreign Affairs Sergei Lavrov suggesting that Ukraine deliberately provoked Russia in hopes of gaining additional support from the United States and Europe. |
|
||||
| [I00053 China Huawei CFO Arrest](../../generated_pages/incidents/I00053.md) | Distorted, saccharine “news” about the Chinese State and Party |
|
||||
| [I00079 Three thousand fake tanks](../../generated_pages/incidents/I00079.md) | <i>“On January 4 [2017], a little-known news site based in Donetsk, Ukraine published an article claiming that the United States was sending 3,600 tanks to Europe as part of “the NATO war preparation against Russia”.<br><br> “Like much fake news, this story started with a grain of truth: the US was about to reinforce its armored units in Europe. However, the article converted literally thousands of other vehicles — including hundreds of Humvees and trailers — into tanks, building the US force into something 20 times more powerful than it actually was.<br><br> “The story caught on online. Within three days it had been repeated by a dozen websites in the United States, Canada and Europe, and shared some 40,000 times. It was translated into Norwegian; quoted, unchallenged, by Russian state news agency RIA Novosti; and spread among Russian-language websites.<br><br> “It was also an obvious fake, as any Google news search would have revealed. Yet despite its evident falsehood, it spread widely, and not just in directly Kremlin-run media. Tracking the spread of this fake therefore shines a light on the wider question of how fake stories are dispersed.”</i><br><br> Russian state news agency RIA Novosti presents themselves as a news outlet (T0097.202: News Outlet Persona). RIO Novosti is a real news outlet (T0143.001: Authentic Persona), but it did not carry out a basic investigation into the veracity of the narrative they published implicitly expected of institutions presenting themselves as news outlets.<br><br> We can’t know how or why this narrative ended up being published by RIA Novosti, but we know that it presented a distorted reality as authentic information (T0023: Distort Facts), claiming that the US was sending 3,600 tanks, instead of 3,600 vehicles which included ~180 tanks. |
|
||||
|
||||
|
||||
|
@ -7,8 +7,6 @@
|
||||
|
||||
| Incident | Descriptions given for this incident |
|
||||
| -------- | -------------------- |
|
||||
| [I00005 Brexit vote](../../generated_pages/incidents/I00005.md) | manipulate social media "online polls"? |
|
||||
| [I00017 US presidential elections](../../generated_pages/incidents/I00017.md) | manipulate social media "online polls"? |
|
||||
|
||||
|
||||
|
||||
|
@ -7,9 +7,6 @@
|
||||
|
||||
| Incident | Descriptions given for this incident |
|
||||
| -------- | -------------------- |
|
||||
| [I00006 Columbian Chemicals](../../generated_pages/incidents/I00006.md) | bait journalists/media/politicians |
|
||||
| [I00010 ParklandTeens](../../generated_pages/incidents/I00010.md) | journalist/media baiting |
|
||||
| [I00015 ConcordDiscovery](../../generated_pages/incidents/I00015.md) | journalist/media baiting |
|
||||
|
||||
|
||||
|
||||
|
@ -7,8 +7,6 @@
|
||||
|
||||
| Incident | Descriptions given for this incident |
|
||||
| -------- | -------------------- |
|
||||
| [I00029 MH17 investigation](../../generated_pages/incidents/I00029.md) | Demand insurmountable proof |
|
||||
| [I00047 Sea of Azov](../../generated_pages/incidents/I00047.md) | Demand insurmountable proof |
|
||||
|
||||
|
||||
|
||||
|
@ -7,7 +7,6 @@
|
||||
|
||||
| Incident | Descriptions given for this incident |
|
||||
| -------- | -------------------- |
|
||||
| [I00015 ConcordDiscovery](../../generated_pages/incidents/I00015.md) | Circulate to media via DM, then release publicly |
|
||||
|
||||
|
||||
|
||||
|
@ -7,7 +7,6 @@
|
||||
|
||||
| Incident | Descriptions given for this incident |
|
||||
| -------- | -------------------- |
|
||||
| [I00009 PhilippinesExpert](../../generated_pages/incidents/I00009.md) | Using "expert" |
|
||||
|
||||
|
||||
|
||||
|
@ -7,18 +7,6 @@
|
||||
|
||||
| Incident | Descriptions given for this incident |
|
||||
| -------- | -------------------- |
|
||||
| [I00002 #VaccinateUS](../../generated_pages/incidents/I00002.md) | SEO optimisation/manipulation ("key words") |
|
||||
| [I00005 Brexit vote](../../generated_pages/incidents/I00005.md) | SEO optimisation/manipulation ("key words") |
|
||||
| [I00010 ParklandTeens](../../generated_pages/incidents/I00010.md) | SEO optimisation/manipulation ("key words") |
|
||||
| [I00017 US presidential elections](../../generated_pages/incidents/I00017.md) | SEO optimisation/manipulation ("key words") |
|
||||
| [I00029 MH17 investigation](../../generated_pages/incidents/I00029.md) | SEO optimisation/manipulation ("key words") |
|
||||
| [I00032 Kavanaugh](../../generated_pages/incidents/I00032.md) | SEO optimisation/manipulation ("key words") |
|
||||
| [I00044 JadeHelm exercise](../../generated_pages/incidents/I00044.md) | SEO optimisation/manipulation ("key words") |
|
||||
| [I00049 White Helmets: Chemical Weapons](../../generated_pages/incidents/I00049.md) | SEO optimisation/manipulation ("key words") |
|
||||
| [I00050 #HandsOffVenezuela](../../generated_pages/incidents/I00050.md) | SEO optimisation/manipulation ("key words") |
|
||||
| [I00051 Integrity Initiative](../../generated_pages/incidents/I00051.md) | SEO optimisation/manipulation ("key words") |
|
||||
| [I00056 Iran Influence Operations](../../generated_pages/incidents/I00056.md) | SEO optimisation/manipulation ("key words") |
|
||||
| [I00063 Olympic Doping Scandal](../../generated_pages/incidents/I00063.md) | SEO optimisation/manipulation ("key words") |
|
||||
|
||||
|
||||
|
||||
|
@ -7,7 +7,6 @@
|
||||
|
||||
| Incident | Descriptions given for this incident |
|
||||
| -------- | -------------------- |
|
||||
| [I00033 China 50cent Army](../../generated_pages/incidents/I00033.md) | cow online opinion leaders into submission, muzzling social media as a political force |
|
||||
|
||||
|
||||
|
||||
|
@ -7,7 +7,6 @@
|
||||
|
||||
| Incident | Descriptions given for this incident |
|
||||
| -------- | -------------------- |
|
||||
| [I00033 China 50cent Army](../../generated_pages/incidents/I00033.md) | cow online opinion leaders into submission, muzzling social media as a political force |
|
||||
| [I00122 The Extreme Right on Discord](../../generated_pages/incidents/I00122.md) | Discord is an example of a T0151.004: Chat Platform, which allows users to create their own T0151.005: Chat Community Server. The Institute for Strategic Dialog (ISD) conducted an investigation into the extreme right’s usage of Discord servers:<br><br><i>Discord is a free service accessible via phones and computers. It allows users to talk to each other in real time via voice, text or video chat and emerged in 2015 as a platform designed to assist gamers in communicating with each other while playing video games. The popularity of the platform has surged in recent years, and it is currently estimated to have 140 million monthly active users.<br><br>Chatrooms – known as servers - in the platform can be created by anyone, and they are used for a range of purposes that extend far beyond gaming. Such purposes include the discussion of extreme right-wing ideologies and the planning of offline extremist activity. Ahead of the far-right Unite the Right rally in Charlottesville, Virginia, in August 2017, organisers used Discord to plan and promote events and posted swastikas and praised Hitler in chat rooms with names like “National Socialist Army” and “Führer’s Gas Chamber”.</i><br><br>In this example a Discord server was used to organise the 2017 Charlottesville Unite the Right rally. Chat rooms such in the server were used to discuss different topics related to the rally (T0057: Organise Events, T0126.002: Facilitate Logistics or Support for Attendance, T0151.004: Chat Platform, T0151.005: Chat Community Server, T0151.006: Chat Room).<br><br><i>Another primary activity engaged in the servers analysed are raids against other servers associated with political opponents, and in particular those that appear to be pro-LGBTQ. 
Raids are a phenomenon in which a small group of users will join a Discord server with the sole purpose of spamming the host with offensive or incendiary messages and content with the aim of upsetting local users or having the host server banned by Discord. On two servers examined here, raiding was their primary function.<br><br>Among servers devoted to this activity, specific channels were often created to host links to servers that users were then encouraged to raid. Users are encouraged to be as offensive as possible with the aim of upsetting or angering users on the raided server, and channels often had content banks of offensive memes and content to be shared on raided servers.<br><br>The use of raids demonstrates the gamified nature of extremist activity on Discord, where use of the platform and harassment of political opponents is itself turned into a type of real-life video game designed to strengthen in-group affiliation. This combined with the broader extremist activity identified in these channels suggests that the combative activity of raiding could provide a pathway for younger people to become more engaged with extremist activity.</i><br><br>Discord servers were used by members of the extreme right to coordinate harassment of targeted communities (T0048: Harass, T0049.005: Conduct Swarming, T0151.004: Chat Platform, T0151.005: Chat Community Server). |
|
||||
| [I00123 The Extreme Right on Steam](../../generated_pages/incidents/I00123.md) | ISD conducted an investigation into the usage of social groups on Steam. Steam is an online platform used to buy and sell digital games, and includes the Steam community feature, which “allows users to find friends and join groups and discussion forums, while also offering in-game voice and text chat”. Actors have used Steam’s social capabilities to enable online harm campaigns:<br><br><i>One function of these Steam groups is the organisation of ‘raids’ – coordinated trolling activity against their political opponents. An example of this can be seen in a white power music group sharing a link to an Israeli Steam group, encouraging other members to “help me raid this juden [German word for Jew] group”. The comments section of said target group show that neo-Nazi and antisemitic comments were consistently posted in the group just two minutes after the instruction had been posted in the extremist group, highlighting the swiftness with which racially motivated harassment can be directed online.</i><br><br>Threat actors used social groups on Steam to organise harassment of targets (T0152.009: Software Delivery Platform, T0151.002: Online Community Group, T0049.005: Conduct Swarming, T0048: Harass). |
|
||||
|
||||
|
@ -7,8 +7,6 @@
|
||||
|
||||
| Incident | Descriptions given for this incident |
|
||||
| -------- | -------------------- |
|
||||
| [I00033 China 50cent Army](../../generated_pages/incidents/I00033.md) | 2,000,000 people (est.) part of state run/sponsored astroturfing |
|
||||
| [I00034 DibaFacebookExpedition](../../generated_pages/incidents/I00034.md) | flood the Facebook pages of Taiwanese politicians and news agencies with a pro-PRC message, Democratic Progressive Party (DPP), attracted nearly 40,000 Facebook comments in just eight hours. |
|
||||
|
||||
|
||||
|
||||
|
@ -7,10 +7,6 @@
|
||||
|
||||
| Incident | Descriptions given for this incident |
|
||||
| -------- | -------------------- |
|
||||
| [I00005 Brexit vote](../../generated_pages/incidents/I00005.md) | Digital to physical "organize+promote" rallies & events? |
|
||||
| [I00017 US presidential elections](../../generated_pages/incidents/I00017.md) | Digital to physical "organize+promote" rallies & events |
|
||||
| [I00032 Kavanaugh](../../generated_pages/incidents/I00032.md) | Digital to physical "organize+promote" rallies & events? |
|
||||
| [I00053 China Huawei CFO Arrest](../../generated_pages/incidents/I00053.md) | Events coordinated and promoted across media platforms, Extend digital the physical space… gatherings ie: support for Meng outside courthouse |
|
||||
| [I00122 The Extreme Right on Discord](../../generated_pages/incidents/I00122.md) | Discord is an example of a T0151.004: Chat Platform, which allows users to create their own T0151.005: Chat Community Server. The Institute for Strategic Dialogue (ISD) conducted an investigation into the extreme right’s usage of Discord servers:<br><br><i>Discord is a free service accessible via phones and computers. It allows users to talk to each other in real time via voice, text or video chat and emerged in 2015 as a platform designed to assist gamers in communicating with each other while playing video games. The popularity of the platform has surged in recent years, and it is currently estimated to have 140 million monthly active users.<br><br>Chatrooms – known as servers - in the platform can be created by anyone, and they are used for a range of purposes that extend far beyond gaming. Such purposes include the discussion of extreme right-wing ideologies and the planning of offline extremist activity. Ahead of the far-right Unite the Right rally in Charlottesville, Virginia, in August 2017, organisers used Discord to plan and promote events and posted swastikas and praised Hitler in chat rooms with names like “National Socialist Army” and “Führer’s Gas Chamber”.</i><br><br>In this example a Discord server was used to organise the 2017 Charlottesville Unite the Right rally. Chat rooms in the server were used to discuss different topics related to the rally (T0057: Organise Events, T0126.002: Facilitate Logistics or Support for Attendance, T0151.004: Chat Platform, T0151.005: Chat Community Server, T0151.006: Chat Room).<br><br><i>Another primary activity engaged in the servers analysed are raids against other servers associated with political opponents, and in particular those that appear to be pro-LGBTQ. 
Raids are a phenomenon in which a small group of users will join a Discord server with the sole purpose of spamming the host with offensive or incendiary messages and content with the aim of upsetting local users or having the host server banned by Discord. On two servers examined here, raiding was their primary function.<br><br>Among servers devoted to this activity, specific channels were often created to host links to servers that users were then encouraged to raid. Users are encouraged to be as offensive as possible with the aim of upsetting or angering users on the raided server, and channels often had content banks of offensive memes and content to be shared on raided servers.<br><br>The use of raids demonstrates the gamified nature of extremist activity on Discord, where use of the platform and harassment of political opponents is itself turned into a type of real-life video game designed to strengthen in-group affiliation. This combined with the broader extremist activity identified in these channels suggests that the combative activity of raiding could provide a pathway for younger people to become more engaged with extremist activity.</i><br><br>Discord servers were used by members of the extreme right to coordinate harassment of targeted communities (T0048: Harass, T0049.005: Conduct Swarming, T0151.004: Chat Platform, T0151.005: Chat Community Server). |
|
||||
|
||||
|
||||
|
@ -7,7 +7,7 @@
|
||||
|
||||
| Incident | Descriptions given for this incident |
|
||||
| -------- | -------------------- |
|
||||
| [I00109 Coordinated Facebook Pages Designed to Fund a White Supremacist Agenda](../../generated_pages/incidents/I00109.md) | This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at technical indicators associated with the Suavelos website, and attributions which can be made as a consequence:<i><br><br>[The Google AdSense tag set up on Suavelos.eu was also found on the following domains, indicating that they are controlled by the same actor;] Alabastro.eu: an online shop to buy “white nationalists” t-shirts [and] ARPAC.eu: the website of a registered non-profit organisation advocating to lift regulation on gun control in France.<br><br>Other domains attributed to Suavelos (T0149.001: Domain) reveal a website set up to sell merchandise (T0152.004: Website, T0148.004: Payment Processing Capability, T0061: Sell Merchandise), and a website hosting a registered French non-profit (T0152.004: Website, T0097.207: NGO Persona).<br><br>To learn more about the suavelos.eu domain, we collected the following data: The domain is hosted on OVH; The owner’s identity is protected; The IP Address of the server is 94.23.253.173, which is shared with 20 other domains. <br><br>The relative low number of websites hosted on this IP address could indicate that they all belong to the same people, and are hosted on the same private server.<.i><br><br>Suavelos registered a domain using the web hosting provider OVH (T0149.001: Domain, T0152.003: Website Hosting Platform, T0150.006: Purchased). The site’s IP address reveals a server hosting other domains potentially owned by the actors (T0149.005: Server, T0149.006: IP Address). |
|
||||
| [I00109 Coordinated Facebook Pages Designed to Fund a White Supremacist Agenda](../../generated_pages/incidents/I00109.md) | This report examines the white nationalist group Suavelos’ use of Facebook to draw visitors to its website without overtly revealing their racist ideology. This section of the report looks at technical indicators associated with the Suavelos website, and attributions which can be made as a consequence:<i><br><br>[The Google AdSense tag set up on Suavelos.eu was also found on the following domains, indicating that they are controlled by the same actor;] Alabastro.eu: an online shop to buy “white nationalists” t-shirts [and] ARPAC.eu: the website of a registered non-profit organisation advocating to lift regulation on gun control in France.<br><br>Other domains attributed to Suavelos (T0149.001: Domain Asset) reveal a website set up to sell merchandise (T0152.004: Website Asset, T0148.004: Payment Processing Capability, T0061: Sell Merchandise), and a website hosting a registered French non-profit (T0152.004: Website Asset, T0097.207: NGO Persona).<br><br>To learn more about the suavelos.eu domain, we collected the following data: The domain is hosted on OVH; The owner’s identity is protected; The IP Address of the server is 94.23.253.173, which is shared with 20 other domains. <br><br>The relative low number of websites hosted on this IP address could indicate that they all belong to the same people, and are hosted on the same private server.</i><br><br>Suavelos registered a domain using the web hosting provider OVH (T0149.001: Domain Asset, T0152.003: Website Hosting Platform, T0150.006: Purchased). The site’s IP address reveals a server hosting other domains potentially owned by the actors (T0149.005: Server Asset, T0149.006: IP Address Asset). |
|
||||
|
||||
|
||||
|
||||
|
@ -7,7 +7,7 @@
|
||||
|
||||
| Incident | Descriptions given for this incident |
|
||||
| -------- | -------------------- |
|
||||
| [I00106 Facebook Is Being Flooded With Gross AI-Generated Images of Hurricane Helene Devastation](../../generated_pages/incidents/I00106.md) | <i>As families desperately seek to find missing loved ones and communities grapple with immeasurable losses of both life and property in the wake of [2024’s] Hurricane Helene, AI slop scammers appear to be capitalizing on the moment for personal gain.<br><br>A Facebook account called "Coastal Views" usually shares calmer AI imagery of nature-filled beachside scenes. The account's banner image showcases a signpost reading "OBX Live," OBX being shorthand for North Carolina's Outer Banks islands.<br><br>But starting this weekend, the account shifted its approach dramatically, as first flagged by a social media user on X.<br><br>Instead of posting "photos" of leaping dolphins and sandy beaches, the account suddenly started publishing images of flooded mountain neighborhoods, submerged houses, and dogs sitting on top of roofs.<br><br>But instead of spreading vital information to those affected by the natural disaster, or at the very least sharing real photos of the destruction, the account is seemingly trying to use AI to cash in on all the attention the hurricane has been getting.<br><br>The account links to an Etsy page for a business called" OuterBanks2023," where somebody who goes by "Alexandr" sells AI-generated prints of horses touching snouts with sea turtles, Santa running down the shoreline with a reindeer, and sunsets over ocean waves.</i><br><br>A Facebook page which presented itself as being associated with North Carolina which posted AI generated images changed to posting AI generated images of hurricane damage after Hurricane Helene hit North Carolina (T0151.003: Online Community Page, T0151.001: Social Media Platform, T0115: Post Content, T0086.002: Develop AI-Generated Images (Deepfakes), T0068: Respond to Breaking News Event or Active Crisis). 
<br><br>The account included links (T0122: Direct Users to Alternative Platforms) to an account on Etsy, which sold prints of AI generated images (T0146: Account, T0148.007: eCommerce Platform). |
|
||||
| [I00106 Facebook Is Being Flooded With Gross AI-Generated Images of Hurricane Helene Devastation](../../generated_pages/incidents/I00106.md) | <i>As families desperately seek to find missing loved ones and communities grapple with immeasurable losses of both life and property in the wake of [2024’s] Hurricane Helene, AI slop scammers appear to be capitalizing on the moment for personal gain.<br><br>A Facebook account called "Coastal Views" usually shares calmer AI imagery of nature-filled beachside scenes. The account's banner image showcases a signpost reading "OBX Live," OBX being shorthand for North Carolina's Outer Banks islands.<br><br>But starting this weekend, the account shifted its approach dramatically, as first flagged by a social media user on X.<br><br>Instead of posting "photos" of leaping dolphins and sandy beaches, the account suddenly started publishing images of flooded mountain neighborhoods, submerged houses, and dogs sitting on top of roofs.<br><br>But instead of spreading vital information to those affected by the natural disaster, or at the very least sharing real photos of the destruction, the account is seemingly trying to use AI to cash in on all the attention the hurricane has been getting.<br><br>The account links to an Etsy page for a business called" OuterBanks2023," where somebody who goes by "Alexandr" sells AI-generated prints of horses touching snouts with sea turtles, Santa running down the shoreline with a reindeer, and sunsets over ocean waves.</i><br><br>A Facebook page which presented itself as being associated with North Carolina which posted AI generated images changed to posting AI generated images of hurricane damage after Hurricane Helene hit North Carolina (T0151.003: Online Community Page, T0151.001: Social Media Platform, T0115: Post Content, T0086.002: Develop AI-Generated Images (Deepfakes), T0068: Respond to Breaking News Event or Active Crisis). 
<br><br>The account included links (T0122: Direct Users to Alternative Platforms) to an account on Etsy, which sold prints of AI generated images (T0146: Account Asset, T0148.007: eCommerce Platform). |
|
||||
|
||||
|
||||
|
||||
|
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@ -7,7 +7,7 @@
|
||||
|
||||
| Incident | Descriptions given for this incident |
|
||||
| -------- | -------------------- |
|
||||
| [I00110 How COVID-19 conspiracists and extremists use crowdfunding platforms to fund their activities](../../generated_pages/incidents/I00110.md) | The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:<br><br><br><br><i>More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.</i><br><br>A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account, T0148.006: Crowdfunding Platform).<br><br><i>On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.<br><br>We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”</i><br><br>The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account, T0097.202: News Outlet Persona, T0121.001: Bypass Content Bocking, T0152.012: Subscription Service Platform).<br><br><i>On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133.903 for a book called Revolution Q. 
This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”</i><br><br>An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). |
|
||||
| [I00110 How COVID-19 conspiracists and extremists use crowdfunding platforms to fund their activities](../../generated_pages/incidents/I00110.md) | The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:<br><br><i>More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.</i><br><br>A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account Asset, T0148.006: Crowdfunding Platform).<br><br><i>On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.<br><br>We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”</i><br><br>The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account Asset, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).<br><br><i>On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133.903 for a book called Revolution Q. 
This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”</i><br><br>An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account Asset, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). |
|
||||
|
||||
|
||||
|
||||
|
@ -7,7 +7,7 @@
|
||||
|
||||
| Incident | Descriptions given for this incident |
|
||||
| -------- | -------------------- |
|
||||
| [I00111 Patreon Is Bankrolling Climate Change Deniers While We All Burn](../../generated_pages/incidents/I00111.md) | In this article VICE News discusses a report produced by Advance Democracy on people who use Patreon to spread the false claim that an impending ice age will reverse the harms of the ongoing climate crisis:<br><br><i>“The spread of climate misinformation is prolific on social media, as well as on sites like Patreon, where users are actually financially compensated through the platform for spreading falsehoods,” Daniel Jones, president of Advance Democracy, told VICE News.<br><br>“Companies hosting and promoting climate misinformation have a responsibility to take action to reduce dangerous misinformation, as falsehoods about climate science are every bit as dangerous as lies about vaccinations and disinformation about our elections.”<br><br>Patreon did not respond to VICE News’ request for comment on the report’s findings.<br><br>One of the biggest accounts spreading climate conspiracies is ADAPT 2030, which is run by David DuByne, who has 1,100 followers on Patreon. He is currently making over $3,500 every month from his subscribers.<br><br>[The science DuByne relies on does not support his hypothesis. 
However,] this has not prevented DuByne and many others from preying on people’s fears about climate change to spread conspiracies about an impending ice age, which they say will miraculously fix all of earth’s climate problems.<br><br>DuByne offers seven different membership levels for supporters, beginning at just $1 per month.<br><br>The most expensive costs $100 a month, and gives patrons “a private 20-minute call with David DuByne once per month, to discuss your particular preparedness issues or concerns.” So far just two people are paying this amount.<br><br>The researchers also found at least eight other accounts on Patreon that have spread climate change conspiracy theories as part of wider conspiracy sharing, including baseless claims about COVID-19 and the legitimacy of Joe Biden’s presidency. Some of these accounts are earning over $600 per month.</i><br><br>David DuByne created an account on Patreon, which he uses to post text, videos, and podcasts for his subscribers to discuss (T0085: Develop Text-Based Content, T0087: Develop Video-Based Content, T0088: Develop Audio-Based Content, T0146: Account, T0115: Post Content, T0152.012: Subscription Service Platform, T0151.014: Comments Section, T0155.006: Subscription Access). |
|
||||
| [I00111 Patreon Is Bankrolling Climate Change Deniers While We All Burn](../../generated_pages/incidents/I00111.md) | In this article VICE News discusses a report produced by Advance Democracy on people who use Patreon to spread the false claim that an impending ice age will reverse the harms of the ongoing climate crisis:<br><br><i>“The spread of climate misinformation is prolific on social media, as well as on sites like Patreon, where users are actually financially compensated through the platform for spreading falsehoods,” Daniel Jones, president of Advance Democracy, told VICE News.<br><br>“Companies hosting and promoting climate misinformation have a responsibility to take action to reduce dangerous misinformation, as falsehoods about climate science are every bit as dangerous as lies about vaccinations and disinformation about our elections.”<br><br>Patreon did not respond to VICE News’ request for comment on the report’s findings.<br><br>One of the biggest accounts spreading climate conspiracies is ADAPT 2030, which is run by David DuByne, who has 1,100 followers on Patreon. He is currently making over $3,500 every month from his subscribers.<br><br>[The science DuByne relies on does not support his hypothesis. 
However,] this has not prevented DuByne and many others from preying on people’s fears about climate change to spread conspiracies about an impending ice age, which they say will miraculously fix all of earth’s climate problems.<br><br>DuByne offers seven different membership levels for supporters, beginning at just $1 per month.<br><br>The most expensive costs $100 a month, and gives patrons “a private 20-minute call with David DuByne once per month, to discuss your particular preparedness issues or concerns.” So far just two people are paying this amount.<br><br>The researchers also found at least eight other accounts on Patreon that have spread climate change conspiracy theories as part of wider conspiracy sharing, including baseless claims about COVID-19 and the legitimacy of Joe Biden’s presidency. Some of these accounts are earning over $600 per month.</i><br><br>David DuByne created an account on Patreon, which he uses to post text, videos, and podcasts for his subscribers to discuss (T0085: Develop Text-Based Content, T0087: Develop Video-Based Content, T0088: Develop Audio-Based Content, T0146: Account Asset, T0115: Post Content, T0152.012: Subscription Service Platform, T0151.014: Comments Section, T0155.006: Subscription Access Asset). |
|
||||
|
||||
|
||||
|
||||
|
@ -7,9 +7,9 @@
|
||||
|
||||
| Incident | Descriptions given for this incident |
|
||||
| -------- | -------------------- |
|
||||
| [I00099 More Women Are Facing The Reality Of Deepfakes, And They’re Ruining Lives](../../generated_pages/incidents/I00099.md) | <i>Last October, British writer Helen was alerted to a series of deepfakes on a porn site that appear to show her engaging in extreme acts of sexual violence. That night, the images replayed themselves over and over in horrific nightmares and she was gripped by an all-consuming feeling of dread. “It’s like you’re in a tunnel, going further and further into this enclosed space, where there’s no light,” she tells Vogue. This feeling pervaded Helen’s life. Whenever she left the house, she felt exposed. On runs, she experienced panic attacks. Helen still has no idea who did this to her.<br><br>[...]<br><br>Amnesty International has been investigating the effects of abuse against women on Twitter, specifically in relation to how they act online thereafter. According to the charity, abuse creates what they’ve called “the silencing effect” whereby women feel discouraged from participating online. The same can be said for victims of deepfakes.<br><br>Helen has never been afraid to use her voice, writing deeply personal accounts of postnatal depression. But the deepfakes created a feeling of shame so strong she thought she’d be carrying this “dirty secret” forever, and so she stopped writing.<br><br>[...]<br><br>Meanwhile, deepfake ‘communities’ are thriving. There are now dedicated sites, user-friendly apps and organised ‘request’ procedures. Some sites allow you to commission custom deepfakes for £25, while on others you can upload a woman’s image and a bot will strip her naked.<br><br>“This violation is not something that should be normalised,” says Gibi, an ASMR artist with 3.13 million YouTube subscribers. Gibi has given up trying to keep tabs on the deepfakes of her. 
For Gibi, the most egregious part of all of this is the fact that people are “profiting off my face, doing something that I didn’t consent to, like my suffering is your livelihood.” She’s even been approached by a company offering to remove the deepfakes — for £500 a video. This has to end. But how?</i><br><br>A website hosting pornographic content provided users the ability to create deepfake content (T0154.002: AI Media Platform, T0086.002: Develop AI-Generated Images (Deepfakes)). <br><br>Another website enabled users to commission custom deepfakes (T0152.004: Website, T0148.004: Payment Processing Capability, T0086.002: Develop AI-Generated Images (Deepfakes), T0155.005: Paid Access). |
|
||||
| [I00099 More Women Are Facing The Reality Of Deepfakes, And They’re Ruining Lives](../../generated_pages/incidents/I00099.md) | <i>Last October, British writer Helen was alerted to a series of deepfakes on a porn site that appear to show her engaging in extreme acts of sexual violence. That night, the images replayed themselves over and over in horrific nightmares and she was gripped by an all-consuming feeling of dread. “It’s like you’re in a tunnel, going further and further into this enclosed space, where there’s no light,” she tells Vogue. This feeling pervaded Helen’s life. Whenever she left the house, she felt exposed. On runs, she experienced panic attacks. Helen still has no idea who did this to her.<br><br>[...]<br><br>Amnesty International has been investigating the effects of abuse against women on Twitter, specifically in relation to how they act online thereafter. According to the charity, abuse creates what they’ve called “the silencing effect” whereby women feel discouraged from participating online. The same can be said for victims of deepfakes.<br><br>Helen has never been afraid to use her voice, writing deeply personal accounts of postnatal depression. But the deepfakes created a feeling of shame so strong she thought she’d be carrying this “dirty secret” forever, and so she stopped writing.<br><br>[...]<br><br>Meanwhile, deepfake ‘communities’ are thriving. There are now dedicated sites, user-friendly apps and organised ‘request’ procedures. Some sites allow you to commission custom deepfakes for £25, while on others you can upload a woman’s image and a bot will strip her naked.<br><br>“This violation is not something that should be normalised,” says Gibi, an ASMR artist with 3.13 million YouTube subscribers. Gibi has given up trying to keep tabs on the deepfakes of her. 
For Gibi, the most egregious part of all of this is the fact that people are “profiting off my face, doing something that I didn’t consent to, like my suffering is your livelihood.” She’s even been approached by a company offering to remove the deepfakes — for £500 a video. This has to end. But how?</i><br><br>A website hosting pornographic content provided users the ability to create deepfake content (T0154.002: AI Media Platform, T0086.002: Develop AI-Generated Images (Deepfakes)). <br><br>Another website enabled users to commission custom deepfakes (T0152.004: Website Asset, T0148.004: Payment Processing Capability, T0086.002: Develop AI-Generated Images (Deepfakes), T0155.005: Paid Access Asset). |
|
||||
| [I00100 Why ThisPersonDoesNotExist (and its copycats) need to be restricted](../../generated_pages/incidents/I00100.md) | <i>You might have heard about the recent viral sensation, ThisPersonDoesNotExist.com, a website, launched two weeks ago, that uses Nvidia’s publicly available artificial intelligence technology to draw an invented, photo-realistic human being with each refresh. The tech is impressive and artistically evocative. It’s also irresponsible and needs to be restricted immediately.<br><br>[...]<br><br>Prior to this technology, scammers faced three major risks when using fake photos. Each of these risks had the potential to put them out business, or in jail.<br><br>Risk #1: Someone recognizes the photo. While the odds of this are long-shot, it does happen.<br><br>Risk #2: Someone reverse image searches the photo with a service like TinEye or Google Image Search and finds that it’s been posted elsewhere. Reverse image search is one of the top anti-fraud measures recommended by consumer protection advocates.<br><br>Risk #3: If the crime is successful, law enforcement uses the fake photo to figure out the scammer’s identity after the fact. Perhaps the scammer used an old classmate’s photo. Perhaps their personal account follows the Instagram member they pilfered. And so on: people make mistakes.<br><br>The problem with AI-generated photos is that they carry none of these risks. No one will recognize a human who’s never existed before. Google Image Search will return 0 results, possibly instilling a false sense of security in the searcher. And AI-generated photos don’t give law enforcement much to work with.</i><br><br>ThisPersonDoesNotExist is an online platform which, when visited, produces AI generated images of peoples’ faces (T0146.006: Open Access Platform, T0154.002: AI Media Platform, T0086.002: Develop AI-Generated Images (Deepfakes)). |
|
||||
| [I00106 Facebook Is Being Flooded With Gross AI-Generated Images of Hurricane Helene Devastation](../../generated_pages/incidents/I00106.md) | <i>As families desperately seek to find missing loved ones and communities grapple with immeasurable losses of both life and property in the wake of [2024’s] Hurricane Helene, AI slop scammers appear to be capitalizing on the moment for personal gain.<br><br>A Facebook account called "Coastal Views" usually shares calmer AI imagery of nature-filled beachside scenes. The account's banner image showcases a signpost reading "OBX Live," OBX being shorthand for North Carolina's Outer Banks islands.<br><br>But starting this weekend, the account shifted its approach dramatically, as first flagged by a social media user on X.<br><br>Instead of posting "photos" of leaping dolphins and sandy beaches, the account suddenly started publishing images of flooded mountain neighborhoods, submerged houses, and dogs sitting on top of roofs.<br><br>But instead of spreading vital information to those affected by the natural disaster, or at the very least sharing real photos of the destruction, the account is seemingly trying to use AI to cash in on all the attention the hurricane has been getting.<br><br>The account links to an Etsy page for a business called" OuterBanks2023," where somebody who goes by "Alexandr" sells AI-generated prints of horses touching snouts with sea turtles, Santa running down the shoreline with a reindeer, and sunsets over ocean waves.</i><br><br>A Facebook page which presented itself as being associated with North Carolina which posted AI generated images changed to posting AI generated images of hurricane damage after Hurricane Helene hit North Carolina (T0151.003: Online Community Page, T0151.001: Social Media Platform, T0115: Post Content, T0086.002: Develop AI-Generated Images (Deepfakes), T0068: Respond to Breaking News Event or Active Crisis). 
<br><br>The account included links (T0122: Direct Users to Alternative Platforms) to an account on Etsy, which sold prints of AI generated images (T0146: Account, T0148.007: eCommerce Platform). |
|
||||
| [I00106 Facebook Is Being Flooded With Gross AI-Generated Images of Hurricane Helene Devastation](../../generated_pages/incidents/I00106.md) | <i>As families desperately seek to find missing loved ones and communities grapple with immeasurable losses of both life and property in the wake of [2024’s] Hurricane Helene, AI slop scammers appear to be capitalizing on the moment for personal gain.<br><br>A Facebook account called "Coastal Views" usually shares calmer AI imagery of nature-filled beachside scenes. The account's banner image showcases a signpost reading "OBX Live," OBX being shorthand for North Carolina's Outer Banks islands.<br><br>But starting this weekend, the account shifted its approach dramatically, as first flagged by a social media user on X.<br><br>Instead of posting "photos" of leaping dolphins and sandy beaches, the account suddenly started publishing images of flooded mountain neighborhoods, submerged houses, and dogs sitting on top of roofs.<br><br>But instead of spreading vital information to those affected by the natural disaster, or at the very least sharing real photos of the destruction, the account is seemingly trying to use AI to cash in on all the attention the hurricane has been getting.<br><br>The account links to an Etsy page for a business called" OuterBanks2023," where somebody who goes by "Alexandr" sells AI-generated prints of horses touching snouts with sea turtles, Santa running down the shoreline with a reindeer, and sunsets over ocean waves.</i><br><br>A Facebook page which presented itself as being associated with North Carolina which posted AI generated images changed to posting AI generated images of hurricane damage after Hurricane Helene hit North Carolina (T0151.003: Online Community Page, T0151.001: Social Media Platform, T0115: Post Content, T0086.002: Develop AI-Generated Images (Deepfakes), T0068: Respond to Breaking News Event or Active Crisis). 
<br><br>The account included links (T0122: Direct Users to Alternative Platforms) to an account on Etsy, which sold prints of AI generated images (T0146: Account Asset, T0148.007: eCommerce Platform). |
|
||||
|
||||
|
||||
|
||||
|
@ -7,7 +7,7 @@
|
||||
|
||||
| Incident | Descriptions given for this incident |
|
||||
| -------- | -------------------- |
|
||||
| [I00096 China ramps up use of AI misinformation](../../generated_pages/incidents/I00096.md) | The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:<br><br><i>On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.</i><br><br>Here Spamoflage used an account on YouTube to post AI Generated audio impersonating an electoral candidate (T0146: Account, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).<br><br><i>Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.</i><br><br>Spamoflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Video (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). |
|
||||
| [I00096 China ramps up use of AI misinformation](../../generated_pages/incidents/I00096.md) | The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:<br><br><i>On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.</i><br><br>Here Spamouflage used an account on YouTube to post AI Generated audio impersonating an electoral candidate (T0146: Account Asset, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).<br><br><i>Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.</i><br><br>Spamouflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account Asset, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Video (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). |
|
||||
|
||||
|
||||
|
||||
|
@ -7,9 +7,9 @@
|
||||
|
||||
| Incident | Descriptions given for this incident |
|
||||
| -------- | -------------------- |
|
||||
| [I00110 How COVID-19 conspiracists and extremists use crowdfunding platforms to fund their activities](../../generated_pages/incidents/I00110.md) | The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:<br><br><br><br><i>More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.</i><br><br>A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account, T0148.006: Crowdfunding Platform).<br><br><i>On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.<br><br>We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”</i><br><br>The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account, T0097.202: News Outlet Persona, T0121.001: Bypass Content Bocking, T0152.012: Subscription Service Platform).<br><br><i>On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133.903 for a book called Revolution Q. 
This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”</i><br><br>An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). |
|
||||
| [I00111 Patreon Is Bankrolling Climate Change Deniers While We All Burn](../../generated_pages/incidents/I00111.md) | In this article VICE News discusses a report produced by Advance Democracy on people who use Patreon to spread the false claim that an impending ice age will reverse the harms of the ongoing climate crisis:<br><br><i>“The spread of climate misinformation is prolific on social media, as well as on sites like Patreon, where users are actually financially compensated through the platform for spreading falsehoods,” Daniel Jones, president of Advance Democracy, told VICE News.<br><br>“Companies hosting and promoting climate misinformation have a responsibility to take action to reduce dangerous misinformation, as falsehoods about climate science are every bit as dangerous as lies about vaccinations and disinformation about our elections.”<br><br>Patreon did not respond to VICE News’ request for comment on the report’s findings.<br><br>One of the biggest accounts spreading climate conspiracies is ADAPT 2030, which is run by David DuByne, who has 1,100 followers on Patreon. He is currently making over $3,500 every month from his subscribers.<br><br>[The science DuByne relies on does not support his hypothesis. 
However,] this has not prevented DuByne and many others from preying on people’s fears about climate change to spread conspiracies about an impending ice age, which they say will miraculously fix all of earth’s climate problems.<br><br>DuByne offers seven different membership levels for supporters, beginning at just $1 per month.<br><br>The most expensive costs $100 a month, and gives patrons “a private 20-minute call with David DuByne once per month, to discuss your particular preparedness issues or concerns.” So far just two people are paying this amount.<br><br>The researchers also found at least eight other accounts on Patreon that have spread climate change conspiracy theories as part of wider conspiracy sharing, including baseless claims about COVID-19 and the legitimacy of Joe Biden’s presidency. Some of these accounts are earning over $600 per month.</i><br><br>David DuByne created an account on Patreon, which he uses to post text, videos, and podcasts for his subscribers to discuss (T0085: Develop Text-Based Content, T0087: Develop Video-Based Content, T0088: Develop Audio-Based Content, T0146: Account, T0115: Post Content, T0152.012: Subscription Service Platform, T0151.014: Comments Section, T0155.006: Subscription Access). |
|
||||
| [I00125 The Agency](../../generated_pages/incidents/I00125.md) | In 2014 threat actors attributed to Russia spread the false narrative that a local chemical plant had leaked toxic fumes. This report discusses aspects of the operation:<br><br><i>[The chemical plant leak] hoax was just one in a wave of similar attacks during the second half of last year. On Dec. 13, two months after a handful of Ebola cases in the United States touched off a minor media panic, many of the same Twitter accounts used to spread the Columbian Chemicals hoax began to post about an outbreak of Ebola in Atlanta. [...] Again, the attention to detail was remarkable, suggesting a tremendous amount of effort. A YouTube video showed a team of hazmat-suited medical workers transporting a victim from the airport. Beyoncé’s recent single “7/11” played in the background, an apparent attempt to establish the video’s contemporaneity. A truck in the parking lot sported the logo of the Hartsfield-Jackson Atlanta International Airport.</i><br><br>Accounts which previously presented as Louisiana locals were repurposed for use in a different campaign, this time presenting as locals to Atlanta, a place over 500 miles away from Louisiana and in a different timezone (T0146: Account, T0097.101: Local Persona, T0143.002: Fabricated Persona, T0151.008: Microblogging Platform, T0150.004: Repurposed). <br><br>A video was created which appeared to support the campaign’s narrative (T0087: Develop Video-Based Content), with great attention given to small details which made the video appear more legitimate. |
|
||||
| [I00110 How COVID-19 conspiracists and extremists use crowdfunding platforms to fund their activities](../../generated_pages/incidents/I00110.md) | The EU Disinfo Lab produced a report into disinformation published on crowdfunding platforms:<br><br><i>More worrisome is the direct monetisation of disinformation happening on crowdfunding platforms: on Kickstarter, we found a user openly raising money for a documentary project suggesting that COVID-19 is a conspiracy.</i><br><br>A Kickstarter user attempted to use the platform to fund production of a documentary (T0017: Conduct Fundraising, T0087: Develop Video-Based Content, T0146: Account Asset, T0148.006: Crowdfunding Platform).<br><br><i>On Patreon, we found several instances of direct monetisation of COVID-19 disinformation, including posts promoting a device allegedly protecting against COVID-19 and 5G, as well as posts related to the “Plandemic” conspiracy video, which gained attention on YouTube before being removed by the platform.<br><br>We also found an account called “Stranger than fiction” entirely dedicated to disinformation, which openly states that their content was “Banned by screwtube and fakebook, our videos have been viewed over a billion times.”</i><br><br>The “Stranger than fiction” user presented itself as an alternative news source which had been banned from other platforms (T0146: Account Asset, T0097.202: News Outlet Persona, T0121.001: Bypass Content Blocking, T0152.012: Subscription Service Platform).<br><br><i>On the US-based crowdfunding platform IndieGogo, EU DisinfoLab found a successful crowdfunding campaign of €133.903 for a book called Revolution Q. 
This book, now also available on Amazon, claims to be “Written for both newcomers and long-time QAnon fans alike, this book is a treasure-trove of information designed to help everyone weather The Storm.”</i><br><br>An IndieGogo account was used to gather funds to produce a book on QAnon (T0017: Conduct Fundraising, T0085.005: Develop Book, T0146: Account Asset, T0148.006: Crowdfunding Platform), with the book later sold on Amazon marketplace (T0148.007: eCommerce Platform). |
|
||||
| [I00111 Patreon Is Bankrolling Climate Change Deniers While We All Burn](../../generated_pages/incidents/I00111.md) | In this article VICE News discusses a report produced by Advance Democracy on people who use Patreon to spread the false claim that an impending ice age will reverse the harms of the ongoing climate crisis:<br><br><i>“The spread of climate misinformation is prolific on social media, as well as on sites like Patreon, where users are actually financially compensated through the platform for spreading falsehoods,” Daniel Jones, president of Advance Democracy, told VICE News.<br><br>“Companies hosting and promoting climate misinformation have a responsibility to take action to reduce dangerous misinformation, as falsehoods about climate science are every bit as dangerous as lies about vaccinations and disinformation about our elections.”<br><br>Patreon did not respond to VICE News’ request for comment on the report’s findings.<br><br>One of the biggest accounts spreading climate conspiracies is ADAPT 2030, which is run by David DuByne, who has 1,100 followers on Patreon. He is currently making over $3,500 every month from his subscribers.<br><br>[The science DuByne relies on does not support his hypothesis. 
However,] this has not prevented DuByne and many others from preying on people’s fears about climate change to spread conspiracies about an impending ice age, which they say will miraculously fix all of earth’s climate problems.<br><br>DuByne offers seven different membership levels for supporters, beginning at just $1 per month.<br><br>The most expensive costs $100 a month, and gives patrons “a private 20-minute call with David DuByne once per month, to discuss your particular preparedness issues or concerns.” So far just two people are paying this amount.<br><br>The researchers also found at least eight other accounts on Patreon that have spread climate change conspiracy theories as part of wider conspiracy sharing, including baseless claims about COVID-19 and the legitimacy of Joe Biden’s presidency. Some of these accounts are earning over $600 per month.</i><br><br>David DuByne created an account on Patreon, which he uses to post text, videos, and podcasts for his subscribers to discuss (T0085: Develop Text-Based Content, T0087: Develop Video-Based Content, T0088: Develop Audio-Based Content, T0146: Account Asset, T0115: Post Content, T0152.012: Subscription Service Platform, T0151.014: Comments Section, T0155.006: Subscription Access Asset). |
|
||||
| [I00125 The Agency](../../generated_pages/incidents/I00125.md) | In 2014 threat actors attributed to Russia spread the false narrative that a local chemical plant had leaked toxic fumes. This report discusses aspects of the operation:<br><br><i>[The chemical plant leak] hoax was just one in a wave of similar attacks during the second half of last year. On Dec. 13, two months after a handful of Ebola cases in the United States touched off a minor media panic, many of the same Twitter accounts used to spread the Columbian Chemicals hoax began to post about an outbreak of Ebola in Atlanta. [...] Again, the attention to detail was remarkable, suggesting a tremendous amount of effort. A YouTube video showed a team of hazmat-suited medical workers transporting a victim from the airport. Beyoncé’s recent single “7/11” played in the background, an apparent attempt to establish the video’s contemporaneity. A truck in the parking lot sported the logo of the Hartsfield-Jackson Atlanta International Airport.</i><br><br>Accounts which previously presented as Louisiana locals were repurposed for use in a different campaign, this time presenting as locals to Atlanta, a place over 500 miles away from Louisiana and in a different timezone (T0146: Account Asset, T0097.101: Local Persona, T0143.002: Fabricated Persona, T0151.008: Microblogging Platform, T0150.004: Repurposed Asset). <br><br>A video was created which appeared to support the campaign’s narrative (T0087: Develop Video-Based Content), with great attention given to small details which made the video appear more legitimate. |
|
||||
|
||||
|
||||
|
||||
|
@ -8,8 +8,8 @@
|
||||
| Incident | Descriptions given for this incident |
|
||||
| -------- | -------------------- |
|
||||
| [I00068 Attempted Audio Deepfake Call Targets LastPass Employee](../../generated_pages/incidents/I00068.md) | <i>“While reports of [...] deepfake calls targeting private companies are luckily still rare, LastPass itself experienced a deepfake attempt earlier today that we are sharing with the larger community to help raise awareness that this tactic is spreading and all companies should be on the alert. In our case, an employee received a series of calls, texts, and at least one voicemail featuring an audio deepfake from a threat actor impersonating our CEO via WhatsApp. As the attempted communication was outside of normal business communication channels and due to the employee’s suspicion regarding the presence of many of the hallmarks of a social engineering attempt (such as forced urgency), our employee rightly ignored the messages and reported the incident to our internal security team so that we could take steps to both mitigate the threat and raise awareness of the tactic both internally and externally.”</i><br><br> In this example attackers impersonated the CEO of LastPass (T0097.100: Individual Persona, T0143.003: Impersonated Persona), targeting one of its employees over WhatsApp (T0151.004: Chat Platform,T0155.007: Encrypted Communication Channel) using deepfaked audio (T0088.001: Develop AI-Generated Audio (Deepfakes)). |
|
||||
| [I00096 China ramps up use of AI misinformation](../../generated_pages/incidents/I00096.md) | The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:<br><br><i>On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.</i><br><br>Here Spamoflage used an account on YouTube to post AI Generated audio impersonating an electoral candidate (T0146: Account, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).<br><br><i>Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.</i><br><br>Spamoflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Video (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). |
|
||||
| [I00103 The racist AI deepfake that fooled and divided a community](../../generated_pages/incidents/I00103.md) | <i>“I seriously don't understand why I have to constantly put up with these dumbasses here every day.”<br><br>So began what appeared to be a long tirade from the principal of Pikesville High School, punctuated with racist, antisemitic and offensive tropes. It sounded like it had been secretly recorded.<br><br>The speaker went on to bemoan “ungrateful black kids” and Jewish people in the community.<br><br>The clip, first posted in [January 2024], went viral nationally. But it really struck a nerve in the peaceful, leafy suburb of Pikesville, which has large black and Jewish populations, and in the nearby city of Baltimore, Maryland. Principal Eric Eiswert was put on paid administrative leave pending an investigation.<br><br>[...]<br><br>But what those sharing the clip didn’t realise at the time was that another bombshell was about to drop: the clip was an AI-generated fake.<br><br>[...]<br><br>[In April 2024], Baltimore Police Chief Robert McCullough confirmed they now had “conclusive evidence that the recording was not authentic”.<br><br>And they believed they knew who made the fake.<br><br>Police charged 31-year-old Dazhon Darien, the school’s athletics director, with several counts related to the fake video. Charges included theft, retaliating against a witness and stalking.<br><br>He was arrested at the airport, where police say he was planning to fly to Houston, Texas.<br><br>Police say that Mr Darien had been under investigation by Principal Eiswert over an alleged theft of $1,916 (£1,460) from the school. 
They also allege there had been “work performance challenges” and his contract was likely not to be renewed.<br><br>Their theory was that by creating the deepfake recording, he hoped to discredit the principal before he could be fired.<br><br>Investigators say they traced an email used to send the original video to a server connected to Mr Darien, and allege that he used Baltimore County Public Schools' computer network to access AI tools. He is due to stand trial in December 2024.</i><br><br>By associating Mr Darien to the server used to email the original AI generated audio, investigators link Darien to the fabricated content (T0149.005: Server, T0088.001: AI Generated Audio (Deepfakes)). They also assert that Darien used computers owned by the school to access platforms used to generate the audio (T0146: Account, T0154.002: AI Media Platform). |
|
||||
| [I00096 China ramps up use of AI misinformation](../../generated_pages/incidents/I00096.md) | The Microsoft Threat Analysis Centre (MTAC) published a report documenting the use of AI by pro-Chinese threat actors:<br><br><i>On 13 January, Spamouflage [(a Pro-Chinese Communist Party actor)] posted audio clips to YouTube of independent candidate [for Taiwan’s Jan 2024 presidential election] Terry Gou – who also founded electronics giant Foxconn – in which Gou endorsed another candidate in the race. This clip was almost certainly AI-generated, and it was swiftly removed by YouTube. A fake letter purporting to be from Gou, endorsing the same candidate, had already circulated – Gou had of course made no such endorsement.</i><br><br>Here Spamouflage used an account on YouTube to post AI Generated audio impersonating an electoral candidate (T0146: Account Asset, T0152.006: Video Platform, T0115: Post Content, T0088.001: Develop AI-Generated Audio (Deepfakes), T0143.003: Impersonated Persona, T0097.110: Party Official Persona).<br><br><i>Spamouflage also exploited AI-powered video platform CapCut – which is owned by TikTok backers ByteDance – to generate fake news anchors which were used in a variety of campaigns targeting the various presidential candidates in Taiwan.</i><br><br>Spamouflage created accounts on CapCut, which it used to create AI-generated videos of fabricated news anchors (T0146: Account Asset, T0154.002: AI Media Platform, T0087.001: Develop AI-Generated Video (Deepfakes), T0143.002: Fabricated Persona, T0097.102: Journalist Persona). |
|
||||
| [I00103 The racist AI deepfake that fooled and divided a community](../../generated_pages/incidents/I00103.md) | <i>“I seriously don't understand why I have to constantly put up with these dumbasses here every day.”<br><br>So began what appeared to be a long tirade from the principal of Pikesville High School, punctuated with racist, antisemitic and offensive tropes. It sounded like it had been secretly recorded.<br><br>The speaker went on to bemoan “ungrateful black kids” and Jewish people in the community.<br><br>The clip, first posted in [January 2024], went viral nationally. But it really struck a nerve in the peaceful, leafy suburb of Pikesville, which has large black and Jewish populations, and in the nearby city of Baltimore, Maryland. Principal Eric Eiswert was put on paid administrative leave pending an investigation.<br><br>[...]<br><br>But what those sharing the clip didn’t realise at the time was that another bombshell was about to drop: the clip was an AI-generated fake.<br><br>[...]<br><br>[In April 2024], Baltimore Police Chief Robert McCullough confirmed they now had “conclusive evidence that the recording was not authentic”.<br><br>And they believed they knew who made the fake.<br><br>Police charged 31-year-old Dazhon Darien, the school’s athletics director, with several counts related to the fake video. Charges included theft, retaliating against a witness and stalking.<br><br>He was arrested at the airport, where police say he was planning to fly to Houston, Texas.<br><br>Police say that Mr Darien had been under investigation by Principal Eiswert over an alleged theft of $1,916 (£1,460) from the school. 
They also allege there had been “work performance challenges” and his contract was likely not to be renewed.<br><br>Their theory was that by creating the deepfake recording, he hoped to discredit the principal before he could be fired.<br><br>Investigators say they traced an email used to send the original video to a server connected to Mr Darien, and allege that he used Baltimore County Public Schools' computer network to access AI tools. He is due to stand trial in December 2024.</i><br><br>By associating Mr Darien to the server used to email the original AI generated audio, investigators link Darien to the fabricated content (T0149.005: Server Asset, T0088.001: Develop AI-Generated Audio (Deepfakes)). They also assert that Darien used computers owned by the school to access platforms used to generate the audio (T0146: Account Asset, T0154.002: AI Media Platform). |
|
||||
|
||||
|
||||
|
||||
|
@ -7,7 +7,7 @@
|
||||
|
||||
| Incident | Descriptions given for this incident |
|
||||
| -------- | -------------------- |
|
||||
| [I00111 Patreon Is Bankrolling Climate Change Deniers While We All Burn](../../generated_pages/incidents/I00111.md) | In this article VICE News discusses a report produced by Advance Democracy on people who use Patreon to spread the false claim that an impending ice age will reverse the harms of the ongoing climate crisis:<br><br><i>“The spread of climate misinformation is prolific on social media, as well as on sites like Patreon, where users are actually financially compensated through the platform for spreading falsehoods,” Daniel Jones, president of Advance Democracy, told VICE News.<br><br>“Companies hosting and promoting climate misinformation have a responsibility to take action to reduce dangerous misinformation, as falsehoods about climate science are every bit as dangerous as lies about vaccinations and disinformation about our elections.”<br><br>Patreon did not respond to VICE News’ request for comment on the report’s findings.<br><br>One of the biggest accounts spreading climate conspiracies is ADAPT 2030, which is run by David DuByne, who has 1,100 followers on Patreon. He is currently making over $3,500 every month from his subscribers.<br><br>[The science DuByne relies on does not support his hypothesis. 
However,] this has not prevented DuByne and many others from preying on people’s fears about climate change to spread conspiracies about an impending ice age, which they say will miraculously fix all of earth’s climate problems.<br><br>DuByne offers seven different membership levels for supporters, beginning at just $1 per month.<br><br>The most expensive costs $100 a month, and gives patrons “a private 20-minute call with David DuByne once per month, to discuss your particular preparedness issues or concerns.” So far just two people are paying this amount.<br><br>The researchers also found at least eight other accounts on Patreon that have spread climate change conspiracy theories as part of wider conspiracy sharing, including baseless claims about COVID-19 and the legitimacy of Joe Biden’s presidency. Some of these accounts are earning over $600 per month.</i><br><br>David DuByne created an account on Patreon, which he uses to post text, videos, and podcasts for his subscribers to discuss (T0085: Develop Text-Based Content, T0087: Develop Video-Based Content, T0088: Develop Audio-Based Content, T0146: Account, T0115: Post Content, T0152.012: Subscription Service Platform, T0151.014: Comments Section, T0155.006: Subscription Access). |
|
||||
| [I00111 Patreon Is Bankrolling Climate Change Deniers While We All Burn](../../generated_pages/incidents/I00111.md) | In this article VICE News discusses a report produced by Advance Democracy on people who use Patreon to spread the false claim that an impending ice age will reverse the harms of the ongoing climate crisis:<br><br><i>“The spread of climate misinformation is prolific on social media, as well as on sites like Patreon, where users are actually financially compensated through the platform for spreading falsehoods,” Daniel Jones, president of Advance Democracy, told VICE News.<br><br>“Companies hosting and promoting climate misinformation have a responsibility to take action to reduce dangerous misinformation, as falsehoods about climate science are every bit as dangerous as lies about vaccinations and disinformation about our elections.”<br><br>Patreon did not respond to VICE News’ request for comment on the report’s findings.<br><br>One of the biggest accounts spreading climate conspiracies is ADAPT 2030, which is run by David DuByne, who has 1,100 followers on Patreon. He is currently making over $3,500 every month from his subscribers.<br><br>[The science DuByne relies on does not support his hypothesis. 
However,] this has not prevented DuByne and many others from preying on people’s fears about climate change to spread conspiracies about an impending ice age, which they say will miraculously fix all of earth’s climate problems.<br><br>DuByne offers seven different membership levels for supporters, beginning at just $1 per month.<br><br>The most expensive costs $100 a month, and gives patrons “a private 20-minute call with David DuByne once per month, to discuss your particular preparedness issues or concerns.” So far just two people are paying this amount.<br><br>The researchers also found at least eight other accounts on Patreon that have spread climate change conspiracy theories as part of wider conspiracy sharing, including baseless claims about COVID-19 and the legitimacy of Joe Biden’s presidency. Some of these accounts are earning over $600 per month.</i><br><br>David DuByne created an account on Patreon, which he uses to post text, videos, and podcasts for his subscribers to discuss (T0085: Develop Text-Based Content, T0087: Develop Video-Based Content, T0088: Develop Audio-Based Content, T0146: Account Asset, T0115: Post Content, T0152.012: Subscription Service Platform, T0151.014: Comments Section, T0155.006: Subscription Access Asset). |
|
||||
|
||||
|
||||
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user