// Scans cached withdrawal events for relayers whose transactions bypassed the
// router, and records the misbehaving relayers under ./static/relayers/.
import fs from 'fs'

import Web3 from 'web3'
import { uniq, uniqWith, isEqual } from 'lodash'

import { loadCachedEvents, needsArg, save } from './helpers'
import networkConfig, { enabledChains } from '../networkConfig'
import RELAYER_REGISTRY_ABI from '../abis/RelayerRegistry.abi.json'
import AGGREGATOR_REGISTRY_ABI from '../abis/Aggregator.abi.json'
// Directory holding the relayer-misbehavior caches written by this script
const RELAYERS_DIR_PATH = './static/relayers/'

// Zero address — the registry returns this for addresses that are not
// registered relayer workers (see the owner check in update())
const zeroAddress = '0x0000000000000000000000000000000000000000'

// ENS subdomain key of every configured network, in networkConfig order
const subdomains = Object.values(networkConfig).map((config) => config.ensSubdomainKey)
// Scans every cached withdrawal event of the given network, newest first and
// down to `untilBlock`, for withdrawals that did not go through the router.
// Each offending transaction is recorded (avoids.json) together with the
// misbehaving relayer it proves (unwanted.json), appended to the existing
// on-disk caches under RELAYERS_DIR_PATH.
//
// @param netId Chain id; suffix of the `netId${netId}` key in networkConfig.
// @param untilBlock Lower block limit — withdrawals below it are not scanned.
// @param rpcIndex Index into the network's rpcUrls entries.
async function update(netId, untilBlock, rpcIndex) {
  // Get all of the network data we need
  // "Contract" in fact means address
  const { rpcUrls, tokens, registryContract, aggregatorContract, routerContract } = networkConfig[
    `netId${netId}`
  ]

  // Get the rpcUrl we have chosen
  const rpcUrl = Object.values(rpcUrls)[rpcIndex].url

  // Prepare the provider
  const provider = new Web3(new Web3.providers.HttpProvider(rpcUrl))

  // Get the contracts
  const aggregator = new provider.eth.Contract(AGGREGATOR_REGISTRY_ABI, aggregatorContract)
  const registry = new provider.eth.Contract(RELAYER_REGISTRY_ABI, registryContract)

  // Compare addresses case-insensitively: web3 returns checksummed addresses
  // while the configured router address may use different casing
  const routerAddress = routerContract.toLowerCase()

  // Coerce once so the block-number comparison below is numeric even when the
  // caller passes the raw CLI string
  const lowerBlockBound = Number(untilBlock)

  // All avoiding txs
  const avoids = []

  // Start discovering unwanted relayers, per token and denomination
  for (const tokenSymbol of Object.keys(tokens)) {
    for (const [denomination] of Object.entries(tokens[tokenSymbol].instanceAddress)) {
      // Load the cache, we need it in reverse (newest first) so we can stop
      // as soon as we reach the lower block bound
      const withdrawals = loadCachedEvents({
        name: `withdrawals_${netId}_${tokenSymbol}_${denomination}.json`,
        directory: `./static/events/`,
        deployedBlock: undefined
      }).events.reverse()

      // Start searching
      for (const withdrawal of withdrawals) {
        // If we are at the until block then stop scanning
        if (withdrawal.blockNumber < lowerBlockBound) {
          break
        }

        // Get the tx, we need proper "to" data
        const tx = await provider.eth.getTransaction(withdrawal.transactionHash)

        // Found a sus tx: the withdrawal did not go through the router
        // (tx.to may be null for contract creations — treat that as sus too)
        if ((tx.to || '').toLowerCase() !== routerAddress) {
          // Look for the owner of the worker that sent it
          const owner = await registry.methods.workers(tx.from).call()

          // If not zeroAddress, it's an owner and he's misbehaving!
          if (owner !== zeroAddress) {
            // Get his ens hash
            const node = await registry.methods.getRelayerEnsHash(owner).call()

            // Get his records
            const data = (await aggregator.methods.relayersData([node], subdomains).call())[0]

            // Since we are doing this in reverse, the last element is going to
            // have the lowest block number. We are storing this much data
            // because relayers doing unwanted stuff should be quite rare,
            // and when this rolls out, it should become even more rare.
            avoids.push({
              address: owner,
              node: node,
              net: netId,
              token: tokenSymbol,
              denomination: denomination,
              caughtAtTxHash: tx.hash,
              blockNumber: tx.blockNumber,
              records: data.records.filter((record) => record !== '')
            })
          }
        }
      }
    }
  }

  // This is a set of the avoiding data meaning unwanted relayer + transaction proof
  // Block numbers are descending
  const cachedAvoids = loadCachedEvents({
    name: 'avoids.json',
    directory: RELAYERS_DIR_PATH,
    deployedBlock: 99999999999999 // Has to be the future formally
  }).events

  // This on the other hand is a set of the unwanted relayers
  const cachedUnwanted = loadCachedEvents({
    name: 'unwanted.json',
    directory: RELAYERS_DIR_PATH,
    deployedBlock: undefined // No such concept
  }).events

  // Deduplicate by deep equality: lodash `uniq` compares objects by reference,
  // so it would never collapse two structurally-equal entries built above
  const unwanted = uniqWith(
    avoids.map((avoidingTx) => ({
      address: avoidingTx.address,
      node: avoidingTx.node,
      records: avoidingTx.records
    })),
    isEqual
  )

  // Requires path; `recursive` also tolerates a missing parent directory
  if (!fs.existsSync(RELAYERS_DIR_PATH)) fs.mkdirSync(RELAYERS_DIR_PATH, { recursive: true })

  // Write down avoids
  fs.writeFileSync(
    `${RELAYERS_DIR_PATH}avoids.json`,
    JSON.stringify(cachedAvoids.concat(avoids), null, 2) + '\n'
  )

  // Write down unwanted relayers
  fs.writeFileSync(
    `${RELAYERS_DIR_PATH}unwanted.json`,
    JSON.stringify(cachedUnwanted.concat(unwanted), null, 2) + '\n'
  )

  // Finally, save both
  save(`${RELAYERS_DIR_PATH}avoids.json`)
  save(`${RELAYERS_DIR_PATH}unwanted.json`)
}
/**
 * CLI entry point: scans the withdrawal event caches for misbehaving relayers.
 *
 * Positional arguments are read from `process.argv` starting at index 3
 * (NOTE(review): this assumes the script runs through a runner such as
 * babel-node, so user arguments begin at argv[3] — confirm against the npm
 * script that invokes this file).
 *
 * @param netId The netId of the chain whose withdrawal caches to scan.
 * @param untilBlock The LOWER block limit to which we will search for misbehaving relayers in all withdrawal event caches.
 * @param rpcIndex Optional. The RPC to use according to networkConfig. Defaults to 0.
 */
async function main() {
  const [, , , netId, untilBlock, rpcIndex] = process.argv

  // argv values are strings; enabledChains is expected to hold string ids
  if (!enabledChains.includes(netId)) {
    throw new Error(`Supported chain ids ${enabledChains.join(', ')}`)
  }

  // untilBlock is mandatory
  if (!untilBlock) needsArg('untilBlock')

  // Coerce the numeric arguments so downstream comparisons and indexing work
  // on numbers rather than raw CLI strings
  await update(netId, Number(untilBlock), rpcIndex === undefined ? 0 : Number(rpcIndex))
}

// Surface failures (bad RPC, missing cache files, reverted calls) as a
// non-zero exit code instead of an unhandled promise rejection
main().catch((error) => {
  console.error(error)
  process.exitCode = 1
})