Compare commits
40 Commits
v5.0.0-bet
...
master
Author | SHA1 | Date |
---|---|---|
smart_ex | 2247730603 | |
smart_ex | c915c97b85 | |
smart_ex | 582af773e6 | |
smart_ex | 9488090892 | |
smart_ex | 632dce129d | |
smart_ex | 76cda01ee1 | |
smart_ex | 7f657c1d7d | |
smart_ex | e386a1d23c | |
smart_ex | 16d8e0fc28 | |
smart_ex | a7fc9c4b24 | |
smart_ex | ee9e27ecad | |
smart_ex | 95c6dc23c6 | |
smart_ex | cfcf1c8677 | |
smart_ex | 8868040882 | |
smart_ex | 50054e0516 | |
smart_ex | 76209e11c0 | |
Danil Kovtonyuk | 3c5eaa2c4b | |
_den | fd36dd5c5e | |
Alexey Pertsev | 350d1f1d11 | |
Danil Kovtonyuk | 49a90872a2 | |
Sergei SMART | 2f79125dd1 | |
_den | 1f900843de | |
dependabot[bot] | cc5ec13d97 | |
dependabot[bot] | 9e8a6c79ac | |
dependabot[bot] | 38b1169eae | |
dependabot[bot] | d95d495853 | |
dependabot[bot] | 54e1c03f9e | |
dependabot[bot] | 429723f370 | |
dependabot[bot] | b457820cf5 | |
Danil Kovtonyuk | 31d697701e | |
_den | b103033103 | |
_den | 1bb2d5f044 | |
_den | 44f1e1ec7a | |
Danil Kovtonyuk | 043356f5fe | |
Danil Kovtonyuk | 8f5b673f3b | |
Danil Kovtonyuk | 82e5f4ee70 | |
nikdementev | cc0a252040 | |
nikdementev | a46afed752 | |
nikdementev | 0cd9e515ae | |
nikdementev | fa77997896 |
@@ -20,4 +20,5 @@ CONFIRMATIONS=4
# in GWEI
MAX_GAS_PRICE=1000
BASE_FEE_RESERVE_PERCENTAGE=25
AGGREGATOR=0x8cb1436F64a3c33aD17bb42F94e255c4c0E871b2
@@ -19,7 +19,7 @@ jobs:
- run: yarn test
- run: yarn lint
- name: Telegram Failure Notification
uses: appleboy/telegram-action@0.0.7
uses: appleboy/telegram-action@master
if: failure()
with:
message: ❗ Build failed for [${{ github.repository }}](https://github.com/${{ github.repository }}/actions) because of ${{ github.actor }}

@@ -56,7 +56,7 @@ jobs:
password: ${{ secrets.DOCKER_TOKEN }}

- name: Telegram Message Notify
uses: appleboy/telegram-action@0.0.7
uses: appleboy/telegram-action@master
with:
to: ${{ secrets.TELEGRAM_CHAT_ID }}
token: ${{ secrets.TELEGRAM_BOT_TOKEN }}

@@ -65,22 +65,35 @@ jobs:
format: markdown

- name: Telegram Relayer Channel Notification
uses: appleboy/telegram-action@0.0.7
uses: appleboy/telegram-action@master
with:
to: ${{ secrets.TELEGRAM_RELAYER_CHAT_ID }}
token: ${{ secrets.TELEGRAM_BOT_TOKEN }}
message: |
🚀 Published a new version of the relayer node service to docker hub: `tornadocash/relayer:v${{ steps.vars.outputs.version }}` and `tornadocash/relayer:mining`.
🚀 Published a new version of the relayer node service for mainnet to docker hub: `tornadocash/relayer:v${{ steps.vars.outputs.version }}` and `tornadocash/relayer:mining`.

❗️Please update your mainnet nodes ❗️
DO NOT TOUCH SIDECHAINS AND NOVA RELAYERS.

❗️Please update your nodes ❗️
debug: true
format: markdown

- name: Discord Relayer Channel Notification
env:
DISCORD_WEBHOOK: ${{ secrets.DISCORD_RELAYER_WEBHOOK }}
uses: Ilshidur/action-discord@master
with:
args: |
🚀 Published a new version of the relayer node service for mainnet to docker hub: `tornadocash/relayer:v${{ steps.vars.outputs.version }}` and `tornadocash/relayer:mining`.

❗️Please update your mainnet nodes ❗️
DO NOT TOUCH SIDECHAINS AND NOVA RELAYERS.

- name: Telegram Failure Notification
uses: appleboy/telegram-action@0.0.7
uses: appleboy/telegram-action@master
if: failure()
with:
message: ❗ Failed to publish [${{ steps.vars.outputs.repo_name }}](https://github.com/${{ github.repository }}/actions) because of ${{ github.actor }}
message: ❗ Failed to publish [${{ steps.vars.outputs.repo_name }}](https://github.com/${{ github.repository }}/actions):v${{ steps.vars.outputs.version }} for mainnet because of ${{ github.actor }}
format: markdown
to: ${{ secrets.TELEGRAM_CHAT_ID }}
token: ${{ secrets.TELEGRAM_BOT_TOKEN }}
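For operators reading the release notifications above, "updating your node" generally means pulling the newly published image tag and recreating the container. A minimal sketch, assuming the relayer was deployed with this repository's docker-compose.yml (service layout is an assumption, not part of this diff):

```bash
# assumption: the relayer service is defined in the repo's docker-compose.yml
docker-compose pull
docker-compose up -d
```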
34 README.md

@@ -1,26 +1,5 @@
# Relayer for Tornado Cash [![Build Status](https://github.com/tornadocash/relayer/workflows/build/badge.svg)](https://github.com/tornadocash/relayer/actions) [![Docker Image Version (latest semver)](https://img.shields.io/docker/v/tornadocash/relayer?logo=docker&logoColor=%23FFFFFF&sort=semver)](https://hub.docker.com/repository/docker/tornadocash/relayer)

## Getting listed on app.tornado.cash

If you would like to be listed in the tornado.cash UI relayer dropdown, please do the following:

1. Set up a tornado.cash relayer node (see below for a docker-compose.yml example)
2. Set up ENS subdomains (`goerli-v2.xxx.eth`, `mainnet-v2.xxx.eth`) with a TEXT record and URL key that points to your DNS name or IP address.
3. Test your relayer setup on the Goerli testnet at https://app.tornado.cash by choosing the custom relayer option on the withdraw tab. Enter your ENS name and initiate a withdrawal.
4. Open a new GitHub issue in https://github.com/tornadocash/tornado-relayer/issues and specify the following:

- your Goerli ENS url
- your mainnet ENS url
- your Telegram handle
- withdrawal tx on Goerli
- withdrawal tx on mainnet

Please choose your testnet relayer's fee wisely.

Disclaimer: Please consult with legal and tax advisors regarding the compliance of running a relayer service in your jurisdiction. The authors of this project bear no responsibility.

USE AT YOUR OWN RISK.

## Deploy with docker-compose

docker-compose.yml contains a stack that will automatically provision SSL certificates for your domain name and will add an https redirect to port 80.
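The hunk below starts mid-way through the deployment steps; for orientation, bringing the stack up might look roughly like this. The raw URL for docker-compose.yml is an assumption based on the `.env.example` URL visible in the hunk header:

```bash
# assumption: both files are fetched from the repository root
wget https://raw.githubusercontent.com/tornadocash/tornado-relayer/master/docker-compose.yml
wget https://raw.githubusercontent.com/tornadocash/tornado-relayer/master/.env.example -O .env
# edit .env as described below, then:
docker-compose up -d
```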
@@ -46,6 +25,7 @@ wget https://raw.githubusercontent.com/tornadocash/tornado-relayer/master/.env.e
- update `AGGREGATOR` if needed - contract address of the aggregator instance.
- update `CONFIRMATIONS` if needed - how many block confirmations to wait before processing an event. It is not recommended to set this below 3.
- update `MAX_GAS_PRICE` if needed - the maximum gas price (in gwei) for the relayer's transactions
- update `BASE_FEE_RESERVE_PERCENTAGE` if needed - how much (in %) the network baseFee may increase

If you want to use more than one ETH address for relaying transactions, add as many `workers` as you want. For example, you can uncomment `worker2` in the docker-compose.yml file, but please use a different `PRIVATE_KEY` for each worker.
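As a concrete sketch of the options above, a configured `.env` might contain the following; the values mirror the `.env.example` hunk at the top of this diff and are illustrative, not recommendations:

```bash
CONFIRMATIONS=4
# in GWEI
MAX_GAS_PRICE=1000
BASE_FEE_RESERVE_PERCENTAGE=25
AGGREGATOR=0x8cb1436F64a3c33aD17bb42F94e255c4c0E871b2
```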
@@ -53,10 +33,10 @@ wget https://raw.githubusercontent.com/tornadocash/tornado-relayer/master/.env.e

## Run locally

1. `npm i`
1. `yarn`
2. `cp .env.example .env`
3. Modify `.env` as needed
4. `npm run start`
4. `yarn start`
5. Go to `http://127.0.0.1:8000`
6. In order to execute a withdrawal request, you can run the following command
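The command itself falls outside the hunks shown here. As a sketch, a withdrawal request is an HTTP POST to the relayer's `/v1/tornadoWithdraw` endpoint (the route appears in `src/router.js` later in this diff); the JSON field names below are illustrative assumptions, not the exact schema:

```bash
curl -X POST http://127.0.0.1:8000/v1/tornadoWithdraw \
  -H 'Content-Type: application/json' \
  -d '{"proof": "0x...", "contract": "0x...", "args": ["0x...", "0x..."]}'
```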
@@ -69,6 +49,14 @@ Relayer should return a transaction hash

In that case you will need to add https termination yourself, because browsers with default settings will prevent the https
tornado.cash UI from submitting your request over an http connection.

## Run geth node

It is strongly recommended that you use your own RPC node. Instructions on how to run a full node with `geth` can be found [here](https://github.com/feshchenkod/rpc-nodes).

## Monitoring

You can find the guide on how to install the Zabbix server in [/monitoring/README.md](/monitoring/README.md).

## Architecture

1. The TreeWatcher module keeps track of Account Tree changes, automatically caches the current state in Redis, and emits a `treeUpdate` event to a Redis pub/sub channel
|
|
@ -1,12 +1,181 @@
|
|||
[
|
||||
{
|
||||
"inputs": [
|
||||
{ "internalType": "contract MultiWrapper", "name": "_multiWrapper", "type": "address" },
|
||||
{ "internalType": "contract IOracle[]", "name": "existingOracles", "type": "address[]" },
|
||||
{ "internalType": "enum OffchainOracle.OracleType[]", "name": "oracleTypes", "type": "uint8[]" },
|
||||
{ "internalType": "contract IERC20[]", "name": "existingConnectors", "type": "address[]" },
|
||||
{ "internalType": "contract IERC20", "name": "wBase", "type": "address" }
|
||||
],
|
||||
"stateMutability": "nonpayable",
|
||||
"type": "constructor"
|
||||
},
|
||||
{
|
||||
"anonymous": false,
|
||||
"inputs": [
|
||||
{ "indexed": false, "internalType": "contract IERC20", "name": "connector", "type": "address" }
|
||||
],
|
||||
"name": "ConnectorAdded",
|
||||
"type": "event"
|
||||
},
|
||||
{
|
||||
"anonymous": false,
|
||||
"inputs": [
|
||||
{ "indexed": false, "internalType": "contract IERC20", "name": "connector", "type": "address" }
|
||||
],
|
||||
"name": "ConnectorRemoved",
|
||||
"type": "event"
|
||||
},
|
||||
{
|
||||
"anonymous": false,
|
||||
"inputs": [
|
||||
{ "indexed": false, "internalType": "contract MultiWrapper", "name": "multiWrapper", "type": "address" }
|
||||
],
|
||||
"name": "MultiWrapperUpdated",
|
||||
"type": "event"
|
||||
},
|
||||
{
|
||||
"anonymous": false,
|
||||
"inputs": [
|
||||
{ "indexed": false, "internalType": "contract IOracle", "name": "oracle", "type": "address" },
|
||||
{
|
||||
"indexed": false,
|
||||
"internalType": "enum OffchainOracle.OracleType",
|
||||
"name": "oracleType",
|
||||
"type": "uint8"
|
||||
}
|
||||
],
|
||||
"name": "OracleAdded",
|
||||
"type": "event"
|
||||
},
|
||||
{
|
||||
"anonymous": false,
|
||||
"inputs": [
|
||||
{ "indexed": false, "internalType": "contract IOracle", "name": "oracle", "type": "address" },
|
||||
{
|
||||
"indexed": false,
|
||||
"internalType": "enum OffchainOracle.OracleType",
|
||||
"name": "oracleType",
|
||||
"type": "uint8"
|
||||
}
|
||||
],
|
||||
"name": "OracleRemoved",
|
||||
"type": "event"
|
||||
},
|
||||
{
|
||||
"anonymous": false,
|
||||
"inputs": [
|
||||
{ "indexed": true, "internalType": "address", "name": "previousOwner", "type": "address" },
|
||||
{ "indexed": true, "internalType": "address", "name": "newOwner", "type": "address" }
|
||||
],
|
||||
"name": "OwnershipTransferred",
|
||||
"type": "event"
|
||||
},
|
||||
{
|
||||
"inputs": [{ "internalType": "contract IERC20", "name": "connector", "type": "address" }],
|
||||
"name": "addConnector",
|
||||
"outputs": [],
|
||||
"stateMutability": "nonpayable",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{ "internalType": "contract IOracle", "name": "oracle", "type": "address" },
|
||||
{ "internalType": "enum OffchainOracle.OracleType", "name": "oracleKind", "type": "uint8" }
|
||||
],
|
||||
"name": "addOracle",
|
||||
"outputs": [],
|
||||
"stateMutability": "nonpayable",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [],
|
||||
"name": "connectors",
|
||||
"outputs": [{ "internalType": "contract IERC20[]", "name": "allConnectors", "type": "address[]" }],
|
||||
"stateMutability": "view",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{ "internalType": "contract IERC20", "name": "srcToken", "type": "address" },
|
||||
{ "internalType": "contract IERC20", "name": "dstToken", "type": "address" }
|
||||
{ "internalType": "contract IERC20", "name": "dstToken", "type": "address" },
|
||||
{ "internalType": "bool", "name": "useWrappers", "type": "bool" }
|
||||
],
|
||||
"name": "getRate",
|
||||
"outputs": [{ "internalType": "uint256", "name": "weightedRate", "type": "uint256" }],
|
||||
"stateMutability": "view",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{ "internalType": "contract IERC20", "name": "srcToken", "type": "address" },
|
||||
{ "internalType": "bool", "name": "useSrcWrappers", "type": "bool" }
|
||||
],
|
||||
"name": "getRateToEth",
|
||||
"outputs": [{ "internalType": "uint256", "name": "weightedRate", "type": "uint256" }],
|
||||
"stateMutability": "view",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [],
|
||||
"name": "multiWrapper",
|
||||
"outputs": [{ "internalType": "contract MultiWrapper", "name": "", "type": "address" }],
|
||||
"stateMutability": "view",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [],
|
||||
"name": "oracles",
|
||||
"outputs": [
|
||||
{ "internalType": "contract IOracle[]", "name": "allOracles", "type": "address[]" },
|
||||
{ "internalType": "enum OffchainOracle.OracleType[]", "name": "oracleTypes", "type": "uint8[]" }
|
||||
],
|
||||
"stateMutability": "view",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [],
|
||||
"name": "owner",
|
||||
"outputs": [{ "internalType": "address", "name": "", "type": "address" }],
|
||||
"stateMutability": "view",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [{ "internalType": "contract IERC20", "name": "connector", "type": "address" }],
|
||||
"name": "removeConnector",
|
||||
"outputs": [],
|
||||
"stateMutability": "nonpayable",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{ "internalType": "contract IOracle", "name": "oracle", "type": "address" },
|
||||
{ "internalType": "enum OffchainOracle.OracleType", "name": "oracleKind", "type": "uint8" }
|
||||
],
|
||||
"name": "removeOracle",
|
||||
"outputs": [],
|
||||
"stateMutability": "nonpayable",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [],
|
||||
"name": "renounceOwnership",
|
||||
"outputs": [],
|
||||
"stateMutability": "nonpayable",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [{ "internalType": "contract MultiWrapper", "name": "_multiWrapper", "type": "address" }],
|
||||
"name": "setMultiWrapper",
|
||||
"outputs": [],
|
||||
"stateMutability": "nonpayable",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [{ "internalType": "address", "name": "newOwner", "type": "address" }],
|
||||
"name": "transferOwnership",
|
||||
"outputs": [],
|
||||
"stateMutability": "nonpayable",
|
||||
"type": "function"
|
||||
}
|
||||
]
|
||||
|
|
2 app.js

@@ -1 +1 @@
module.exports = require('./src/index')
module.exports = require('./src/server')
|
File diff suppressed because it is too large
@@ -54,6 +54,8 @@ services:
redis:
image: redis
restart: always
ports:
- '6379:6379'
command: [redis-server, --appendonly, 'yes']
volumes:
- redis:/data
@@ -0,0 +1 @@
zabbix

@@ -0,0 +1 @@
zabbix
@ -0,0 +1,37 @@
|
|||
ZBX_HOSTNAME=Zabbix
|
||||
# ZBX_SOURCEIP=
|
||||
# ZBX_DEBUGLEVEL=3
|
||||
# ZBX_ENABLEREMOTECOMMANDS=0 # Deprecated since 5.0.0
|
||||
# ZBX_LOGREMOTECOMMANDS=0
|
||||
# ZBX_HOSTINTERFACE= # Available since 4.4.0
|
||||
# ZBX_HOSTINTERFACEITEM= # Available since 4.4.0
|
||||
# ZBX_SERVER_HOST=10.110.0.5
|
||||
# ZBX_PASSIVE_ALLOW=true
|
||||
# ZBX_PASSIVESERVERS=
|
||||
# ZBX_ACTIVE_ALLOW=true
|
||||
# ZBX_ACTIVESERVERS=
|
||||
# ZBX_LISTENIP=
|
||||
# ZBX_STARTAGENTS=3
|
||||
# ZBX_HOSTNAMEITEM=system.hostname
|
||||
# ZBX_METADATA=
|
||||
# ZBX_METADATAITEM=
|
||||
# ZBX_REFRESHACTIVECHECKS=120
|
||||
# ZBX_BUFFERSEND=5
|
||||
# ZBX_BUFFERSIZE=100
|
||||
# ZBX_MAXLINESPERSECOND=20
|
||||
# ZBX_ALIAS=""
|
||||
# ZBX_TIMEOUT=3
|
||||
# ZBX_UNSAFEUSERPARAMETERS=0
|
||||
# ZBX_LOADMODULE="dummy1.so,dummy2.so,dummy10.so"
|
||||
# ZBX_TLSCONNECT=unencrypted
|
||||
# ZBX_TLSACCEPT=unencrypted
|
||||
# ZBX_TLSCAFILE=
|
||||
# ZBX_TLSCRLFILE=
|
||||
# ZBX_TLSSERVERCERTISSUER=
|
||||
# ZBX_TLSSERVERCERTSUBJECT=
|
||||
# ZBX_TLSCERTFILE=
|
||||
# ZBX_TLSKEYFILE=
|
||||
# ZBX_TLSPSKIDENTITY=
|
||||
# ZBX_TLSPSKFILE=
|
||||
# ZBX_DENYKEY=system.run[*]
|
||||
# ZBX_ALLOWKEY=
|
|
@ -0,0 +1,9 @@
|
|||
# DB_SERVER_HOST=postgres-server
|
||||
# DB_SERVER_PORT=5432
|
||||
# POSTGRES_USER=zabbix
|
||||
POSTGRES_USER_FILE=/run/secrets/POSTGRES_USER
|
||||
# POSTGRES_PASSWORD=zabbix
|
||||
POSTGRES_PASSWORD_FILE=/run/secrets/POSTGRES_PASSWORD
|
||||
POSTGRES_DB=zabbix
|
||||
# DB_SERVER_SCHEMA=public
|
||||
# ENABLE_TIMESCALEDB=tru
|
|
@ -0,0 +1,60 @@
|
|||
# ZBX_LISTENIP=
|
||||
# ZBX_HISTORYSTORAGEURL=http://elasticsearch:9200/ # Available since 3.4.5
|
||||
# ZBX_HISTORYSTORAGETYPES=uint,dbl,str,log,text # Available since 3.4.5
|
||||
# ZBX_DBTLSCONNECT=required # Available since 5.0.0
|
||||
# ZBX_DBTLSCAFILE=/run/secrets/root-ca.pem # Available since 5.0.0
|
||||
# ZBX_DBTLSCERTFILE=/run/secrets/client-cert.pem # Available since 5.0.0
|
||||
# ZBX_DBTLSKEYFILE=/run/secrets/client-key.pem # Available since 5.0.0
|
||||
# ZBX_DBTLSCIPHER= # Available since 5.0.0
|
||||
# ZBX_DBTLSCIPHER13= # Available since 5.0.0
|
||||
# ZBX_DEBUGLEVEL=3
|
||||
# ZBX_STARTPOLLERS=5
|
||||
# ZBX_IPMIPOLLERS=0
|
||||
# ZBX_STARTPREPROCESSORS=3 # Available since 3.4.0
|
||||
# ZBX_STARTPOLLERSUNREACHABLE=1
|
||||
# ZBX_STARTTRAPPERS=5
|
||||
# ZBX_STARTPINGERS=1
|
||||
# ZBX_STARTDISCOVERERS=1
|
||||
# ZBX_STARTHTTPPOLLERS=1
|
||||
# ZBX_STARTTIMERS=1
|
||||
# ZBX_STARTESCALATORS=1
|
||||
# ZBX_STARTALERTERS=3 # Available since 3.4.0
|
||||
# ZBX_JAVAGATEWAY_ENABLE=true
|
||||
# ZBX_JAVAGATEWAY=zabbix-java-gateway
|
||||
# ZBX_JAVAGATEWAYPORT=10052
|
||||
# ZBX_STARTJAVAPOLLERS=5
|
||||
# ZBX_STARTVMWARECOLLECTORS=0
|
||||
# ZBX_VMWAREFREQUENCY=60
|
||||
# ZBX_VMWAREPERFFREQUENCY=60
|
||||
# ZBX_VMWARECACHESIZE=8M
|
||||
# ZBX_VMWARETIMEOUT=10
|
||||
# ZBX_ENABLE_SNMP_TRAPS=true
|
||||
# ZBX_SOURCEIP=
|
||||
# ZBX_HOUSEKEEPINGFREQUENCY=1
|
||||
# ZBX_MAXHOUSEKEEPERDELETE=5000
|
||||
# ZBX_SENDERFREQUENCY=30
|
||||
# ZBX_CACHESIZE=8M
|
||||
# ZBX_CACHEUPDATEFREQUENCY=60
|
||||
# ZBX_STARTDBSYNCERS=4
|
||||
# ZBX_HISTORYCACHESIZE=16M
|
||||
# ZBX_HISTORYINDEXCACHESIZE=4M
|
||||
# ZBX_TRENDCACHESIZE=4M
|
||||
# ZBX_VALUECACHESIZE=8M
|
||||
# ZBX_TIMEOUT=4
|
||||
# ZBX_TRAPPERIMEOUT=300
|
||||
# ZBX_UNREACHABLEPERIOD=45
|
||||
# ZBX_UNAVAILABLEDELAY=60
|
||||
# ZBX_UNREACHABLEDELAY=15
|
||||
# ZBX_LOGSLOWQUERIES=3000
|
||||
# ZBX_EXPORTFILESIZE=
|
||||
# ZBX_STARTPROXYPOLLERS=1
|
||||
# ZBX_PROXYCONFIGFREQUENCY=3600
|
||||
# ZBX_PROXYDATAFREQUENCY=1
|
||||
# ZBX_LOADMODULE="dummy1.so,dummy2.so,dummy10.so"
|
||||
# ZBX_TLSCAFILE=
|
||||
# ZBX_TLSCRLFILE=
|
||||
# ZBX_TLSCERTFILE=
|
||||
# ZBX_TLSKEYFILE=
|
||||
# ZBX_VAULTDBPATH=
|
||||
# ZBX_VAULTURL=https://127.0.0.1:8200
|
||||
# VAULT_TOKEN=
|
|
@ -0,0 +1,26 @@
|
|||
# ZBX_SERVER_HOST=zabbix-server
|
||||
# ZBX_SERVER_PORT=10051
|
||||
# ZBX_SERVER_NAME=Monitoring
|
||||
# ZBX_DB_ENCRYPTION=true # Available since 5.0.0
|
||||
# ZBX_DB_KEY_FILE=/run/secrets/client-key.pem # Available since 5.0.0
|
||||
# ZBX_DB_CERT_FILE=/run/secrets/client-cert.pem # Available since 5.0.0
|
||||
# ZBX_DB_CA_FILE=/run/secrets/root-ca.pem # Available since 5.0.0
|
||||
# ZBX_DB_VERIFY_HOST=false # Available since 5.0.0
|
||||
# ZBX_DB_CIPHER_LIST= # Available since 5.0.0
|
||||
# ZBX_VAULTDBPATH=
|
||||
# ZBX_VAULTURL=https://127.0.0.1:8200
|
||||
# VAULT_TOKEN=
|
||||
# ZBX_HISTORYSTORAGEURL=http://elasticsearch:9200/ # Available since 3.4.5
|
||||
# ZBX_HISTORYSTORAGETYPES=['uint', 'dbl', 'str', 'text', 'log'] # Available since 3.4.5
|
||||
# ENABLE_WEB_ACCESS_LOG=true
|
||||
# ZBX_MAXEXECUTIONTIME=600
|
||||
# ZBX_MEMORYLIMIT=128M
|
||||
# ZBX_POSTMAXSIZE=16M
|
||||
# ZBX_UPLOADMAXFILESIZE=2M
|
||||
# ZBX_MAXINPUTTIME=300
|
||||
# ZBX_SESSION_NAME=zbx_sessionid
|
||||
# Timezone one of: http://php.net/manual/en/timezones.php
|
||||
# PHP_TZ=Europe/Riga
|
||||
# ZBX_DENY_GUI_ACCESS=false
|
||||
# ZBX_GUI_ACCESS_IP_RANGE=['127.0.0.1']
|
||||
# ZBX_GUI_WARNING_MSG=Zabbix is under maintenance.
|
|
@@ -0,0 +1,67 @@
# Installing the Zabbix server

Change the default passwords and ports and set the listen IP (ports `8080/tcp` and `10051/tcp` will be open on all interfaces; use a firewall or specify the address of the required interface), then run:

```bash
wget https://github.com/tornadocash/tornado-relayer/raw/master/monitoring/zabbix.tar.gz
mkdir $HOME/monitoring/
tar -xzf zabbix.tar.gz -C $HOME/monitoring/
cd $HOME/monitoring/
docker-compose up -d
```

# Installing the Zabbix agent

Download the package from the repository [https://repo.zabbix.com/zabbix/5.2/ubuntu/pool/main/z/zabbix/](https://repo.zabbix.com/zabbix/5.2/ubuntu/pool/main/z/zabbix/) and run:

```bash
sudo dpkg -i zabbix-agent_5.2.*.deb
sudo usermod -aG docker zabbix
```

Change the default values in `/etc/zabbix/zabbix_agent2.conf` (an example is shown after the list):

- `Hostname` — the same as in the zabbix-server web interface;
- `Server` and `ServerActive` — set the Zabbix server IP or DNS name;
- `ListenIP` — a local network IP reachable from the Zabbix server, or set firewall rules to restrict access to port `10050`;
- uncomment `Plugins.Docker.Endpoint=unix:///var/run/docker.sock`.
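For illustration, the edited lines in `/etc/zabbix/zabbix_agent2.conf` might end up looking like this; the host name and IP addresses are placeholders:

```
Hostname=relayer-node-1
Server=203.0.113.10
ServerActive=203.0.113.10
ListenIP=10.0.0.5
Plugins.Docker.Endpoint=unix:///var/run/docker.sock
```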
Then run:

```bash
sudo systemctl enable zabbix-agent2.service
sudo systemctl restart zabbix-agent2.service
```

# Adding the host

Log into your Zabbix server (default login and password: `Admin` / `zabbix`) and click on the Configuration tab and then the Hosts tab. Click the Create host button near the top right corner. On the resulting page, change the Host name and IP address sections to match the information for your remote server. Set the `{$URL}` macro to the relayer host, for example `http://localhost/v1/status` or `https://domain.name/v1/status`.
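As a quick sanity check before wiring up Zabbix, the status endpoint can be queried directly; the response is JSON and includes the `health` object that the Tornado-relayer template reads (`$.health.status`, `$.health.error`). A sketch, with the response abbreviated:

```bash
curl http://localhost/v1/status
# => {"health":{"status":"true","error":""}, ...}
```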
# Import templates

Import the templates using the WebUI:

- [Docker-template.yaml](/monitoring/templates/Docker-template.yaml);
- [Tornado-relayer-template.yaml](/monitoring/templates/Tornado-relayer-template.yaml).

Link the templates to the added host. It is also recommended to link the `Linux CPU by Zabbix agent`, `Linux filesystems by Zabbix agent` and `Linux memory by Zabbix agent` templates to the host.

# Alerts

In the WebUI, go to Administration -> Media types -> Telegram:

```
https://git.zabbix.com/projects/ZBX/repos/zabbix/browse/templates/media/telegram

1. Register bot: send "/newbot" to @BotFather and follow instructions
2. Copy and paste the obtained token into the "Token" field above
3. If you want to send personal notifications, you need to get chat id of the user you want to send messages to:
3.1. Send "/getid" to "@myidbot" in Telegram messenger
3.2. Copy returned chat id and save it in the "Telegram Webhook" media for the user
3.3. Ask the user to send "/start" to your bot (Telegram bot won't send anything to the user without it)
4. If you want to send group notifications, you need to get group id of the group you want to send messages to:
4.1. Add "@myidbot" to your group
4.2. Send "/getgroupid@myidbot" in your group
4.3. Copy returned group id save it in the "Telegram Webhook" media for the user you created for group notifications
4.4. Send "/start@your_bot_name_here" in your group (Telegram bot won't send anything to the group without it)
```
|
@ -0,0 +1,186 @@
|
|||
# Restrict access to 10051/tcp on public ip
|
||||
|
||||
version: '3.5'
|
||||
services:
|
||||
zabbix-server:
|
||||
image: zabbix/zabbix-server-pgsql:alpine-5.2-latest
|
||||
restart: always
|
||||
ports:
|
||||
- '10051:10051'
|
||||
volumes:
|
||||
- /etc/localtime:/etc/localtime:ro
|
||||
- /etc/timezone:/etc/timezone:ro
|
||||
- ./zbx_env/usr/lib/zabbix/alertscripts:/usr/lib/zabbix/alertscripts:ro
|
||||
- ./zbx_env/usr/lib/zabbix/externalscripts:/usr/lib/zabbix/externalscripts:ro
|
||||
- ./zbx_env/var/lib/zabbix/export:/var/lib/zabbix/export:rw
|
||||
- ./zbx_env/var/lib/zabbix/modules:/var/lib/zabbix/modules:ro
|
||||
- ./zbx_env/var/lib/zabbix/enc:/var/lib/zabbix/enc:ro
|
||||
- ./zbx_env/var/lib/zabbix/ssh_keys:/var/lib/zabbix/ssh_keys:ro
|
||||
- ./zbx_env/var/lib/zabbix/mibs:/var/lib/zabbix/mibs:ro
|
||||
- ./zbx_env/var/lib/zabbix/snmptraps:/var/lib/zabbix/snmptraps:ro
|
||||
ulimits:
|
||||
nproc: 65535
|
||||
nofile:
|
||||
soft: 20000
|
||||
hard: 40000
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
cpus: '0.70'
|
||||
memory: 1G
|
||||
reservations:
|
||||
cpus: '0.5'
|
||||
memory: 512M
|
||||
env_file:
|
||||
- .env_db_pgsql
|
||||
- .env_srv
|
||||
secrets:
|
||||
- POSTGRES_USER
|
||||
- POSTGRES_PASSWORD
|
||||
depends_on:
|
||||
- postgres-server
|
||||
networks:
|
||||
zbx_net_backend:
|
||||
aliases:
|
||||
- zabbix-server
|
||||
- zabbix-server-pgsql
|
||||
- zabbix-server-alpine-pgsql
|
||||
- zabbix-server-pgsql-alpine
|
||||
zbx_net_frontend:
|
||||
stop_grace_period: 30s
|
||||
sysctls:
|
||||
- net.ipv4.ip_local_port_range=1024 65000
|
||||
- net.ipv4.conf.all.accept_redirects=0
|
||||
- net.ipv4.conf.all.secure_redirects=0
|
||||
- net.ipv4.conf.all.send_redirects=0
|
||||
labels:
|
||||
com.zabbix.description: 'Zabbix server with PostgreSQL database support'
|
||||
com.zabbix.company: 'Zabbix LLC'
|
||||
com.zabbix.component: 'zabbix-server'
|
||||
com.zabbix.dbtype: 'pgsql'
|
||||
com.zabbix.os: 'alpine'
|
||||
|
||||
zabbix-web:
|
||||
image: zabbix/zabbix-web-nginx-pgsql:alpine-5.2-latest
|
||||
restart: always
|
||||
ports:
|
||||
- '8080:8080'
|
||||
volumes:
|
||||
- /etc/localtime:/etc/localtime:ro
|
||||
- /etc/timezone:/etc/timezone:ro
|
||||
- ./zbx_env/etc/ssl/nginx:/etc/ssl/nginx:ro
|
||||
- ./zbx_env/usr/share/zabbix/modules/:/usr/share/zabbix/modules/:ro
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
cpus: '0.70'
|
||||
memory: 512M
|
||||
reservations:
|
||||
cpus: '0.5'
|
||||
memory: 256M
|
||||
env_file:
|
||||
- .env_db_pgsql
|
||||
- .env_web
|
||||
secrets:
|
||||
- POSTGRES_USER
|
||||
- POSTGRES_PASSWORD
|
||||
depends_on:
|
||||
- postgres-server
|
||||
- zabbix-server
|
||||
healthcheck:
|
||||
test: ['CMD', 'curl', '-f', 'http://localhost:8080/']
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
networks:
|
||||
zbx_net_backend:
|
||||
aliases:
|
||||
- zabbix-web-nginx-pgsql
|
||||
- zabbix-web-nginx-alpine-pgsql
|
||||
- zabbix-web-nginx-pgsql-alpine
|
||||
zbx_net_frontend:
|
||||
stop_grace_period: 10s
|
||||
sysctls:
|
||||
- net.core.somaxconn=65535
|
||||
labels:
|
||||
com.zabbix.description: 'Zabbix frontend on Nginx web-server with PostgreSQL database support'
|
||||
com.zabbix.company: 'Zabbix LLC'
|
||||
com.zabbix.component: 'zabbix-frontend'
|
||||
com.zabbix.webserver: 'nginx'
|
||||
com.zabbix.dbtype: 'pgsql'
|
||||
com.zabbix.os: 'alpine'
|
||||
|
||||
zabbix-agent:
|
||||
image: zabbix/zabbix-agent2:alpine-5.2-latest
|
||||
restart: always
|
||||
volumes:
|
||||
- /etc/localtime:/etc/localtime:ro
|
||||
- /etc/timezone:/etc/timezone:ro
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
env_file:
|
||||
- .env_agent
|
||||
privileged: true
|
||||
user: root
|
||||
pid: 'host'
|
||||
networks:
|
||||
zbx_net_backend:
|
||||
aliases:
|
||||
- zabbix-agent
|
||||
- zabbix-agent-passive
|
||||
- zabbix-agent-alpine
|
||||
stop_grace_period: 5s
|
||||
|
||||
postgres-server:
|
||||
image: postgres:alpine
|
||||
restart: always
|
||||
volumes:
|
||||
- ./zbx_env/var/lib/postgresql/data:/var/lib/postgresql/data:rw
|
||||
env_file:
|
||||
- .env_db_pgsql
|
||||
secrets:
|
||||
- POSTGRES_USER
|
||||
- POSTGRES_PASSWORD
|
||||
stop_grace_period: 1m
|
||||
networks:
|
||||
zbx_net_backend:
|
||||
aliases:
|
||||
- postgres-server
|
||||
- pgsql-server
|
||||
- pgsql-database
|
||||
|
||||
portainer:
|
||||
image: portainer/portainer:latest
|
||||
restart: always
|
||||
ports:
|
||||
- '9000:9000'
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
- portainer-data:/data
|
||||
|
||||
networks:
|
||||
zbx_net_frontend:
|
||||
driver: bridge
|
||||
driver_opts:
|
||||
com.docker.network.enable_ipv6: 'false'
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 172.16.238.0/24
|
||||
zbx_net_backend:
|
||||
driver: bridge
|
||||
driver_opts:
|
||||
com.docker.network.enable_ipv6: 'false'
|
||||
internal: true
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 172.16.239.0/24
|
||||
|
||||
secrets:
|
||||
POSTGRES_USER:
|
||||
file: ./.POSTGRES_USER
|
||||
POSTGRES_PASSWORD:
|
||||
file: ./.POSTGRES_PASSWORD
|
||||
|
||||
volumes:
|
||||
portainer-data:
|
|
@ -0,0 +1,393 @@
|
|||
zabbix_export:
|
||||
version: '5.2'
|
||||
date: '2021-11-29T12:29:17Z'
|
||||
groups:
|
||||
- name: Docker
|
||||
templates:
|
||||
- template: Docker
|
||||
name: Docker
|
||||
description: |
|
||||
Get Docker engine metrics from plugin for the New Zabbix Agent (zabbix-agent2).
|
||||
|
||||
You can discuss this template or leave feedback on our forum
|
||||
|
||||
Template tooling version used: 0.38
|
||||
groups:
|
||||
- name: Docker
|
||||
applications:
|
||||
- name: Docker
|
||||
- name: 'Zabbix raw items'
|
||||
items:
|
||||
- name: 'Docker: Get containers'
|
||||
key: docker.containers
|
||||
history: '0'
|
||||
trends: '0'
|
||||
value_type: TEXT
|
||||
applications:
|
||||
- name: 'Zabbix raw items'
|
||||
- name: 'Docker: Containers paused'
|
||||
type: DEPENDENT
|
||||
key: docker.containers.paused
|
||||
delay: '0'
|
||||
history: 7d
|
||||
description: 'Total number of containers paused on this host'
|
||||
applications:
|
||||
- name: Docker
|
||||
preprocessing:
|
||||
- type: JSONPATH
|
||||
parameters:
|
||||
- $.ContainersPaused
|
||||
master_item:
|
||||
key: docker.info
|
||||
- name: 'Docker: Containers running'
|
||||
type: DEPENDENT
|
||||
key: docker.containers.running
|
||||
delay: '0'
|
||||
history: 7d
|
||||
description: 'Total number of containers running on this host'
|
||||
applications:
|
||||
- name: Docker
|
||||
preprocessing:
|
||||
- type: JSONPATH
|
||||
parameters:
|
||||
- $.ContainersRunning
|
||||
master_item:
|
||||
key: docker.info
|
||||
- name: 'Docker: Containers stopped'
|
||||
type: DEPENDENT
|
||||
key: docker.containers.stopped
|
||||
delay: '0'
|
||||
history: 7d
|
||||
description: 'Total number of containers stopped on this host'
|
||||
applications:
|
||||
- name: Docker
|
||||
preprocessing:
|
||||
- type: JSONPATH
|
||||
parameters:
|
||||
- $.ContainersStopped
|
||||
master_item:
|
||||
key: docker.info
|
||||
triggers:
|
||||
- expression: '{avg(5m)}>=1'
|
||||
name: 'Docker: containers is stopped'
|
||||
priority: HIGH
|
||||
- name: 'Docker: Containers total'
|
||||
type: DEPENDENT
|
||||
key: docker.containers.total
|
||||
delay: '0'
|
||||
history: 7d
|
||||
description: 'Total number of containers on this host'
|
||||
applications:
|
||||
- name: Docker
|
||||
preprocessing:
|
||||
- type: JSONPATH
|
||||
parameters:
|
||||
- $.Containers
|
||||
master_item:
|
||||
key: docker.info
|
||||
- name: 'Docker: Get images'
|
||||
key: docker.images
|
||||
history: '0'
|
||||
trends: '0'
|
||||
status: DISABLED
|
||||
value_type: TEXT
|
||||
applications:
|
||||
- name: 'Zabbix raw items'
|
||||
- name: 'Docker: Get info'
|
||||
key: docker.info
|
||||
history: '0'
|
||||
trends: '0'
|
||||
value_type: TEXT
|
||||
applications:
|
||||
- name: 'Zabbix raw items'
|
||||
- name: 'Docker: Memory total'
|
||||
type: DEPENDENT
|
||||
key: docker.mem.total
|
||||
delay: '0'
|
||||
history: 7d
|
||||
status: DISABLED
|
||||
units: B
|
||||
applications:
|
||||
- name: Docker
|
||||
preprocessing:
|
||||
- type: JSONPATH
|
||||
parameters:
|
||||
- $.MemTotal
|
||||
master_item:
|
||||
key: docker.info
|
||||
- name: 'Docker: Ping'
|
||||
key: docker.ping
|
||||
history: 7h
|
||||
applications:
|
||||
- name: Docker
|
||||
valuemap:
|
||||
name: 'Service state'
|
||||
preprocessing:
|
||||
- type: DISCARD_UNCHANGED_HEARTBEAT
|
||||
parameters:
|
||||
- 10m
|
||||
triggers:
|
||||
- expression: '{last()}=0'
|
||||
name: 'Docker: Service is down'
|
||||
priority: AVERAGE
|
||||
manual_close: 'YES'
|
||||
discovery_rules:
|
||||
- name: 'Containers discovery'
|
||||
key: 'docker.containers.discovery[true]'
|
||||
delay: 15m
|
||||
filter:
|
||||
evaltype: AND
|
||||
conditions:
|
||||
- macro: '{#NAME}'
|
||||
value: '{$DOCKER.LLD.FILTER.CONTAINER.MATCHES}'
|
||||
formulaid: A
|
||||
- macro: '{#NAME}'
|
||||
value: '{$DOCKER.LLD.FILTER.CONTAINER.NOT_MATCHES}'
|
||||
operator: NOT_MATCHES_REGEX
|
||||
formulaid: B
|
||||
description: |
|
||||
Discovery for containers metrics
|
||||
|
||||
Parameter:
|
||||
true - Returns all containers
|
||||
false - Returns only running containers
|
||||
item_prototypes:
|
||||
- name: 'Container {#NAME}: Finished at'
|
||||
type: DEPENDENT
|
||||
key: 'docker.container_info.finished["{#NAME}"]'
|
||||
delay: '0'
|
||||
history: 7d
|
||||
value_type: FLOAT
|
||||
units: unixtime
|
||||
application_prototypes:
|
||||
- name: 'Docker: Container {#NAME}'
|
||||
preprocessing:
|
||||
- type: JSONPATH
|
||||
parameters:
|
||||
- $.State.FinishedAt
|
||||
- type: DISCARD_UNCHANGED_HEARTBEAT
|
||||
parameters:
|
||||
- 1d
|
||||
master_item:
|
||||
key: 'docker.container_info["{#NAME}"]'
|
||||
- name: 'Container {#NAME}: Restart count'
|
||||
type: DEPENDENT
|
||||
key: 'docker.container_info.restart_count["{#NAME}"]'
|
||||
delay: '0'
|
||||
history: 7d
|
||||
application_prototypes:
|
||||
- name: 'Docker: Container {#NAME}'
|
||||
preprocessing:
|
||||
- type: JSONPATH
|
||||
parameters:
|
||||
- $.RestartCount
|
||||
master_item:
|
||||
key: 'docker.container_info["{#NAME}"]'
|
||||
trigger_prototypes:
|
||||
- expression: '{last()}>5'
|
||||
name: 'Container {#NAME}: restarting constantly'
|
||||
opdata: '{ITEM.VALUE}'
|
||||
priority: HIGH
|
||||
- name: 'Container {#NAME}: Started at'
|
||||
type: DEPENDENT
|
||||
key: 'docker.container_info.started["{#NAME}"]'
|
||||
delay: '0'
|
||||
history: 7d
|
||||
value_type: FLOAT
|
||||
units: unixtime
|
||||
application_prototypes:
|
||||
- name: 'Docker: Container {#NAME}'
|
||||
preprocessing:
|
||||
- type: JSONPATH
|
||||
parameters:
|
||||
- $.State.StartedAt
|
||||
- type: DISCARD_UNCHANGED_HEARTBEAT
|
||||
parameters:
|
||||
- 1d
|
||||
master_item:
|
||||
key: 'docker.container_info["{#NAME}"]'
|
||||
- name: 'Container {#NAME}: Error'
|
||||
type: DEPENDENT
|
||||
key: 'docker.container_info.state.error["{#NAME}"]'
|
||||
delay: '0'
|
||||
history: 7d
|
||||
trends: '0'
|
||||
value_type: CHAR
|
||||
application_prototypes:
|
||||
- name: 'Docker: Container {#NAME}'
|
||||
preprocessing:
|
||||
- type: JSONPATH
|
||||
parameters:
|
||||
- $.State.Error
|
||||
- type: DISCARD_UNCHANGED_HEARTBEAT
|
||||
parameters:
|
||||
- 1d
|
||||
master_item:
|
||||
key: 'docker.container_info["{#NAME}"]'
|
||||
trigger_prototypes:
|
||||
- expression: '{diff()}=1 and {strlen()}>0'
|
||||
name: 'Container {#NAME}: An error has occurred in the container'
|
||||
priority: WARNING
|
||||
description: 'Container {#NAME} has an error. Ack to close.'
|
||||
manual_close: 'YES'
|
||||
- name: 'Container {#NAME}: Exit code'
|
||||
type: DEPENDENT
|
||||
key: 'docker.container_info.state.exitcode["{#NAME}"]'
|
||||
delay: '0'
|
||||
history: 7d
|
||||
application_prototypes:
|
||||
- name: 'Docker: Container {#NAME}'
|
||||
preprocessing:
|
||||
- type: JSONPATH
|
||||
parameters:
|
||||
- $.State.ExitCode
|
||||
- type: DISCARD_UNCHANGED_HEARTBEAT
|
||||
parameters:
|
||||
- 1d
|
||||
master_item:
|
||||
key: 'docker.container_info["{#NAME}"]'
|
||||
- name: 'Container {#NAME}: Paused'
|
||||
type: DEPENDENT
|
||||
key: 'docker.container_info.state.paused["{#NAME}"]'
|
||||
delay: '0'
|
||||
history: 7d
|
||||
application_prototypes:
|
||||
- name: 'Docker: Container {#NAME}'
|
||||
valuemap:
|
||||
name: 'Docker flag'
|
||||
preprocessing:
|
||||
- type: JSONPATH
|
||||
parameters:
|
||||
- $.State.Paused
|
||||
- type: BOOL_TO_DECIMAL
|
||||
parameters:
|
||||
- ''
|
||||
master_item:
|
||||
key: 'docker.container_info["{#NAME}"]'
|
||||
- name: 'Container {#NAME}: Restarting'
|
||||
type: DEPENDENT
|
||||
key: 'docker.container_info.state.restarting["{#NAME}"]'
|
||||
delay: '0'
|
||||
history: 7d
|
||||
application_prototypes:
|
||||
- name: 'Docker: Container {#NAME}'
|
||||
valuemap:
|
||||
name: 'Docker flag'
|
||||
preprocessing:
|
||||
- type: JSONPATH
|
||||
parameters:
|
||||
- $.State.Restarting
|
||||
- type: BOOL_TO_DECIMAL
|
||||
parameters:
|
||||
- ''
|
||||
master_item:
|
||||
key: 'docker.container_info["{#NAME}"]'
|
||||
- name: 'Container {#NAME}: Running'
|
||||
type: DEPENDENT
|
||||
key: 'docker.container_info.state.running["{#NAME}"]'
|
||||
delay: '0'
|
||||
history: 7d
|
||||
application_prototypes:
|
||||
- name: 'Docker: Container {#NAME}'
|
||||
valuemap:
|
||||
name: 'Docker flag'
|
||||
preprocessing:
|
||||
- type: JSONPATH
|
||||
parameters:
|
||||
- $.State.Running
|
||||
- type: BOOL_TO_DECIMAL
|
||||
parameters:
|
||||
- ''
|
||||
master_item:
|
||||
key: 'docker.container_info["{#NAME}"]'
|
||||
- name: 'Container {#NAME}: Status'
|
||||
type: DEPENDENT
|
||||
key: 'docker.container_info.state.status["{#NAME}"]'
|
||||
delay: '0'
|
||||
history: 7d
|
||||
trends: '0'
|
||||
value_type: CHAR
|
||||
application_prototypes:
|
||||
- name: 'Docker: Container {#NAME}'
|
||||
preprocessing:
|
||||
- type: JSONPATH
|
||||
parameters:
|
||||
- $.State.Status
|
||||
- type: DISCARD_UNCHANGED_HEARTBEAT
|
||||
parameters:
|
||||
- 1h
|
||||
master_item:
|
||||
key: 'docker.container_info["{#NAME}"]'
|
||||
- name: 'Container {#NAME}: Get info'
|
||||
key: 'docker.container_info["{#NAME}"]'
|
||||
history: '0'
|
||||
trends: '0'
|
||||
value_type: CHAR
|
||||
description: 'Return low-level information about a container'
|
||||
application_prototypes:
|
||||
- name: 'Docker: Container {#NAME}'
|
||||
trigger_prototypes:
|
||||
- expression: '{Docker:docker.container_info.state.exitcode["{#NAME}"].last()}>0 and {Docker:docker.container_info.state.running["{#NAME}"].last()}=0'
|
||||
name: 'Container {#NAME}: Container has been stopped with error code'
|
||||
opdata: 'Exit code: {ITEM.LASTVALUE1}'
|
||||
priority: AVERAGE
|
||||
manual_close: 'YES'
|
||||
macros:
|
||||
- macro: '{$DOCKER.LLD.FILTER.CONTAINER.MATCHES}'
|
||||
value: '.*'
|
||||
description: 'Filter of discoverable containers'
|
||||
- macro: '{$DOCKER.LLD.FILTER.CONTAINER.NOT_MATCHES}'
|
||||
value: CHANGE_IF_NEEDED
|
||||
description: 'Filter to exclude discovered containers'
|
||||
- macro: '{$DOCKER.LLD.FILTER.IMAGE.MATCHES}'
|
||||
value: '.*'
|
||||
description: 'Filter of discoverable images'
|
||||
- macro: '{$DOCKER.LLD.FILTER.IMAGE.NOT_MATCHES}'
|
||||
value: CHANGE_IF_NEEDED
|
||||
description: 'Filter to exclude discovered images'
|
||||
graphs:
|
||||
- name: 'Docker: Containers'
|
||||
graph_items:
|
||||
- drawtype: GRADIENT_LINE
|
||||
color: 1A7C11
|
||||
item:
|
||||
host: Docker
|
||||
key: docker.containers.running
|
||||
- sortorder: '1'
|
||||
drawtype: BOLD_LINE
|
||||
color: 2774A4
|
||||
item:
|
||||
host: Docker
|
||||
key: docker.containers.paused
|
||||
- sortorder: '2'
|
||||
drawtype: BOLD_LINE
|
||||
color: F63100
|
||||
item:
|
||||
host: Docker
|
||||
key: docker.containers.stopped
|
||||
- sortorder: '3'
|
||||
drawtype: BOLD_LINE
|
||||
color: A54F10
|
||||
item:
|
||||
host: Docker
|
||||
key: docker.containers.total
|
||||
- name: 'Docker: Memory total'
|
||||
graph_items:
|
||||
- drawtype: BOLD_LINE
|
||||
color: 1A7C11
|
||||
item:
|
||||
host: Docker
|
||||
key: docker.mem.total
|
||||
value_maps:
|
||||
- name: 'Docker flag'
|
||||
mappings:
|
||||
- value: '0'
|
||||
newvalue: 'False'
|
||||
- value: '1'
|
||||
newvalue: 'True'
|
||||
- name: 'Service state'
|
||||
mappings:
|
||||
- value: '0'
|
||||
newvalue: Down
|
||||
- value: '1'
|
||||
newvalue: Up
|
|
@ -0,0 +1,70 @@
|
|||
zabbix_export:
|
||||
version: '5.2'
|
||||
date: '2021-12-01T13:26:59Z'
|
||||
groups:
|
||||
- name: Templates/Applications
|
||||
templates:
|
||||
- template: Tornado-relayer
|
||||
name: Tornado-relayer
|
||||
groups:
|
||||
- name: Templates/Applications
|
||||
items:
|
||||
- name: 'tornado-relayer: health.error'
|
||||
type: DEPENDENT
|
||||
key: tornado-relayer.health.error
|
||||
delay: '0'
|
||||
trends: '0'
|
||||
value_type: TEXT
|
||||
preprocessing:
|
||||
- type: JSONPATH
|
||||
parameters:
|
||||
- $.health.error
|
||||
master_item:
|
||||
key: 'web.page.get[{$URL}]'
|
||||
triggers:
|
||||
- expression: '{last()}<>""'
|
||||
name: 'tornado-relayer: health error'
|
||||
priority: AVERAGE
|
||||
- name: 'tornado-relayer: health.status'
|
||||
type: DEPENDENT
|
||||
key: tornado-relayer.health.status
|
||||
delay: '0'
|
||||
trends: '0'
|
||||
value_type: TEXT
|
||||
preprocessing:
|
||||
- type: JSONPATH
|
||||
parameters:
|
||||
- $.health.status
|
||||
master_item:
|
||||
key: 'web.page.get[{$URL}]'
|
||||
triggers:
|
||||
- expression: '{last(#3)}<>"true"'
|
||||
name: 'tornado-relayer: health status <> true'
|
||||
priority: HIGH
|
||||
- name: 'tornado-relayer: data'
|
||||
type: ZABBIX_ACTIVE
|
||||
key: 'web.page.get[{$URL}]'
|
||||
history: '0'
|
||||
trends: '0'
|
||||
value_type: TEXT
|
||||
preprocessing:
|
||||
- type: REGEX
|
||||
parameters:
|
||||
- '\n\s?\n([\s\S]*)'
|
||||
- \1
|
||||
httptests:
|
||||
- name: 'tornado-relayer: status page'
|
||||
agent: 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/80.0.3987.87 Chrome/80.0.3987.87 Safari/537.36'
|
||||
steps:
|
||||
- name: 'status page'
|
||||
url: '{$URL}'
|
||||
follow_redirects: 'NO'
|
||||
required: status
|
||||
status_codes: '200'
|
||||
triggers:
|
||||
- expression: '{Tornado-relayer:web.test.fail[tornado-relayer: status page].last()}>0'
|
||||
name: 'tornado-relayer: status page failed'
|
||||
priority: AVERAGE
|
||||
- expression: '{Tornado-relayer:web.test.rspcode[tornado-relayer: status page,status page].last(#3)}<>200'
|
||||
name: 'tornado-relayer: status page rspcode <>200'
|
||||
priority: HIGH
|
Binary file not shown.
10 package.json

@@ -1,6 +1,6 @@
{
"name": "relay",
"version": "4.0.15",
"version": "4.1.4",
"description": "Relayer for Tornado.cash privacy solution. https://tornado.cash",
"scripts": {
"server": "node src/server.js",

@@ -26,12 +26,12 @@
"eth-ens-namehash": "^2.0.8",
"express": "^4.17.1",
"fixed-merkle-tree": "^0.4.0",
"gas-price-oracle": "^0.2.2",
"gas-price-oracle": "^0.4.7",
"ioredis": "^4.14.1",
"node-fetch": "^2.6.0",
"torn-token": "1.0.4",
"node-fetch": "^2.6.7",
"torn-token": "1.0.6",
"tornado-anonymity-mining": "^2.1.2",
"tx-manager": "^0.2.9",
"tx-manager": "^0.4.8",
"uuid": "^8.3.0",
"web3": "^1.3.0",
"web3-core-promievent": "^1.3.0",
@@ -8,7 +8,7 @@ module.exports = {
httpRpcUrl: process.env.HTTP_RPC_URL,
wsRpcUrl: process.env.WS_RPC_URL,
oracleRpcUrl: process.env.ORACLE_RPC_URL || 'https://mainnet.infura.io/',
offchainOracleAddress: '0x080AB73787A8B13EC7F40bd7d00d6CC07F9b24d0',
offchainOracleAddress: '0x07D91f5fb9Bf7798734C3f606dB065549F6893bb',
aggregatorAddress: process.env.AGGREGATOR,
minerMerkleTreeHeight: 20,
privateKey: process.env.PRIVATE_KEY,

@@ -18,12 +18,14 @@ module.exports = {
tornadoServiceFee: Number(process.env.REGULAR_TORNADO_WITHDRAW_FEE),
miningServiceFee: Number(process.env.MINING_SERVICE_FEE),
rewardAccount: process.env.REWARD_ACCOUNT,
governanceAddress: '0x5efda50f22d34F262c29268506C5Fa42cB56A1Ce',
tornadoGoerliProxy: '0x454d870a72e29d5E5697f635128D18077BD04C60',
gasLimits: {
[jobType.TORNADO_WITHDRAW]: 390000,
WITHDRAW_WITH_EXTRA: 480000,
WITHDRAW_WITH_EXTRA: 700000,
[jobType.MINING_REWARD]: 455000,
[jobType.MINING_WITHDRAW]: 400000,
},
minimumBalance: '1000000000000000000',
baseFeeReserve: Number(process.env.BASE_FEE_RESERVE_PERCENTAGE),
}
|
|||
getTornadoWithdrawInputError,
|
||||
getMiningRewardInputError,
|
||||
getMiningWithdrawInputError,
|
||||
} = require('./validator')
|
||||
const { postJob } = require('./queue')
|
||||
const { jobType } = require('./constants')
|
||||
} = require('../modules/validator')
|
||||
const { postJob } = require('../queue')
|
||||
const { jobType } = require('../constants')
|
||||
|
||||
async function tornadoWithdraw(req, res) {
|
||||
const inputError = getTornadoWithdrawInputError(req.body)
|
|
@ -0,0 +1,4 @@
|
|||
module.exports = {
|
||||
controller: require('./controller'),
|
||||
status: require('./status'),
|
||||
}
|
|
@@ -1,13 +1,13 @@
const queue = require('./queue')
const { netId, tornadoServiceFee, miningServiceFee, instances, redisUrl, rewardAccount } = require('./config')
const { version } = require('../package.json')
const Redis = require('ioredis')
const redis = new Redis(redisUrl)
const queue = require('../queue')
const { netId, tornadoServiceFee, miningServiceFee, instances, rewardAccount } = require('../config')
const { version } = require('../../package.json')
const { redis } = require('../modules/redis')
const { readRelayerErrors } = require('../utils')

async function status(req, res) {
const ethPrices = await redis.hgetall('prices')
const health = await redis.hgetall('health')

health.errorsLog = await readRelayerErrors(redis)
const { waiting: currentQueue } = await queue.queue.getJobCounts()

res.json({
@@ -1,20 +1,14 @@
const Web3 = require('web3')
const Redis = require('ioredis')
const { toBN, fromWei } = require('web3-utils')

const { setSafeInterval } = require('./utils')
const { redisUrl, httpRpcUrl, privateKey, minimumBalance } = require('./config')

const web3 = new Web3(httpRpcUrl)
const redis = new Redis(redisUrl)
const { setSafeInterval, toBN, fromWei, RelayerError } = require('./utils')
const { privateKey, minimumBalance } = require('./config')
const { redis } = require('./modules/redis')
const web3 = require('./modules/web3')()

async function main() {
try {
const { address } = web3.eth.accounts.privateKeyToAccount(privateKey)
const balance = await web3.eth.getBalance(address)

if (toBN(balance).lt(toBN(minimumBalance))) {
throw new Error(`Not enough balance, less than ${fromWei(minimumBalance)} ETH`)
throw new RelayerError(`Not enough balance, less than ${fromWei(minimumBalance)} ETH`, 1)
}

await redis.hset('health', { status: true, error: '' })
@@ -0,0 +1,11 @@
const { createClient } = require('ioredis')
const { redisUrl } = require('../config')

const redis = createClient(redisUrl)
const redisSubscribe = createClient(redisUrl)

module.exports = {
redis,
redisSubscribe,
redisUrl,
}
@@ -1,7 +1,7 @@
const { httpRpcUrl, aggregatorAddress } = require('./config')
const Web3 = require('web3')
const web3 = new Web3(httpRpcUrl)
const aggregator = new web3.eth.Contract(require('../abis/Aggregator.abi.json'), aggregatorAddress)
const { aggregatorAddress } = require('../config')
const web3 = require('./web3')()

const aggregator = new web3.eth.Contract(require('../../abis/Aggregator.abi.json'), aggregatorAddress)
const ens = require('eth-ens-namehash')

class ENSResolver {

@@ -26,5 +26,4 @@ class ENSResolver {
return addresses.length === 1 ? addresses[0] : addresses
}
}

module.exports = ENSResolver
module.exports = new ENSResolver()
@@ -1,6 +1,6 @@
const { isAddress, toChecksumAddress } = require('web3-utils')
const { getInstance } = require('./utils')
const { rewardAccount } = require('./config')
const { getInstance } = require('../utils')
const { rewardAccount } = require('../config')

const Ajv = require('ajv')
const ajv = new Ajv({ format: 'fast' })

@@ -19,7 +19,7 @@ ajv.addKeyword('isAddress', {
ajv.addKeyword('isKnownContract', {
validate: (schema, data) => {
try {
return getInstance(data) !== null
return !!getInstance(data)
} catch (e) {
return false
}
@@ -0,0 +1,30 @@
const Web3 = require('web3')
const { oracleRpcUrl, httpRpcUrl, wsRpcUrl } = require('../config')
const getWeb3 = (type = 'http') => {
let url
switch (type) {
case 'oracle':
url = oracleRpcUrl
break
case 'ws':
url = wsRpcUrl
return new Web3(
new Web3.providers.WebsocketProvider(wsRpcUrl, {
clientConfig: {
maxReceivedFrameSize: 100000000,
maxReceivedMessageSize: 100000000,
},
}),
)
case 'http':
default:
url = httpRpcUrl
break
}
return new Web3(
new Web3.providers.HttpProvider(url, {
timeout: 200000, // ms
}),
)
}
module.exports = getWeb3
@@ -1,42 +1,45 @@
const Redis = require('ioredis')
const { redisUrl, offchainOracleAddress, oracleRpcUrl } = require('./config')
const { getArgsForOracle, setSafeInterval } = require('./utils')
const redis = new Redis(redisUrl)
const Web3 = require('web3')
const web3 = new Web3(
new Web3.providers.HttpProvider(oracleRpcUrl, {
timeout: 200000, // ms
}),
)
const { offchainOracleAddress } = require('./config')
const {
getArgsForOracle,
setSafeInterval,
toChecksumAddress,
toBN,
RelayerError,
logRelayerError,
} = require('./utils')
const { redis } = require('./modules/redis')
const web3 = require('./modules/web3')('oracle')

const offchainOracleABI = require('../abis/OffchainOracle.abi.json')

const offchainOracle = new web3.eth.Contract(offchainOracleABI, offchainOracleAddress)
const { tokenAddresses, oneUintAmount, currencyLookup } = getArgsForOracle()

const { toBN } = require('web3-utils')

async function main() {
try {
const ethPrices = {}
for (let i = 0; i < tokenAddresses.length; i++) {
try {
const price = await offchainOracle.methods
.getRate(tokenAddresses[i], '0x0000000000000000000000000000000000000000')
.call()
const isWrap =
toChecksumAddress(tokenAddresses[i]) ===
toChecksumAddress('0x5d3a536E4D6DbD6114cc1Ead35777bAB948E3643')

const price = await offchainOracle.methods.getRateToEth(tokenAddresses[i], isWrap).call()
const numerator = toBN(oneUintAmount[i])
const denominator = toBN(10).pow(toBN(18)) // eth decimals
const priceFormatted = toBN(price).mul(numerator).div(denominator)

ethPrices[currencyLookup[tokenAddresses[i]]] = priceFormatted.toString()
} catch (e) {
console.error('cant get price of ', tokenAddresses[i])
}
}

if (!Object.values(ethPrices).length) {
throw new RelayerError('Can`t update prices', 1)
}
await redis.hmset('prices', ethPrices)
console.log('Wrote following prices to redis', ethPrices)
} catch (e) {
await logRelayerError(redis, e)
console.error('priceWatcher error', e)
}
}
10 src/queue.js

@@ -1,11 +1,11 @@
const { v4: uuid } = require('uuid')
const Queue = require('bull')
const Redis = require('ioredis')
const { redisUrl } = require('./config')
const { status } = require('./constants')
const redis = new Redis(redisUrl)

const queue = new Queue('proofs', redisUrl, {
const { netId } = require('./config')
const { status } = require('./constants')
const { redis, redisUrl } = require('./modules/redis')

const queue = new Queue(`proofs_${netId}`, redisUrl, {
lockDuration: 300000, // Key expiration time for job locks.
lockRenewTime: 30000, // Interval on which to acquire the job lock
stalledInterval: 30000, // How often check for stalled jobs (use 0 for never checking).
@@ -0,0 +1,30 @@
const { controller, status } = require('./contollers')
const router = require('express').Router()

// Add CORS headers
router.use((req, res, next) => {
res.header('X-Frame-Options', 'DENY')
res.header('Access-Control-Allow-Origin', '*')
res.header('Access-Control-Allow-Headers', 'Origin, X-Requested-With, Content-Type, Accept')
next()
})

// Log error to console but don't send it to the client to avoid leaking data
router.use((err, req, res, next) => {
if (err) {
console.error(err)
return res.sendStatus(500)
}
next()
})

router.get('/', status.index)
router.get('/v1/status', status.status)
router.get('/v1/jobs/:id', status.getJob)
router.post('/v1/tornadoWithdraw', controller.tornadoWithdraw)
router.get('/status', status.status)
router.post('/relay', controller.tornadoWithdraw)
router.post('/v1/miningReward', controller.miningReward)
router.post('/v1/miningWithdraw', controller.miningWithdraw)

module.exports = router
@@ -1,41 +1,14 @@
const express = require('express')
const status = require('./status')
const controller = require('./controller')
const { port, rewardAccount } = require('./config')
const { version } = require('../package.json')
const { isAddress } = require('web3-utils')

const app = express()
app.use(express.json())

// Add CORS headers
app.use((req, res, next) => {
res.header('Access-Control-Allow-Origin', '*')
res.header('Access-Control-Allow-Headers', 'Origin, X-Requested-With, Content-Type, Accept')
next()
})

// Log error to console but don't send it to the client to avoid leaking data
app.use((err, req, res, next) => {
if (err) {
console.error(err)
return res.sendStatus(500)
}
next()
})

app.get('/', status.index)
app.get('/v1/status', status.status)
app.get('/v1/jobs/:id', status.getJob)
app.post('/v1/tornadoWithdraw', controller.tornadoWithdraw)
app.get('/status', status.status)
app.post('/relay', controller.tornadoWithdraw)
app.post('/v1/miningReward', controller.miningReward)
app.post('/v1/miningWithdraw', controller.miningWithdraw)
const { isAddress } = require('./utils')
const router = require('./router')

if (!isAddress(rewardAccount)) {
throw new Error('No REWARD_ACCOUNT specified')
}

const app = express()
app.use(express.json())
app.use(router)
app.listen(port)
console.log(`Relayer ${version} started on port ${port}`)
@ -1,39 +1,35 @@
const MerkleTree = require('fixed-merkle-tree')
const { redisUrl, wsRpcUrl, minerMerkleTreeHeight, torn } = require('./config')
const { poseidonHash2 } = require('./utils')
const { toBN } = require('web3-utils')
const Redis = require('ioredis')
const redis = new Redis(redisUrl)
const ENSResolver = require('./resolver')
const resolver = new ENSResolver()
const Web3 = require('web3')
const web3 = new Web3(
  new Web3.providers.WebsocketProvider(wsRpcUrl, {
    clientConfig: {
      maxReceivedFrameSize: 100000000,
      maxReceivedMessageSize: 100000000,
    },
  }),
)
const { minerMerkleTreeHeight, torn, netId } = require('./config')
const { poseidonHash2, toBN, logRelayerError } = require('./utils')
const resolver = require('./modules/resolver')
const web3 = require('./modules/web3')('ws')
const MinerABI = require('../abis/mining.abi.json')
const { redis } = require('./modules/redis')
let contract

// eslint-disable-next-line no-unused-vars
let tree, eventSubscription, blockSubscription

// todo handle the situation when we have two rewards in one block
async function fetchEvents(from = 0, to = 'latest') {
  try {
    const events = await contract.getPastEvents('NewAccount', {
      fromBlock: from,
      toBlock: to,
    })
    return events
      .sort((a, b) => a.returnValues.index - b.returnValues.index)
      .map(e => toBN(e.returnValues.commitment))
  } catch (e) {
    console.error('error fetching events', e)
async function fetchEvents(fromBlock, toBlock) {
  if (fromBlock <= toBlock) {
    try {
      return await contract.getPastEvents('NewAccount', {
        fromBlock,
        toBlock,
      })
    } catch (error) {
      const midBlock = (fromBlock + toBlock) >> 1

      if (midBlock - fromBlock < 2) {
        throw new Error(`error fetching events: ${error.message}`)
      }

      const arr1 = await fetchEvents(fromBlock, midBlock)
      const arr2 = await fetchEvents(midBlock + 1, toBlock)
      return [...arr1, ...arr2]
    }
  }
  return []
}

async function processNewEvent(err, event) {
@ -105,14 +101,29 @@ async function init() {
    console.log('Initializing')
    const miner = await resolver.resolve(torn.miningV2.address)
    contract = new web3.eth.Contract(MinerABI, miner)
    const block = await web3.eth.getBlockNumber()
    const events = await fetchEvents(0, block)
    tree = new MerkleTree(minerMerkleTreeHeight, events, { hashFunction: poseidonHash2 })

    const cachedEvents = require(`../cache/accounts_farmer_${netId}.json`)
    const cachedCommitments = cachedEvents.map(e => toBN(e.commitment))

    const toBlock = await web3.eth.getBlockNumber()
    const [{ blockNumber: fromBlock }] = cachedEvents.slice(-1)

    const newEvents = await fetchEvents(fromBlock + 1, toBlock)
    const newCommitments = newEvents
      .sort((a, b) => a.returnValues.index - b.returnValues.index)
      .map(e => toBN(e.returnValues.commitment))
      .filter((item, index, arr) => !index || item !== arr[index - 1])

    const commitments = cachedCommitments.concat(newCommitments)

    tree = new MerkleTree(minerMerkleTreeHeight, commitments, { hashFunction: poseidonHash2 })
    await updateRedis()
    console.log(`Rebuilt tree with ${events.length} elements, root: ${tree.root()}`)
    eventSubscription = contract.events.NewAccount({ fromBlock: block + 1 }, processNewEvent)
    console.log(`Rebuilt tree with ${commitments.length} elements, root: ${tree.root()}`)

    eventSubscription = contract.events.NewAccount({ fromBlock: toBlock + 1 }, processNewEvent)
    blockSubscription = web3.eth.subscribe('newBlockHeaders', processNewBlock)
  } catch (e) {
    await logRelayerError(redis, e)
    console.error('error on init treeWatcher', e.message)
  }
}
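The reworked fetchEvents above retries a failed getPastEvents call by halving the block range and recursing, which works around RPC providers that cap the block span or result size of a single query; init() then only fetches events newer than the bundled cache file. A standalone sketch of the same divide-and-conquer pattern (the fetchRange helper name and its arguments are assumptions for illustration, not the relayer's API):

```js
// Illustrative sketch: recursively halve a block range until the provider accepts it.
async function fetchRange(contract, eventName, fromBlock, toBlock) {
  if (fromBlock > toBlock) return []
  try {
    return await contract.getPastEvents(eventName, { fromBlock, toBlock })
  } catch (error) {
    const midBlock = (fromBlock + toBlock) >> 1
    // Give up once the range can no longer be split meaningfully.
    if (midBlock - fromBlock < 2) throw error
    const left = await fetchRange(contract, eventName, fromBlock, midBlock)
    const right = await fetchRange(contract, eventName, midBlock + 1, toBlock)
    return [...left, ...right] // preserve block order when stitching the halves back together
  }
}
```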
src/utils.js
@ -1,6 +1,6 @@
const { instances, netId } = require('./config')
const { poseidon } = require('circomlib')
const { toBN, toChecksumAddress, BN } = require('web3-utils')
const { toBN, toChecksumAddress, BN, fromWei, isAddress, toWei } = require('web3-utils')

const TOKENS = {
  torn: {
@ -10,19 +10,29 @@ const TOKENS = {
  },
}

const addressMap = new Map()
const instance = instances[`netId${netId}`]

for (const [currency, { instanceAddress, symbol, decimals }] of Object.entries(instance)) {
  Object.entries(instanceAddress).forEach(([amount, address]) =>
    addressMap.set(`${netId}_${address}`, {
      currency,
      amount,
      symbol,
      decimals,
    }),
  )
}

const sleep = ms => new Promise(res => setTimeout(res, ms))

function getInstance(address) {
  address = toChecksumAddress(address)
  const inst = instances[`netId${netId}`]
  for (const currency of Object.keys(inst)) {
    for (const amount of Object.keys(inst[currency].instanceAddress)) {
      if (inst[currency].instanceAddress[amount] === address) {
        return { currency, amount }
      }
    }
  const key = `${netId}_${toChecksumAddress(address)}`
  if (addressMap.has(key)) {
    return addressMap.get(key)
  } else {
    throw new Error('Unknown contact address')
  }
  return null
}

const poseidonHash = items => toBN(poseidon(items).toString())
@ -118,6 +128,27 @@ function fromDecimals(value, decimals) {
  return new BN(wei.toString(10), 10)
}

class RelayerError extends Error {
  constructor(message, score = 0) {
    super(message)
    this.score = score
  }
}

const logRelayerError = async (redis, e) => {
  await redis.zadd('errors', 'INCR', e.score || 1, e.message)
}

const readRelayerErrors = async redis => {
  const set = await redis.zrevrange('errors', 0, -1, 'WITHSCORES')
  const errors = []
  while (set.length) {
    const [message, score] = set.splice(0, 2)
    errors.push({ message, score })
  }
  return errors
}

module.exports = {
  getInstance,
  setSafeInterval,
@ -126,4 +157,13 @@ module.exports = {
  when,
  getArgsForOracle,
  fromDecimals,
  toBN,
  toChecksumAddress,
  fromWei,
  toWei,
  BN,
  isAddress,
  RelayerError,
  logRelayerError,
  readRelayerErrors,
}
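logRelayerError and readRelayerErrors above keep a per-message error counter in a Redis sorted set: ZADD with the INCR flag bumps the member's score, and ZREVRANGE ... WITHSCORES returns a flat [member, score, member, score, ...] array that gets re-chunked into objects. A minimal usage sketch against a local Redis instance (the connection URL is an assumption):

```js
// Illustrative usage of the error helpers; assumes a Redis server on the default local port.
const Redis = require('ioredis')
const { RelayerError, logRelayerError, readRelayerErrors } = require('./utils')

async function demo() {
  const redis = new Redis('redis://127.0.0.1:6379')
  // Each call increments the score stored for this exact message string.
  await logRelayerError(redis, new RelayerError('Provided fee is not enough', 2))
  await logRelayerError(redis, new Error('generic failure')) // no score -> falls back to 1
  console.log(await readRelayerErrors(redis)) // [{ message, score }, ...] sorted by score, highest first
  redis.disconnect()
}

demo()
```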
@ -1,8 +1,5 @@
const fs = require('fs')
const Web3 = require('web3')
const { toBN, toWei, fromWei, toChecksumAddress } = require('web3-utils')
const MerkleTree = require('fixed-merkle-tree')
const Redis = require('ioredis')
const { GasPriceOracle } = require('gas-price-oracle')
const { Utils, Controller } = require('tornado-anonymity-mining')
@ -11,24 +8,35 @@ const miningABI = require('../abis/mining.abi.json')
const tornadoABI = require('../abis/tornadoABI.json')
const tornadoProxyABI = require('../abis/tornadoProxyABI.json')
const { queue } = require('./queue')
const { poseidonHash2, getInstance, fromDecimals, sleep } = require('./utils')
const {
  poseidonHash2,
  getInstance,
  fromDecimals,
  sleep,
  toBN,
  toWei,
  fromWei,
  toChecksumAddress,
  RelayerError,
  logRelayerError,
} = require('./utils')
const { jobType, status } = require('./constants')
const {
  torn,
  netId,
  redisUrl,
  gasLimits,
  instances,
  privateKey,
  httpRpcUrl,
  oracleRpcUrl,
  baseFeeReserve,
  miningServiceFee,
  tornadoServiceFee,
  tornadoGoerliProxy,
} = require('./config')
const ENSResolver = require('./resolver')
const resolver = new ENSResolver()
const resolver = require('./modules/resolver')
const { TxManager } = require('tx-manager')
const { redis, redisSubscribe } = require('./modules/redis')
const getWeb3 = require('./modules/web3')

let web3
let currentTx
@ -38,8 +46,6 @@ let txManager
let controller
let swap
let minerContract
const redis = new Redis(redisUrl)
const redisSubscribe = new Redis(redisUrl)
const gasPriceOracle = new GasPriceOracle({ defaultRpc: oracleRpcUrl })

async function fetchTree() {
@ -74,12 +80,18 @@ async function fetchTree() {
async function start() {
  try {
    web3 = new Web3(httpRpcUrl)
    await clearErrors()
    web3 = getWeb3()
    const { CONFIRMATIONS, MAX_GAS_PRICE } = process.env
    txManager = new TxManager({
      privateKey,
      rpcUrl: httpRpcUrl,
      config: { CONFIRMATIONS, MAX_GAS_PRICE, THROW_ON_REVERT: false },
      config: {
        CONFIRMATIONS,
        MAX_GAS_PRICE,
        THROW_ON_REVERT: false,
        BASE_FEE_RESERVE_PERCENTAGE: baseFeeReserve,
      },
    })
    swap = new web3.eth.Contract(swapABI, await resolver.resolve(torn.rewardSwap.address))
    minerContract = new web3.eth.Contract(miningABI, await resolver.resolve(torn.miningV2.address))
@ -94,6 +106,7 @@ async function start() {
    queue.process(processJob)
    console.log('Worker started')
  } catch (e) {
    await logRelayerError(redis, e)
    console.error('error on start worker', e.message)
  }
}
@ -105,17 +118,29 @@ function checkFee({ data }) {
  return checkMiningFee(data)
}

async function checkTornadoFee({ args, contract }) {
  const { currency, amount } = getInstance(contract)
  const { decimals } = instances[`netId${netId}`][currency]
  const [fee, refund] = [args[4], args[5]].map(toBN)
async function getGasPrice() {
  const block = await web3.eth.getBlock('latest')

  if (block && block.baseFeePerGas) {
    return toBN(block.baseFeePerGas)
  }

  const { fast } = await gasPriceOracle.gasPrices()
  return toBN(toWei(fast.toString(), 'gwei'))
}

async function checkTornadoFee({ args, contract }) {
  const { currency, amount, decimals } = getInstance(contract)
  const [fee, refund] = [args[4], args[5]].map(toBN)
  const gasPrice = await getGasPrice()

  const ethPrice = await redis.hget('prices', currency)
  const expense = toBN(toWei(fast.toString(), 'gwei')).mul(toBN(gasLimits[jobType.TORNADO_WITHDRAW]))
  const expense = gasPrice.mul(toBN(gasLimits[jobType.TORNADO_WITHDRAW]))

  const feePercent = toBN(fromDecimals(amount, decimals))
    .mul(toBN(parseInt(tornadoServiceFee * 1e10)))
    .div(toBN(1e10 * 100))

  let desiredFee
  switch (currency) {
    case 'eth': {
@ -138,17 +163,20 @@ async function checkTornadoFee({ args, contract }) {
    fromWei(feePercent.toString()),
  )
  if (fee.lt(desiredFee)) {
    throw new Error('Provided fee is not enough. Probably it is a Gas Price spike, try to resubmit.')
    throw new RelayerError(
      'Provided fee is not enough. Probably it is a Gas Price spike, try to resubmit.',
      0,
    )
  }
}

async function checkMiningFee({ args }) {
  const { fast } = await gasPriceOracle.gasPrices()
  const gasPrice = await getGasPrice()
  const ethPrice = await redis.hget('prices', 'torn')
  const isMiningReward = currentJob.data.type === jobType.MINING_REWARD
  const providedFee = isMiningReward ? toBN(args.fee) : toBN(args.extData.fee)

  const expense = toBN(toWei(fast.toString(), 'gwei')).mul(toBN(gasLimits[currentJob.data.type]))
  const expense = gasPrice.mul(toBN(gasLimits[currentJob.data.type]))
  const expenseInTorn = expense.mul(toBN(1e18)).div(toBN(ethPrice))
  // todo make aggregator for ethPrices and rewardSwap data
  const balance = await swap.methods.tornVirtualBalance().call()
@ -170,19 +198,17 @@ async function checkMiningFee({ args }) {
    serviceFeePercent.toString(),
  )
  if (toBN(providedFee).lt(desiredFee)) {
    throw new Error('Provided fee is not enough. Probably it is a Gas Price spike, try to resubmit.')
    throw new RelayerError('Provided fee is not enough. Probably it is a Gas Price spike, try to resubmit.')
  }
}

async function getProxyContract() {
  let proxyAddress

  if (netId === 5) {
    proxyAddress = tornadoGoerliProxy
  } else {
    proxyAddress = await resolver.resolve(torn.tornadoProxy.address)
    proxyAddress = await resolver.resolve(torn.tornadoRouter.address)
  }

  const contract = new web3.eth.Contract(tornadoProxyABI, proxyAddress)

  return {
@ -241,7 +267,7 @@ async function isOutdatedTreeRevert(receipt, currentTx) {
async function processJob(job) {
  try {
    if (!jobType[job.data.type]) {
      throw new Error(`Unknown job type: ${job.data.type}`)
      throw new RelayerError(`Unknown job type: ${job.data.type}`)
    }
    currentJob = job
    await updateStatus(status.ACCEPTED)
@ -250,7 +276,7 @@ async function processJob(job) {
  } catch (e) {
    console.error('processJob', e.message)
    await updateStatus(status.FAILED)
    throw e
    throw new RelayerError(e.message)
  }
}
@ -283,10 +309,10 @@ async function submitTx(job, retry = 0) {
          await updateStatus(status.RESUBMITTED)
          await submitTx(job, retry + 1)
        } else {
          throw new Error('Tree update retry limit exceeded')
          throw new RelayerError('Tree update retry limit exceeded')
        }
      } else {
        throw new Error('Submitted transaction failed')
        throw new RelayerError('Submitted transaction failed')
      }
    }
  } catch (e) {
@ -302,10 +328,10 @@ async function submitTx(job, retry = 0) {
        console.log('Tree is still not up to date, resubmitting')
        await submitTx(job, retry + 1)
      } else {
        throw new Error('Tree update retry limit exceeded')
        throw new RelayerError('Tree update retry limit exceeded')
      }
    } else {
      throw new Error(`Revert by smart contract ${e.message}`)
      throw new RelayerError(`Revert by smart contract ${e.message}`)
    }
  }
}
@ -328,4 +354,9 @@ async function updateStatus(status) {
  await currentJob.update(currentJob.data)
}

async function clearErrors() {
  console.log('Errors list cleared')
  await redis.del('errors')
}

start()
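The new getGasPrice above prefers the latest block's baseFeePerGas (available on EIP-1559 chains) and only falls back to the gas-price oracle's "fast" estimate on legacy chains; the fee checks then price a job as expense = gasPrice * gasLimit. A simplified standalone sketch of that decision, where the RPC endpoint and gas limit are assumptions for illustration:

```js
// Illustrative sketch: prefer the EIP-1559 base fee, fall back to a legacy "fast" gas price.
const Web3 = require('web3')
const { toBN, toWei } = require('web3-utils')
const { GasPriceOracle } = require('gas-price-oracle')

const web3 = new Web3('https://rpc.example.org') // assumed RPC endpoint
const gasPriceOracle = new GasPriceOracle({ defaultRpc: 'https://rpc.example.org' })

async function estimateExpense(gasLimit = 390000 /* assumed withdrawal gas limit */) {
  const block = await web3.eth.getBlock('latest')
  const gasPrice =
    block && block.baseFeePerGas
      ? toBN(block.baseFeePerGas) // EIP-1559 chain: use the protocol base fee
      : toBN(toWei((await gasPriceOracle.gasPrices()).fast.toString(), 'gwei')) // legacy fallback
  return gasPrice.mul(toBN(gasLimit)) // total wei the relayer expects to spend on the transaction
}
```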
@ -4,7 +4,7 @@ const {
  getTornadoWithdrawInputError,
  getMiningRewardInputError,
  getMiningWithdrawInputError,
} = require('../src/validator')
} = require('../src/modules/validator')

describe('Validator', () => {
  describe('#getTornadoWithdrawInputError', () => {
@ -19,7 +19,13 @@ describe('Validator', () => {
        '.proof should match pattern "^0x[a-fA-F0-9]{512}$"',
      )
    })

    it('should throw if unknown contract', () => {
      const malformedData = { ...withdrawData }
      malformedData.contract = '0xf17f52151ebef6c7334fad080c5704d77216b732'
      getTornadoWithdrawInputError(malformedData).should.be.equal(
        '.contract should pass "isKnownContract" keyword validation',
      )
    })
    it('should throw something is missing', () => {
      const malformedData = { ...withdrawData }
      delete malformedData.proof