Merge pull request #9135

c069c04ed blockchain sync: reduce disk writes from 2 to 1 per tx (jeffro256)
tobtoht 2025-03-10 16:16:18 +00:00
commit 44523cafff
GPG Key ID: E45B10DD027D2472
25 changed files with 1185 additions and 1171 deletions

View File

@ -173,7 +173,9 @@ int check_flush(cryptonote::core &core, std::vector<block_complete_entry> &block
for(auto& tx_blob: block_entry.txs)
{
tx_verification_context tvc = AUTO_VAL_INIT(tvc);
core.handle_incoming_tx(tx_blob, tvc, relay_method::block, true);
CHECK_AND_ASSERT_THROW_MES(tx_blob.prunable_hash == crypto::null_hash,
"block entry must not contain pruned txs");
core.handle_incoming_tx(tx_blob.blob, tvc, relay_method::block, true);
if(tvc.m_verifivation_failed)
{
cryptonote::transaction transaction;
@ -189,8 +191,9 @@ int check_flush(cryptonote::core &core, std::vector<block_complete_entry> &block
// process block
block_verification_context bvc = {};
pool_supplement ps{};
core.handle_incoming_block(block_entry.block, pblocks.empty() ? NULL : &pblocks[blockidx++], bvc, false); // <--- process block
core.handle_incoming_block(block_entry.block, pblocks.empty() ? NULL : &pblocks[blockidx++], bvc, ps, false); // <--- process block
if(bvc.m_verifivation_failed)
{

View File

@ -234,6 +234,7 @@ namespace cryptonote
CHECK_AND_ASSERT_MES(r, false, "Failed to parse transaction from blob");
CHECK_AND_ASSERT_MES(expand_transaction_1(tx, false), false, "Failed to expand transaction data");
tx.invalidate_hashes();
tx.set_blob_size(tx_blob.size());
//TODO: validate tx
return get_transaction_hash(tx, tx_hash);
@ -497,6 +498,19 @@ namespace cryptonote
return get_transaction_weight(tx, blob_size);
}
//---------------------------------------------------------------
uint64_t get_transaction_blob_size(const transaction& tx)
{
if (!tx.is_blob_size_valid())
{
const cryptonote::blobdata tx_blob = tx_to_blob(tx);
tx.set_blob_size(tx_blob.size());
}
CHECK_AND_ASSERT_THROW_MES(tx.is_blob_size_valid(), "BUG: blob size valid not set");
return tx.blob_size;
}
//---------------------------------------------------------------
bool get_tx_fee(const transaction& tx, uint64_t & fee)
{
if (tx.version > 1)
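
A small usage sketch of the new get_transaction_blob_size() helper above: the first call serializes the transaction once and caches its size, and later calls reuse the cached value. This is illustrative only, not part of the diff; `tx_blob` is assumed to hold a serialized transaction.

    // given some cryptonote::blobdata tx_blob holding a serialized tx
    cryptonote::transaction tx;
    crypto::hash txid;
    if (cryptonote::parse_and_validate_tx_from_blob(tx_blob, tx, txid))
    {
      const uint64_t size_first  = cryptonote::get_transaction_blob_size(tx); // serializes once, caches
      const uint64_t size_cached = cryptonote::get_transaction_blob_size(tx); // cache hit, no re-serialization
    }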

View File

@ -137,6 +137,7 @@ namespace cryptonote
uint64_t get_transaction_weight(const transaction &tx);
uint64_t get_transaction_weight(const transaction &tx, size_t blob_size);
uint64_t get_pruned_transaction_weight(const transaction &tx);
uint64_t get_transaction_blob_size(const transaction& tx);
bool check_money_overflow(const transaction& tx);
bool check_outs_overflow(const transaction& tx);

View File

@ -69,6 +69,7 @@ namespace cryptonote
bool m_marked_as_orphaned;
bool m_already_exists;
bool m_partial_block_reward;
bool m_bad_pow; // if bad pow, bad peer outright for DoS protection
bool m_bad_pow; // if bad pow, ban peer outright for DoS protection
bool m_missing_txs; // set if, during verif, we don't have all the necessary txs available
};
}

View File

@ -41,6 +41,7 @@
#include "blockchain.h"
#include "blockchain_db/blockchain_db.h"
#include "cryptonote_basic/cryptonote_boost_serialization.h"
#include "cryptonote_basic/events.h"
#include "cryptonote_config.h"
#include "cryptonote_basic/miner.h"
#include "hardforks/hardforks.h"
@ -643,12 +644,14 @@ block Blockchain::pop_block_from_blockchain()
// in hf_versions.
uint8_t version = get_ideal_hard_fork_version(m_db->height());
// We assume that if they were in a block, the transactions are already
// known to the network as a whole. However, if we had mined that block,
// that might not be always true. Unlikely though, and always relaying
// these again might cause a spike of traffic as many nodes re-relay
// all the transactions in a popped block when a reorg happens.
bool r = m_tx_pool.add_tx(tx, tvc, relay_method::block, true, version);
// We assume that if they were in a block, the transactions are already known to the network
// as a whole. However, if we had mined that block, that might not always be true. Unlikely
// though, and always relaying these again might cause a spike of traffic as many nodes
// re-relay all the transactions in a popped block when a reorg happens. You might notice that
// we also set the "nic_verified_hf_version" parameter. Since we know we took this transaction
// from the mempool earlier in this function call, when the mempool has the same current fork
// version, we can return it without re-verifying the consensus rules on it.
const bool r = m_tx_pool.add_tx(tx, tvc, relay_method::block, true, version, version);
if (!r)
{
LOG_ERROR("Error returning transaction to tx_pool");
@ -660,7 +663,6 @@ block Blockchain::pop_block_from_blockchain()
m_blocks_longhash_table.clear();
m_scan_table.clear();
m_blocks_txs_check.clear();
uint64_t top_block_height;
crypto::hash top_block_hash = get_tail_id(top_block_height);
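
Inferred reading of the two-version add_tx() call above; the parameter name comes from the comment, and the actual tx_pool signature is not shown in this excerpt.

    // m_tx_pool.add_tx(tx, tvc, relay_method::block, /*relayed=*/true,
    //                  /*hf version=*/version,
    //                  /*nic_verified_hf_version=*/version);
    // When nic_verified_hf_version matches the pool's current fork version, the pool can skip
    // re-running non-input consensus (NIC) checks on a tx it already verified at that version.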
@ -1077,7 +1079,7 @@ bool Blockchain::rollback_blockchain_switching(std::list<block>& original_chain,
for (auto& bl : original_chain)
{
block_verification_context bvc = {};
bool r = handle_block_to_main_chain(bl, bvc, false);
bool r = handle_block_to_main_chain(bl, bvc);
CHECK_AND_ASSERT_MES(r && bvc.m_added_to_main_chain, false, "PANIC! failed to add (again) block while chain switching during the rollback!");
}
@ -1130,7 +1132,7 @@ bool Blockchain::switch_to_alternative_blockchain(std::list<block_extended_info>
block_verification_context bvc = {};
// add block to main chain
bool r = handle_block_to_main_chain(bei.bl, bvc, false);
bool r = handle_block_to_main_chain(bei.bl, bvc);
// if adding block to main chain failed, rollback to previous state and
// return false
@ -1165,7 +1167,8 @@ bool Blockchain::switch_to_alternative_blockchain(std::list<block_extended_info>
for (auto& old_ch_ent : disconnected_chain)
{
block_verification_context bvc = {};
bool r = handle_alternative_block(old_ch_ent, get_block_hash(old_ch_ent), bvc);
pool_supplement ps{};
bool r = handle_alternative_block(old_ch_ent, get_block_hash(old_ch_ent), bvc, ps);
if(!r)
{
MERROR("Failed to push ex-main chain blocks to alternative chain ");
@ -1861,7 +1864,8 @@ bool Blockchain::build_alt_chain(const crypto::hash &prev_id, std::list<block_ex
// if that chain is long enough to become the main chain and re-org accordingly
// if so. If not, we need to hang on to the block in case it becomes part of
// a long forked chain eventually.
bool Blockchain::handle_alternative_block(const block& b, const crypto::hash& id, block_verification_context& bvc)
bool Blockchain::handle_alternative_block(const block& b, const crypto::hash& id,
block_verification_context& bvc, pool_supplement& extra_block_txs)
{
LOG_PRINT_L3("Blockchain::" << __func__);
CRITICAL_REGION_LOCAL(m_blockchain_lock);
@ -1993,6 +1997,47 @@ bool Blockchain::handle_alternative_block(const block& b, const crypto::hash& id
}
bei.cumulative_difficulty += current_diff;
// Now that we have the PoW verification out of the way, verify all pool supplement txs
tx_verification_context tvc{};
if (!ver_non_input_consensus(extra_block_txs, tvc, hf_version))
{
MERROR_VER("Transaction pool supplement verification failure for alt block " << id);
bvc.m_verifivation_failed = true;
return false;
}
// Add pool supplement txs to the main mempool with relay_method::block
CRITICAL_REGION_LOCAL(m_tx_pool);
for (auto& extra_block_tx : extra_block_txs.txs_by_txid)
{
const crypto::hash& txid = extra_block_tx.first;
transaction& tx = extra_block_tx.second.first;
const blobdata &tx_blob = extra_block_tx.second.second;
tx_verification_context tvc{};
if ((!m_tx_pool.have_tx(txid, relay_category::legacy) &&
!m_db->tx_exists(txid) &&
!m_tx_pool.add_tx(tx, tvc, relay_method::block, /*relayed=*/true, hf_version, hf_version))
|| tvc.m_verifivation_failed)
{
MERROR_VER("Transaction " << txid <<
" in pool supplement failed to enter main pool for alt block " << id);
bvc.m_verifivation_failed = true;
return false;
}
// If new incoming tx in alt block passed verification and entered the pool, notify ZMQ
if (tvc.m_added_to_pool)
notify_txpool_event({txpool_event{
.tx = tx,
.hash = txid,
.blob_size = tx_blob.size(),
.weight = get_transaction_weight(tx),
.res = true}});
}
extra_block_txs.txs_by_txid.clear();
extra_block_txs.nic_verified_hf_version = 0;
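
For orientation, the pool_supplement type is defined elsewhere in this PR; below is a rough reconstruction from how it is used here. The field names are taken from the diff, everything else is assumed.

    // Approximate shape only, not the PR's actual definition:
    struct pool_supplement
    {
      // txid -> (parsed transaction, original serialized blob)
      std::unordered_map<crypto::hash, std::pair<transaction, blobdata>> txs_by_txid;
      // hard fork version at which non-input consensus (NIC) rules were already verified,
      // or 0 if the contained txs have not been pre-verified
      std::uint8_t nic_verified_hf_version = 0;
    };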
bei.block_cumulative_weight = cryptonote::get_transaction_weight(b.miner_tx);
for (const crypto::hash &txid: b.tx_hashes)
{
@ -2790,11 +2835,12 @@ bool Blockchain::have_block(const crypto::hash& id, int *where) const
return have_block_unlocked(id, where);
}
//------------------------------------------------------------------
bool Blockchain::handle_block_to_main_chain(const block& bl, block_verification_context& bvc, bool notify/* = true*/)
bool Blockchain::handle_block_to_main_chain(const block& bl, block_verification_context& bvc)
{
LOG_PRINT_L3("Blockchain::" << __func__);
crypto::hash id = get_block_hash(bl);
return handle_block_to_main_chain(bl, id, bvc, notify);
pool_supplement ps{};
return handle_block_to_main_chain(bl, id, bvc, ps);
}
//------------------------------------------------------------------
size_t Blockchain::get_total_transactions() const
@ -2905,24 +2951,6 @@ bool Blockchain::get_tx_outputs_gindexs(const crypto::hash& tx_id, std::vector<u
return true;
}
//------------------------------------------------------------------
void Blockchain::on_new_tx_from_block(const cryptonote::transaction &tx)
{
#if defined(PER_BLOCK_CHECKPOINT)
// check if we're doing per-block checkpointing
if (m_db->height() < m_blocks_hash_check.size())
{
TIME_MEASURE_START(a);
m_blocks_txs_check.push_back(get_transaction_hash(tx));
TIME_MEASURE_FINISH(a);
if(m_show_time_stats)
{
size_t ring_size = !tx.vin.empty() && tx.vin[0].type() == typeid(txin_to_key) ? boost::get<txin_to_key>(tx.vin[0]).key_offsets.size() : 0;
MINFO("HASH: " << "-" << " I/M/O: " << tx.vin.size() << "/" << ring_size << "/" << tx.vout.size() << " H: " << 0 << " chcktx: " << a);
}
}
#endif
}
//------------------------------------------------------------------
//FIXME: it seems this function is meant to be merely a wrapper around
// another function of the same name, this one adding one bit of
// functionality. Should probably move anything more than that
@ -2962,12 +2990,9 @@ bool Blockchain::check_tx_inputs(transaction& tx, uint64_t& max_used_block_heigh
return true;
}
//------------------------------------------------------------------
bool Blockchain::check_tx_outputs(const transaction& tx, tx_verification_context &tvc) const
bool Blockchain::check_tx_outputs(const transaction& tx, tx_verification_context &tvc, std::uint8_t hf_version)
{
LOG_PRINT_L3("Blockchain::" << __func__);
CRITICAL_REGION_LOCAL(m_blockchain_lock);
const uint8_t hf_version = m_hardfork->get_current_version();
// from hard fork 2, we forbid dust and compound outputs
if (hf_version >= 2) {
@ -3929,26 +3954,6 @@ bool Blockchain::check_block_timestamp(const block& b, uint64_t& median_ts) cons
return check_block_timestamp(timestamps, b, median_ts);
}
//------------------------------------------------------------------
void Blockchain::return_tx_to_pool(std::vector<std::pair<transaction, blobdata>> &txs)
{
uint8_t version = get_current_hard_fork_version();
for (auto& tx : txs)
{
cryptonote::tx_verification_context tvc = AUTO_VAL_INIT(tvc);
// We assume that if they were in a block, the transactions are already
// known to the network as a whole. However, if we had mined that block,
// that might not be always true. Unlikely though, and always relaying
// these again might cause a spike of traffic as many nodes re-relay
// all the transactions in a popped block when a reorg happens.
const size_t weight = get_transaction_weight(tx.first, tx.second.size());
const crypto::hash tx_hash = get_transaction_hash(tx.first);
if (!m_tx_pool.add_tx(tx.first, tx_hash, tx.second, weight, tvc, relay_method::block, true, version))
{
MERROR("Failed to return taken transaction with hash: " << get_transaction_hash(tx.first) << " to tx_pool");
}
}
}
//------------------------------------------------------------------
bool Blockchain::flush_txes_from_pool(const std::vector<crypto::hash> &txids)
{
CRITICAL_REGION_LOCAL(m_tx_pool);
@ -3974,7 +3979,8 @@ bool Blockchain::flush_txes_from_pool(const std::vector<crypto::hash> &txids)
// Needs to validate the block and acquire each transaction from the
// transaction mem_pool, then pass the block and transactions to
// m_db->add_block()
bool Blockchain::handle_block_to_main_chain(const block& bl, const crypto::hash& id, block_verification_context& bvc, bool notify/* = true*/)
bool Blockchain::handle_block_to_main_chain(const block& bl, const crypto::hash& id,
block_verification_context& bvc, pool_supplement& extra_block_txs)
{
LOG_PRINT_L3("Blockchain::" << __func__);
@ -4126,10 +4132,66 @@ leave:
goto leave;
}
// verify all non-input consensus rules for txs inside the pool supplement (if not inside checkpoint zone)
#if defined(PER_BLOCK_CHECKPOINT)
if (!fast_check)
#endif
{
tx_verification_context tvc{};
// If fail non-input consensus rule checking...
if (!ver_non_input_consensus(extra_block_txs, tvc, hf_version))
{
MERROR_VER("Pool supplement provided for block with id: " << id << " failed to pass validation");
bvc.m_verifivation_failed = true;
goto leave;
}
}
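
The pool-supplement overload of ver_non_input_consensus() is declared in tx_verification_utils.h, which is not shown in this excerpt; from the two call sites in this diff its declaration presumably looks roughly like the following sketch.

    // Assumed shape, inferred from the call sites in this diff:
    bool ver_non_input_consensus(pool_supplement& extra_block_txs,
                                 tx_verification_context& tvc,
                                 std::uint8_t hf_version);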
size_t coinbase_weight = get_transaction_weight(bl.miner_tx);
size_t cumulative_block_weight = coinbase_weight;
std::vector<std::pair<transaction, blobdata>> txs;
// tuple fields: txid, tx weight, was it found in the mempool?
std::vector<std::tuple<crypto::hash, size_t, bool>> txs_meta;
// This will be the data sent to the ZMQ pool listeners for txs which skipped the mempool
std::vector<txpool_event> txpool_events;
// this lambda returns relevant txs back to the mempool
auto return_txs_to_pool = [this, &txs, &txs_meta, &hf_version]()
{
if (txs_meta.size() != txs.size())
{
MERROR("BUG: txs_meta and txs not matching size!!!");
return;
}
for (size_t i = 0; i < txs.size(); ++i)
{
// if this transaction wasn't ever in the pool, don't return it back to the pool
const bool found_in_pool = std::get<2>(txs_meta[i]);
if (!found_in_pool)
continue;
transaction &tx = txs[i].first;
const crypto::hash &txid = std::get<0>(txs_meta[i]);
const blobdata &tx_blob = txs[i].second;
const size_t tx_weight = std::get<1>(txs_meta[i]);
// We assume that if they were in a block, the transactions are already known to the network
// as a whole. However, if we had mined that block, that might not always be true. Unlikely
// though, and always relaying these again might cause a spike of traffic as many nodes
// re-relay all the transactions in a popped block when a reorg happens. You might notice that
// we also set the "nic_verified_hf_version" parameter. Since we know we took this transaction
// from the mempool earlier in this function call, when the mempool has the same current fork
// version, we can return it without re-verifying the consensus rules on it.
cryptonote::tx_verification_context tvc{};
if (!m_tx_pool.add_tx(tx, txid, tx_blob, tx_weight, tvc, relay_method::block, true,
hf_version, hf_version))
MERROR("Failed to return taken transaction with hash: " << txid << " to tx_pool");
}
};
key_images_container keys;
uint64_t fee_summary = 0;
@ -4142,18 +4204,14 @@ leave:
// XXX old code adds miner tx here
size_t tx_index = 0;
// Iterate over the block's transaction hashes, grabbing each
// from the tx_pool and validating them. Each is then added
// from the tx_pool (or from extra_block_txs) and validating them. Each is then added
// to txs. Keys spent in each are added to <keys> by the double spend check.
txs.reserve(bl.tx_hashes.size());
txs_meta.reserve(bl.tx_hashes.size());
txpool_events.reserve(bl.tx_hashes.size());
for (const crypto::hash& tx_id : bl.tx_hashes)
{
transaction tx_tmp;
blobdata txblob;
size_t tx_weight = 0;
uint64_t fee = 0;
bool relayed = false, do_not_relay = false, double_spend_seen = false, pruned = false;
TIME_MEASURE_START(aa);
// XXX old code does not check whether tx exists
@ -4161,21 +4219,73 @@ leave:
{
MERROR("Block with id: " << id << " attempting to add transaction already in blockchain with id: " << tx_id);
bvc.m_verifivation_failed = true;
return_tx_to_pool(txs);
goto leave;
return_txs_to_pool();
return false;
}
TIME_MEASURE_FINISH(aa);
t_exists += aa;
TIME_MEASURE_START(bb);
// get transaction with hash <tx_id> from tx_pool
if(!m_tx_pool.take_tx(tx_id, tx_tmp, txblob, tx_weight, fee, relayed, do_not_relay, double_spend_seen, pruned))
// get transaction with hash <tx_id> from m_tx_pool or extra_block_txs
// tx info we want:
// * tx as `cryptonote::transaction`
// * blob
// * weight
// * fee
// * is pruned?
txs.emplace_back();
transaction &tx = txs.back().first;
blobdata &txblob = txs.back().second;
size_t tx_weight{};
uint64_t fee{};
bool pruned{};
/*
* Try pulling transaction data from the mempool proper first. If that fails, then try pulling
* from the block supplement. We add txs pulled from the block to the txpool events for future
* notifications, since if the tx skipped the mempool, then listeners have not yet received a
* notification for this tx.
*/
bool _unused1, _unused2, _unused3;
const bool found_tx_in_pool{
m_tx_pool.take_tx(tx_id, tx, txblob, tx_weight, fee,
_unused1, _unused2, _unused3, pruned, /*suppress_missing_msgs=*/true)
};
bool find_tx_failure{!found_tx_in_pool};
if (!found_tx_in_pool) // if not in mempool:
{
MERROR_VER("Block with id: " << id << " has at least one unknown transaction with id: " << tx_id);
const auto extra_txs_it{extra_block_txs.txs_by_txid.find(tx_id)};
if (extra_txs_it != extra_block_txs.txs_by_txid.end()) // if in block supplement:
{
tx = std::move(extra_txs_it->second.first);
txblob = std::move(extra_txs_it->second.second);
tx_weight = get_transaction_weight(tx, txblob.size());
fee = get_tx_fee(tx);
pruned = tx.pruned;
extra_block_txs.txs_by_txid.erase(extra_txs_it);
txpool_events.emplace_back(txpool_event{tx, tx_id, txblob.size(), tx_weight, true});
find_tx_failure = false;
}
}
// @TODO: We should move this section (checking if the daemon has all txs from the block) to
// right after the PoW check. Since it's now expected the node will sometimes not have all txs
// in its pool at this point nor the txs included as fluffy txs (and will need to re-request
// missing fluffy txs), then the node will sometimes waste cycles doing verification for some
// txs twice.
if (find_tx_failure) // did not find txid in mempool or provided extra block txs
{
const bool fully_supplemented_block = extra_block_txs.txs_by_txid.size() >= bl.tx_hashes.size();
if (fully_supplemented_block)
MERROR_VER("Block with id: " << id << " has at least one unknown transaction with id: " << tx_id);
else
LOG_PRINT_L2("Block with id: " << id << " has at least one unknown transaction with id: " << tx_id);
txs.pop_back(); // We push to the back preemptively. On fail, we need txs & txs_meta to match size
bvc.m_verifivation_failed = true;
return_tx_to_pool(txs);
goto leave;
bvc.m_missing_txs = true;
return_txs_to_pool();
return false;
}
if (pruned)
++n_pruned;
@ -4185,8 +4295,7 @@ leave:
// add the transaction to the temp list of transactions, so we can either
// store the list of transactions all at once or return the ones we've
// taken from the tx_pool back to it if the block fails verification.
txs.push_back(std::make_pair(std::move(tx_tmp), std::move(txblob)));
transaction &tx = txs.back().first;
txs_meta.emplace_back(tx_id, tx_weight, found_tx_in_pool);
TIME_MEASURE_START(dd);
// FIXME: the storage should not be responsible for validation.
@ -4219,30 +4328,12 @@ leave:
//TODO: why is this done? make sure that keeping invalid blocks makes sense.
add_block_as_invalid(bl, id);
MERROR_VER("Block with id " << id << " added as invalid because of wrong inputs in transactions");
MERROR_VER("tx_index " << tx_index << ", m_blocks_txs_check " << m_blocks_txs_check.size() << ":");
for (const auto &h: m_blocks_txs_check) MERROR_VER(" " << h);
bvc.m_verifivation_failed = true;
return_tx_to_pool(txs);
goto leave;
return_txs_to_pool();
return false;
}
}
#if defined(PER_BLOCK_CHECKPOINT)
else
{
// ND: if fast_check is enabled for blocks, there is no need to check
// the transaction inputs, but do some sanity checks anyway.
if (tx_index >= m_blocks_txs_check.size() || memcmp(&m_blocks_txs_check[tx_index++], &tx_id, sizeof(tx_id)) != 0)
{
MERROR_VER("Block with id: " << id << " has at least one transaction (id: " << tx_id << ") with wrong inputs.");
//TODO: why is this done? make sure that keeping invalid blocks makes sense.
add_block_as_invalid(bl, id);
MERROR_VER("Block with id " << id << " added as invalid because of wrong inputs in transactions");
bvc.m_verifivation_failed = true;
return_tx_to_pool(txs);
goto leave;
}
}
#endif
TIME_MEASURE_FINISH(cc);
t_checktx += cc;
fee_summary += fee;
@ -4260,8 +4351,6 @@ leave:
cumulative_block_weight = m_blocks_hash_check[blockchain_height].second;
}
m_blocks_txs_check.clear();
TIME_MEASURE_START(vmt);
uint64_t base_reward = 0;
uint64_t already_generated_coins = blockchain_height ? m_db->get_block_already_generated_coins(blockchain_height - 1) : 0;
@ -4269,8 +4358,8 @@ leave:
{
MERROR_VER("Block with id: " << id << " has incorrect miner transaction");
bvc.m_verifivation_failed = true;
return_tx_to_pool(txs);
goto leave;
return_txs_to_pool();
return false;
}
TIME_MEASURE_FINISH(vmt);
@ -4308,7 +4397,7 @@ leave:
LOG_ERROR("Error adding block with hash: " << id << " to blockchain, what = " << e.what());
m_batch_success = false;
bvc.m_verifivation_failed = true;
return_tx_to_pool(txs);
return_txs_to_pool();
return false;
}
catch (const std::exception& e)
@ -4317,7 +4406,7 @@ leave:
LOG_ERROR("Error adding block with hash: " << id << " to blockchain, what = " << e.what());
m_batch_success = false;
bvc.m_verifivation_failed = true;
return_tx_to_pool(txs);
return_txs_to_pool();
return false;
}
}
@ -4370,6 +4459,9 @@ leave:
const crypto::hash seedhash = get_block_id_by_height(crypto::rx_seedheight(new_height));
send_miner_notifications(new_height, seedhash, id, already_generated_coins);
// Make sure that txpool notifications happen BEFORE block notifications
notify_txpool_event(std::move(txpool_events));
for (const auto& notifier: m_block_notifiers)
notifier(new_height - 1, {std::addressof(bl), 1});
@ -4497,7 +4589,14 @@ bool Blockchain::update_next_cumulative_weight_limit(uint64_t *long_term_effecti
return true;
}
//------------------------------------------------------------------
bool Blockchain::add_new_block(const block& bl, block_verification_context& bvc)
bool Blockchain::add_new_block(const block& bl_, block_verification_context& bvc)
{
pool_supplement ps{};
return add_new_block(bl_, bvc, ps);
}
//------------------------------------------------------------------
bool Blockchain::add_new_block(const block& bl, block_verification_context& bvc,
pool_supplement& extra_block_txs)
{
try
{
@ -4511,7 +4610,6 @@ bool Blockchain::add_new_block(const block& bl, block_verification_context& bvc)
{
LOG_PRINT_L3("block with id = " << id << " already exists");
bvc.m_already_exists = true;
m_blocks_txs_check.clear();
return false;
}
@ -4521,14 +4619,12 @@ bool Blockchain::add_new_block(const block& bl, block_verification_context& bvc)
//chain switching or wrong block
bvc.m_added_to_main_chain = false;
rtxn_guard.stop();
bool r = handle_alternative_block(bl, id, bvc);
m_blocks_txs_check.clear();
return r;
return handle_alternative_block(bl, id, bvc, extra_block_txs);
//never relay alternative blocks
}
rtxn_guard.stop();
return handle_block_to_main_chain(bl, id, bvc);
return handle_block_to_main_chain(bl, id, bvc, extra_block_txs);
}
catch (const std::exception &e)
@ -4698,7 +4794,6 @@ bool Blockchain::cleanup_handle_incoming_blocks(bool force_sync)
TIME_MEASURE_FINISH(t1);
m_blocks_longhash_table.clear();
m_scan_table.clear();
m_blocks_txs_check.clear();
// when we're well clear of the precomputed hashes, free the memory
if (!m_blocks_hash_check.empty() && m_db->height() > m_blocks_hash_check.size() + 4096)
@ -5220,6 +5315,27 @@ bool Blockchain::prepare_handle_incoming_blocks(const std::vector<block_complete
return true;
}
void Blockchain::prepare_handle_incoming_block_no_preprocess(const size_t block_byte_estimate)
{
// acquire locks
m_tx_pool.lock();
CRITICAL_REGION_LOCAL1(m_blockchain_lock);
// increment sync byte counter to trigger sync against database backing store
// later in cleanup_handle_incoming_blocks()
m_bytes_to_sync += block_byte_estimate;
// spin until we start a batch
while (!m_db->batch_start(1, block_byte_estimate)) {
m_blockchain_lock.unlock();
m_tx_pool.unlock();
epee::misc_utils::sleep_no_w(1000);
m_tx_pool.lock();
m_blockchain_lock.lock();
}
m_batch_success = true;
}
void Blockchain::add_txpool_tx(const crypto::hash &txid, const cryptonote::blobdata &blob, const txpool_tx_meta_t &meta)
{
m_db->add_txpool_tx(txid, blob, meta);
@ -5279,6 +5395,12 @@ void Blockchain::set_user_options(uint64_t maxthreads, bool sync_on_blocks, uint
m_max_prepare_blocks_threads = maxthreads;
}
void Blockchain::set_txpool_notify(TxpoolNotifyCallback&& notify)
{
std::lock_guard<decltype(m_txpool_notifier_mutex)> lg(m_txpool_notifier_mutex);
m_txpool_notifier = notify;
}
void Blockchain::add_block_notify(BlockNotifyCallback&& notify)
{
if (notify)
@ -5297,6 +5419,22 @@ void Blockchain::add_miner_notify(MinerNotifyCallback&& notify)
}
}
void Blockchain::notify_txpool_event(std::vector<txpool_event>&& event)
{
std::lock_guard<decltype(m_txpool_notifier_mutex)> lg(m_txpool_notifier_mutex);
if (m_txpool_notifier)
{
try
{
m_txpool_notifier(std::move(event));
}
catch (const std::exception &e)
{
MDEBUG("During Blockchain::notify_txpool_event(), ignored exception: " << e.what());
}
}
}
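
As an illustration (not part of the diff), RPC/ZMQ code can now register its pool listener directly on the Blockchain object. The lambda matches the TxpoolNotifyCallback type from blockchain.h; `blockchain` is an assumed instance.

    blockchain.set_txpool_notify([](std::vector<cryptonote::txpool_event> events) {
      for (const auto &e : events)
        MDEBUG("txpool event: tx " << e.hash << ", " << e.blob_size << " bytes, weight " << e.weight);
    });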
void Blockchain::safesyncmode(const bool onoff)
{
/* all of this is no-op'd if the user set a specific

View File

@ -92,6 +92,7 @@ namespace cryptonote
*/
typedef std::function<const epee::span<const unsigned char>(cryptonote::network_type network)> GetCheckpointsCallback;
typedef boost::function<void(std::vector<txpool_event>)> TxpoolNotifyCallback;
typedef boost::function<void(uint64_t /* height */, epee::span<const block> /* blocks */)> BlockNotifyCallback;
typedef boost::function<void(uint8_t /* major_version */, uint64_t /* height */, const crypto::hash& /* prev_id */, const crypto::hash& /* seed_hash */, difficulty_type /* diff */, uint64_t /* median_weight */, uint64_t /* already_generated_coins */, const std::vector<tx_block_template_backlog_entry>& /* tx_backlog */)> MinerNotifyCallback;
@ -248,6 +249,16 @@ namespace cryptonote
*/
bool prepare_handle_incoming_blocks(const std::vector<block_complete_entry> &blocks_entry, std::vector<block> &blocks);
/**
* @brief prepare the blockchain for handling an incoming block, without performing preprocessing
*
* @param block_byte_estimate an estimate of the byte size of the block & its transactions
*
* This function should *always* be followed up by a call to cleanup_handle_incoming_blocks()
* later in the same thread.
*/
void prepare_handle_incoming_block_no_preprocess(const size_t block_byte_estimate);
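
A minimal usage sketch of the pairing described above, mirroring what core::handle_single_incoming_block() does later in this diff; `bc` is an assumed Blockchain reference.

    bc.prepare_handle_incoming_block_no_preprocess(block_blob.size());
    const auto cleanup = epee::misc_utils::create_scope_leave_handler([&bc] {
      bc.cleanup_handle_incoming_blocks();
    });
    // ... add_new_block() / block handling goes here; cleanup runs on scope exit ...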
/**
* @brief incoming blocks post-processing, cleanup, and disk sync
*
@ -345,11 +356,15 @@ namespace cryptonote
*
* @param bl_ the block to be added
* @param bvc metadata about the block addition's success/failure
* @param extra_block_txs txs belonging to this block that may not be in the mempool
*
* @return true on successful addition to the blockchain, else false
*/
bool add_new_block(const block& bl_, block_verification_context& bvc);
bool add_new_block(const block& bl_, block_verification_context& bvc,
pool_supplement& extra_block_txs);
/**
* @brief clears the blockchain and starts a new one
*
@ -696,10 +711,13 @@ namespace cryptonote
*
* @param tx the transaction to check the outputs of
* @param tvc returned info about tx verification
* @param hf_version hard fork version
*
* @return false if any outputs do not conform, otherwise true
*/
bool check_tx_outputs(const transaction& tx, tx_verification_context &tvc) const;
static bool check_tx_outputs(const transaction& tx,
tx_verification_context &tvc,
std::uint8_t hf_version);
/**
* @brief gets the block weight limit based on recent blocks
@ -813,6 +831,13 @@ namespace cryptonote
void set_user_options(uint64_t maxthreads, bool sync_on_blocks, uint64_t sync_threshold,
blockchain_db_sync_mode sync_mode, bool fast_sync);
/**
* @brief sets a txpool notify object to call for every new tx used to add a new block
*
* @param notify the notify object to call for every new tx used to add a new block
*/
void set_txpool_notify(TxpoolNotifyCallback&& notify);
/**
* @brief sets a block notify object to call for every new block
*
@ -834,6 +859,11 @@ namespace cryptonote
*/
void set_reorg_notify(const std::shared_ptr<tools::Notify> &notify) { m_reorg_notify = notify; }
/**
* @brief Notify this Blockchain's txpool notifier about a txpool event
*/
void notify_txpool_event(std::vector<txpool_event>&& event);
/**
* @brief Put DB in safe sync mode
*/
@ -1067,13 +1097,6 @@ namespace cryptonote
void cancel();
/**
* @brief called when we see a tx originating from a block
*
* Used for handling txes from historical blocks in a fast way
*/
void on_new_tx_from_block(const cryptonote::transaction &tx);
/**
* @brief returns the timestamps of the last N blocks
*/
@ -1150,7 +1173,6 @@ namespace cryptonote
// Keccak hashes for each block and for fast pow checking
std::vector<std::pair<crypto::hash, crypto::hash>> m_blocks_hash_of_hashes;
std::vector<std::pair<crypto::hash, uint64_t>> m_blocks_hash_check;
std::vector<crypto::hash> m_blocks_txs_check;
blockchain_db_sync_mode m_db_sync_mode;
bool m_fast_sync;
@ -1211,6 +1233,9 @@ namespace cryptonote
bool m_batch_success;
TxpoolNotifyCallback m_txpool_notifier;
mutable std::mutex m_txpool_notifier_mutex;
/* `boost::function` is used because the implementation never allocates if
the callable object has a single `std::shared_ptr` or `std::weak_ptr`
internally. Whereas, the libstdc++ `std::function` will allocate. */
@ -1331,11 +1356,10 @@ namespace cryptonote
*
* @param bl the block to be added
* @param bvc metadata concerning the block's validity
* @param notify if set to true, sends new block notification on success
*
* @return true if the block was added successfully, otherwise false
*/
bool handle_block_to_main_chain(const block& bl, block_verification_context& bvc, bool notify = true);
bool handle_block_to_main_chain(const block& bl, block_verification_context& bvc);
/**
* @brief validate and add a new block to the end of the blockchain
@ -1347,11 +1371,12 @@ namespace cryptonote
* @param bl the block to be added
* @param id the hash of the block
* @param bvc metadata concerning the block's validity
* @param notify if set to true, sends new block notification on success
* @param extra_block_txs txs belonging to this block that may not be in the mempool
*
* @return true if the block was added successfully, otherwise false
*/
bool handle_block_to_main_chain(const block& bl, const crypto::hash& id, block_verification_context& bvc, bool notify = true);
bool handle_block_to_main_chain(const block& bl, const crypto::hash& id,
block_verification_context& bvc, pool_supplement& extra_block_txs);
/**
* @brief validate and add a new block to an alternate blockchain
@ -1363,10 +1388,12 @@ namespace cryptonote
* @param b the block to be added
* @param id the hash of the block
* @param bvc metadata concerning the block's validity
* @param extra_block_txs txs belonging to this block that may not be in the mempool
*
* @return true if the block was added successfully, otherwise false
*/
bool handle_alternative_block(const block& b, const crypto::hash& id, block_verification_context& bvc);
bool handle_alternative_block(const block& b, const crypto::hash& id,
block_verification_context& bvc, pool_supplement& extra_block_txs);
/**
* @brief builds a list of blocks connecting a block to the main chain
@ -1551,7 +1578,6 @@ namespace cryptonote
* @return true
*/
bool update_next_cumulative_weight_limit(uint64_t *long_term_effective_median_block_weight = NULL);
void return_tx_to_pool(std::vector<std::pair<transaction, blobdata>> &txs);
/**
* @brief make sure a transaction isn't attempting a double-spend

View File

@ -55,6 +55,7 @@ using namespace epee;
#include "rpc/zmq_pub.h"
#include "common/notify.h"
#include "hardforks/hardforks.h"
#include "tx_verification_utils.h"
#include "version.h"
#include <boost/filesystem.hpp>
@ -66,8 +67,6 @@ DISABLE_VS_WARNINGS(4355)
#define MERROR_VER(x) MCERROR("verify", x)
#define BAD_SEMANTICS_TXES_MAX_SIZE 100
// basically at least how many bytes the block itself serializes to without the miner tx
#define BLOCK_SIZE_SANITY_LEEWAY 100
@ -173,11 +172,6 @@ namespace cryptonote
, "Check for new versions of monero: [disabled|notify|download|update]"
, "notify"
};
static const command_line::arg_descriptor<bool> arg_no_fluffy_blocks = {
"no-fluffy-blocks"
, "Relay blocks as normal blocks"
, false
};
static const command_line::arg_descriptor<size_t> arg_max_txpool_weight = {
"max-txpool-weight"
, "Set maximum txpool weight in bytes."
@ -266,13 +260,6 @@ namespace cryptonote
{
m_blockchain_storage.set_enforce_dns_checkpoints(enforce_dns);
}
//-----------------------------------------------------------------------------------
void core::set_txpool_listener(boost::function<void(std::vector<txpool_event>)> zmq_pub)
{
CRITICAL_REGION_LOCAL(m_incoming_tx_lock);
m_zmq_pub = std::move(zmq_pub);
}
//-----------------------------------------------------------------------------------------------
bool core::update_checkpoints(const bool skip_dns /* = false */)
{
@ -336,7 +323,6 @@ namespace cryptonote
command_line::add_arg(desc, arg_show_time_stats);
command_line::add_arg(desc, arg_block_sync_size);
command_line::add_arg(desc, arg_check_updates);
command_line::add_arg(desc, arg_no_fluffy_blocks);
command_line::add_arg(desc, arg_test_dbg_lock_sleep);
command_line::add_arg(desc, arg_offline);
command_line::add_arg(desc, arg_disable_dns_checkpoints);
@ -384,7 +370,6 @@ namespace cryptonote
set_enforce_dns_checkpoints(command_line::get_arg(vm, arg_dns_checkpoints));
test_drop_download_height(command_line::get_arg(vm, arg_test_drop_download_height));
m_fluffy_blocks_enabled = !get_arg(vm, arg_no_fluffy_blocks);
m_offline = get_arg(vm, arg_offline);
m_disable_dns_checkpoints = get_arg(vm, arg_disable_dns_checkpoints);
@ -780,359 +765,81 @@ namespace cryptonote
return false;
}
//-----------------------------------------------------------------------------------------------
bool core::handle_incoming_tx_pre(const tx_blob_entry& tx_blob, tx_verification_context& tvc, cryptonote::transaction &tx, crypto::hash &tx_hash)
bool core::handle_incoming_tx(const blobdata& tx_blob, tx_verification_context& tvc, relay_method tx_relay, bool relayed)
{
tvc = {};
if(tx_blob.blob.size() > get_max_tx_size())
TRY_ENTRY();
CRITICAL_REGION_LOCAL(m_incoming_tx_lock);
if (tx_blob.size() > get_max_tx_size())
{
LOG_PRINT_L1("WRONG TRANSACTION BLOB, too big size " << tx_blob.blob.size() << ", rejected");
LOG_PRINT_L1("WRONG TRANSACTION BLOB, too big size " << tx_blob.size() << ", rejected");
tvc.m_verifivation_failed = true;
tvc.m_too_big = true;
return false;
}
tx_hash = crypto::null_hash;
bool r;
if (tx_blob.prunable_hash == crypto::null_hash)
transaction tx;
crypto::hash txid;
if (!parse_and_validate_tx_from_blob(tx_blob, tx, txid))
{
r = parse_tx_from_blob(tx, tx_hash, tx_blob.blob);
}
else
{
r = parse_and_validate_tx_base_from_blob(tx_blob.blob, tx);
if (r)
{
tx.set_prunable_hash(tx_blob.prunable_hash);
tx_hash = cryptonote::get_pruned_transaction_hash(tx, tx_blob.prunable_hash);
tx.set_hash(tx_hash);
}
}
if (!r)
{
LOG_PRINT_L1("WRONG TRANSACTION BLOB, Failed to parse, rejected");
tvc.m_verifivation_failed = true;
return false;
}
//std::cout << "!"<< tx.vin.size() << std::endl;
bad_semantics_txes_lock.lock();
for (int idx = 0; idx < 2; ++idx)
{
if (bad_semantics_txes[idx].find(tx_hash) != bad_semantics_txes[idx].end())
{
bad_semantics_txes_lock.unlock();
LOG_PRINT_L1("Transaction already seen with bad semantics, rejected");
tvc.m_verifivation_failed = true;
return false;
}
}
bad_semantics_txes_lock.unlock();
uint8_t version = m_blockchain_storage.get_current_hard_fork_version();
const size_t max_tx_version = version == 1 ? 1 : 2;
if (tx.version == 0 || tx.version > max_tx_version)
{
// v2 is the latest one we know
MERROR_VER("Bad tx version (" << tx.version << ", max is " << max_tx_version << ")");
LOG_PRINT_L1("Incoming transactions failed to parse, rejected");
tvc.m_verifivation_failed = true;
return false;
}
return true;
}
//-----------------------------------------------------------------------------------------------
bool core::handle_incoming_tx_post(const tx_blob_entry& tx_blob, tx_verification_context& tvc, cryptonote::transaction &tx, crypto::hash &tx_hash)
{
if(!check_tx_syntax(tx))
{
LOG_PRINT_L1("WRONG TRANSACTION BLOB, Failed to check tx " << tx_hash << " syntax, rejected");
tvc.m_verifivation_failed = true;
const uint64_t tx_weight = get_transaction_weight(tx, tx_blob.size());
if (!add_new_tx(tx, txid, tx_blob, tx_weight, tvc, tx_relay, relayed))
return false;
}
return true;
}
//-----------------------------------------------------------------------------------------------
void core::set_semantics_failed(const crypto::hash &tx_hash)
{
LOG_PRINT_L1("WRONG TRANSACTION BLOB, Failed to check tx " << tx_hash << " semantic, rejected");
bad_semantics_txes_lock.lock();
bad_semantics_txes[0].insert(tx_hash);
if (bad_semantics_txes[0].size() >= BAD_SEMANTICS_TXES_MAX_SIZE)
if (tvc.m_verifivation_failed)
{
std::swap(bad_semantics_txes[0], bad_semantics_txes[1]);
bad_semantics_txes[0].clear();
MERROR_VER("Transaction verification failed: " << txid);
return false;
}
bad_semantics_txes_lock.unlock();
}
//-----------------------------------------------------------------------------------------------
static bool is_canonical_bulletproof_layout(const std::vector<rct::Bulletproof> &proofs)
{
if (proofs.size() != 1)
return false;
const size_t sz = proofs[0].V.size();
if (sz == 0 || sz > BULLETPROOF_MAX_OUTPUTS)
return false;
return true;
}
//-----------------------------------------------------------------------------------------------
static bool is_canonical_bulletproof_plus_layout(const std::vector<rct::BulletproofPlus> &proofs)
{
if (proofs.size() != 1)
return false;
const size_t sz = proofs[0].V.size();
if (sz == 0 || sz > BULLETPROOF_PLUS_MAX_OUTPUTS)
return false;
return true;
}
//-----------------------------------------------------------------------------------------------
bool core::handle_incoming_tx_accumulated_batch(std::vector<tx_verification_batch_info> &tx_info, bool keeped_by_block)
{
bool ret = true;
if (keeped_by_block && get_blockchain_storage().is_within_compiled_block_hash_area())
else if (tvc.m_verifivation_impossible)
{
MTRACE("Skipping semantics check for tx kept by block in embedded hash area");
MERROR_VER("Transaction verification impossible: " << txid);
return false;
}
else if (!tvc.m_added_to_pool)
{
MDEBUG("Transaction " << txid << " not added to pool");
return true;
}
std::vector<const rct::rctSig*> rvv;
for (size_t n = 0; n < tx_info.size(); ++n)
{
if (!check_tx_semantic(*tx_info[n].tx, keeped_by_block))
{
set_semantics_failed(tx_info[n].tx_hash);
tx_info[n].tvc.m_verifivation_failed = true;
tx_info[n].result = false;
continue;
}
MDEBUG("tx added to pool: " << txid);
if (tx_info[n].tx->version < 2)
continue;
const rct::rctSig &rv = tx_info[n].tx->rct_signatures;
switch (rv.type) {
case rct::RCTTypeNull:
// coinbase should not come here, so we reject for all other types
MERROR_VER("Unexpected Null rctSig type");
set_semantics_failed(tx_info[n].tx_hash);
tx_info[n].tvc.m_verifivation_failed = true;
tx_info[n].result = false;
break;
case rct::RCTTypeSimple:
if (!rct::verRctSemanticsSimple(rv))
{
MERROR_VER("rct signature semantics check failed");
set_semantics_failed(tx_info[n].tx_hash);
tx_info[n].tvc.m_verifivation_failed = true;
tx_info[n].result = false;
break;
}
break;
case rct::RCTTypeFull:
if (!rct::verRct(rv, true))
{
MERROR_VER("rct signature semantics check failed");
set_semantics_failed(tx_info[n].tx_hash);
tx_info[n].tvc.m_verifivation_failed = true;
tx_info[n].result = false;
break;
}
break;
case rct::RCTTypeBulletproof:
case rct::RCTTypeBulletproof2:
case rct::RCTTypeCLSAG:
if (!is_canonical_bulletproof_layout(rv.p.bulletproofs))
{
MERROR_VER("Bulletproof does not have canonical form");
set_semantics_failed(tx_info[n].tx_hash);
tx_info[n].tvc.m_verifivation_failed = true;
tx_info[n].result = false;
break;
}
rvv.push_back(&rv); // delayed batch verification
break;
case rct::RCTTypeBulletproofPlus:
if (!is_canonical_bulletproof_plus_layout(rv.p.bulletproofs_plus))
{
MERROR_VER("Bulletproof_plus does not have canonical form");
set_semantics_failed(tx_info[n].tx_hash);
tx_info[n].tvc.m_verifivation_failed = true;
tx_info[n].result = false;
break;
}
rvv.push_back(&rv); // delayed batch verification
break;
default:
MERROR_VER("Unknown rct type: " << rv.type);
set_semantics_failed(tx_info[n].tx_hash);
tx_info[n].tvc.m_verifivation_failed = true;
tx_info[n].result = false;
break;
}
}
if (!rvv.empty() && !rct::verRctSemanticsSimple(rvv))
{
LOG_PRINT_L1("One transaction among this group has bad semantics, verifying one at a time");
ret = false;
const bool assumed_bad = rvv.size() == 1; // if there's only one tx, it must be the bad one
for (size_t n = 0; n < tx_info.size(); ++n)
{
if (!tx_info[n].result)
continue;
if (tx_info[n].tx->rct_signatures.type != rct::RCTTypeBulletproof && tx_info[n].tx->rct_signatures.type != rct::RCTTypeBulletproof2 && tx_info[n].tx->rct_signatures.type != rct::RCTTypeCLSAG && tx_info[n].tx->rct_signatures.type != rct::RCTTypeBulletproofPlus)
continue;
if (assumed_bad || !rct::verRctSemanticsSimple(tx_info[n].tx->rct_signatures))
{
set_semantics_failed(tx_info[n].tx_hash);
tx_info[n].tvc.m_verifivation_failed = true;
tx_info[n].result = false;
}
}
}
return ret;
return true;
CATCH_ENTRY_L0("core::handle_incoming_tx()", false);
}
//-----------------------------------------------------------------------------------------------
bool core::handle_incoming_txs(const epee::span<const tx_blob_entry> tx_blobs, epee::span<tx_verification_context> tvc, relay_method tx_relay, bool relayed)
{
TRY_ENTRY();
if (tx_blobs.size() != tvc.size())
{
MERROR("tx_blobs and tx_verification_context spans must have equal size");
return false;
}
std::vector<txpool_event> results(tx_blobs.size());
CRITICAL_REGION_LOCAL(m_incoming_tx_lock);
tools::threadpool& tpool = tools::threadpool::getInstanceForCompute();
tools::threadpool::waiter waiter(tpool);
epee::span<tx_blob_entry>::const_iterator it = tx_blobs.begin();
for (size_t i = 0; i < tx_blobs.size(); i++, ++it) {
tpool.submit(&waiter, [&, i, it] {
try
{
results[i].res = handle_incoming_tx_pre(*it, tvc[i], results[i].tx, results[i].hash);
}
catch (const std::exception &e)
{
MERROR_VER("Exception in handle_incoming_tx_pre: " << e.what());
tvc[i].m_verifivation_failed = true;
results[i].res = false;
}
});
}
if (!waiter.wait())
return false;
it = tx_blobs.begin();
std::vector<bool> already_have(tx_blobs.size(), false);
for (size_t i = 0; i < tx_blobs.size(); i++, ++it) {
if (!results[i].res)
continue;
if(m_mempool.have_tx(results[i].hash, relay_category::legacy))
{
LOG_PRINT_L2("tx " << results[i].hash << "already have transaction in tx_pool");
already_have[i] = true;
}
else if(m_blockchain_storage.have_tx(results[i].hash))
{
LOG_PRINT_L2("tx " << results[i].hash << " already have transaction in blockchain");
already_have[i] = true;
}
else
{
tpool.submit(&waiter, [&, i, it] {
try
{
results[i].res = handle_incoming_tx_post(*it, tvc[i], results[i].tx, results[i].hash);
}
catch (const std::exception &e)
{
MERROR_VER("Exception in handle_incoming_tx_post: " << e.what());
tvc[i].m_verifivation_failed = true;
results[i].res = false;
}
});
}
}
if (!waiter.wait())
return false;
std::vector<tx_verification_batch_info> tx_info;
tx_info.reserve(tx_blobs.size());
for (size_t i = 0; i < tx_blobs.size(); i++) {
if (!results[i].res || already_have[i])
continue;
tx_info.push_back({&results[i].tx, results[i].hash, tvc[i], results[i].res});
}
if (!tx_info.empty())
handle_incoming_tx_accumulated_batch(tx_info, tx_relay == relay_method::block);
bool valid_events = false;
bool ok = true;
it = tx_blobs.begin();
for (size_t i = 0; i < tx_blobs.size(); i++, ++it) {
if (!results[i].res)
{
ok = false;
continue;
}
if (tx_relay == relay_method::block)
get_blockchain_storage().on_new_tx_from_block(results[i].tx);
if (already_have[i])
continue;
results[i].blob_size = it->blob.size();
results[i].weight = results[i].tx.pruned ? get_pruned_transaction_weight(results[i].tx) : get_transaction_weight(results[i].tx, it->blob.size());
ok &= add_new_tx(results[i].tx, results[i].hash, tx_blobs[i].blob, results[i].weight, tvc[i], tx_relay, relayed);
if(tvc[i].m_verifivation_failed)
{MERROR_VER("Transaction verification failed: " << results[i].hash);}
else if(tvc[i].m_verifivation_impossible)
{MERROR_VER("Transaction verification impossible: " << results[i].hash);}
if(tvc[i].m_added_to_pool && results[i].tx.extra.size() <= MAX_TX_EXTRA_SIZE)
{
MDEBUG("tx added: " << results[i].hash);
valid_events = true;
}
else
results[i].res = false;
}
if (valid_events && m_zmq_pub && matches_category(tx_relay, relay_category::legacy))
m_zmq_pub(std::move(results));
return ok;
CATCH_ENTRY_L0("core::handle_incoming_txs()", false);
}
//-----------------------------------------------------------------------------------------------
bool core::handle_incoming_tx(const tx_blob_entry& tx_blob, tx_verification_context& tvc, relay_method tx_relay, bool relayed)
{
return handle_incoming_txs({std::addressof(tx_blob), 1}, {std::addressof(tvc), 1}, tx_relay, relayed);
}
//-----------------------------------------------------------------------------------------------
bool core::check_tx_semantic(const transaction& tx, bool keeped_by_block) const
bool core::check_tx_semantic(const transaction& tx, tx_verification_context& tvc,
uint8_t hf_version)
{
if(!tx.vin.size())
{
MERROR_VER("tx with empty inputs, rejected for tx id= " << get_transaction_hash(tx));
tvc.m_verifivation_failed = true;
tvc.m_invalid_input = true;
return false;
}
if(!check_inputs_types_supported(tx))
{
MERROR_VER("unsupported input types for tx id= " << get_transaction_hash(tx));
tvc.m_verifivation_failed = true;
tvc.m_invalid_input = true;
return false;
}
if(!check_outs_valid(tx))
{
MERROR_VER("tx with invalid outputs, rejected for tx id= " << get_transaction_hash(tx));
tvc.m_verifivation_failed = true;
tvc.m_invalid_output = true;
return false;
}
if (tx.version > 1)
@ -1140,6 +847,8 @@ namespace cryptonote
if (tx.rct_signatures.outPk.size() != tx.vout.size())
{
MERROR_VER("tx with mismatched vout/outPk count, rejected for tx id= " << get_transaction_hash(tx));
tvc.m_verifivation_failed = true;
tvc.m_invalid_output = true;
return false;
}
}
@ -1147,6 +856,8 @@ namespace cryptonote
if(!check_money_overflow(tx))
{
MERROR_VER("tx has money overflow, rejected for tx id= " << get_transaction_hash(tx));
tvc.m_verifivation_failed = true;
tvc.m_overspend = true;
return false;
}
@ -1159,40 +870,43 @@ namespace cryptonote
if(amount_in <= amount_out)
{
MERROR_VER("tx with wrong amounts: ins " << amount_in << ", outs " << amount_out << ", rejected for tx id= " << get_transaction_hash(tx));
tvc.m_verifivation_failed = true;
tvc.m_overspend = true;
return false;
}
}
// for version > 1, ringct signatures check verifies amounts match
if(!keeped_by_block && get_transaction_weight(tx) >= m_blockchain_storage.get_current_cumulative_block_weight_limit() - CRYPTONOTE_COINBASE_BLOB_RESERVED_SIZE)
{
MERROR_VER("tx is too large " << get_transaction_weight(tx) << ", expected not bigger than " << m_blockchain_storage.get_current_cumulative_block_weight_limit() - CRYPTONOTE_COINBASE_BLOB_RESERVED_SIZE);
return false;
}
//check if tx use different key images
if(!check_tx_inputs_keyimages_diff(tx))
{
MERROR_VER("tx uses a single key image more than once");
tvc.m_verifivation_failed = true;
tvc.m_invalid_input = true;
return false;
}
const uint8_t hf_version = m_blockchain_storage.get_current_hard_fork_version();
if (!check_tx_inputs_ring_members_diff(tx, hf_version))
{
MERROR_VER("tx uses duplicate ring members");
tvc.m_verifivation_failed = true;
tvc.m_invalid_input = true;
return false;
}
if (!check_tx_inputs_keyimages_domain(tx))
{
MERROR_VER("tx uses key image not in the valid domain");
tvc.m_verifivation_failed = true;
tvc.m_invalid_input = true;
return false;
}
if (!check_output_types(tx, hf_version))
{
MERROR_VER("tx does not use valid output type(s)");
tvc.m_verifivation_failed = true;
tvc.m_invalid_output = true;
return false;
}
@ -1290,7 +1004,7 @@ namespace cryptonote
return std::pair<boost::multiprecision::uint128_t, boost::multiprecision::uint128_t>(emission_amount, total_fee_amount);
}
//-----------------------------------------------------------------------------------------------
bool core::check_tx_inputs_keyimages_diff(const transaction& tx) const
bool core::check_tx_inputs_keyimages_diff(const transaction& tx)
{
std::unordered_set<crypto::key_image> ki;
for(const auto& in: tx.vin)
@ -1302,7 +1016,7 @@ namespace cryptonote
return true;
}
//-----------------------------------------------------------------------------------------------
bool core::check_tx_inputs_ring_members_diff(const transaction& tx, const uint8_t hf_version) const
bool core::check_tx_inputs_ring_members_diff(const transaction& tx, const uint8_t hf_version)
{
if (hf_version >= 6)
{
@ -1317,7 +1031,7 @@ namespace cryptonote
return true;
}
//-----------------------------------------------------------------------------------------------
bool core::check_tx_inputs_keyimages_domain(const transaction& tx) const
bool core::check_tx_inputs_keyimages_domain(const transaction& tx)
{
std::unordered_set<crypto::key_image> ki;
for(const auto& in: tx.vin)
@ -1358,7 +1072,20 @@ namespace cryptonote
}
uint8_t version = m_blockchain_storage.get_current_hard_fork_version();
return m_mempool.add_tx(tx, tx_hash, blob, tx_weight, tvc, tx_relay, relayed, version);
const bool res = m_mempool.add_tx(tx, tx_hash, blob, tx_weight, tvc, tx_relay, relayed, version);
// If new incoming tx passed verification and entered the pool, notify ZMQ
if (!tvc.m_verifivation_failed && tvc.m_added_to_pool && matches_category(tx_relay, relay_category::legacy))
{
m_blockchain_storage.notify_txpool_event({txpool_event{
.tx = tx,
.hash = tx_hash,
.blob_size = blob.size(),
.weight = tx_weight,
.res = true}});
}
return res;
}
//-----------------------------------------------------------------------------------------------
bool core::relay_txpool_transactions()
@ -1408,14 +1135,11 @@ namespace cryptonote
//-----------------------------------------------------------------------------------------------
bool core::notify_txpool_event(const epee::span<const cryptonote::blobdata> tx_blobs, epee::span<const crypto::hash> tx_hashes, epee::span<const cryptonote::transaction> txs, const std::vector<bool> &just_broadcasted) const
{
if (!m_zmq_pub)
return true;
if (tx_blobs.size() != tx_hashes.size() || tx_blobs.size() != txs.size() || tx_blobs.size() != just_broadcasted.size())
return false;
/* Publish txs via ZMQ that are "just broadcasted" by the daemon. This is
done here in addition to `handle_incoming_txs` in order to guarantee txs
done here in order to guarantee txs
are pub'd via ZMQ when we know the daemon has/will broadcast to other
nodes & *after* the tx is visible in the pool. This should get called
when the user submits a tx to a daemon in the "fluff" epoch relaying txs
@ -1434,7 +1158,7 @@ namespace cryptonote
results[i].res = just_broadcasted[i];
}
m_zmq_pub(std::move(results));
m_blockchain_storage.notify_txpool_event(std::move(results));
return true;
}
@ -1464,7 +1188,7 @@ namespace cryptonote
m_mempool.set_relayed(epee::to_span(tx_hashes), tx_relay, just_broadcasted);
if (m_zmq_pub && matches_category(tx_relay, relay_category::legacy))
if (matches_category(tx_relay, relay_category::legacy))
notify_txpool_event(tx_blobs, epee::to_span(tx_hashes), epee::to_span(txs), just_broadcasted);
}
//-----------------------------------------------------------------------------------------------
@ -1570,7 +1294,7 @@ namespace cryptonote
if(bvc.m_added_to_main_chain)
{
cryptonote_connection_context exclude_context = {};
NOTIFY_NEW_BLOCK::request arg = AUTO_VAL_INIT(arg);
NOTIFY_NEW_FLUFFY_BLOCK::request arg{};
arg.current_blockchain_height = m_blockchain_storage.get_current_blockchain_height();
std::vector<crypto::hash> missed_txs;
std::vector<cryptonote::blobdata> txs;
@ -1608,11 +1332,11 @@ namespace cryptonote
m_blockchain_storage.safesyncmode(onoff);
}
//-----------------------------------------------------------------------------------------------
bool core::add_new_block(const block& b, block_verification_context& bvc)
bool core::add_new_block(const block& b, block_verification_context& bvc,
pool_supplement& extra_block_txs)
{
return m_blockchain_storage.add_new_block(b, bvc);
return m_blockchain_storage.add_new_block(b, bvc, extra_block_txs);
}
//-----------------------------------------------------------------------------------------------
bool core::prepare_handle_incoming_blocks(const std::vector<block_complete_entry> &blocks_entry, std::vector<block> &blocks)
{
@ -1638,7 +1362,16 @@ namespace cryptonote
}
//-----------------------------------------------------------------------------------------------
bool core::handle_incoming_block(const blobdata& block_blob, const block *b, block_verification_context& bvc, bool update_miner_blocktemplate)
bool core::handle_incoming_block(const blobdata& block_blob, const block *b,
block_verification_context& bvc, bool update_miner_blocktemplate)
{
pool_supplement ps{};
return handle_incoming_block(block_blob, b, bvc, ps, update_miner_blocktemplate);
}
//-----------------------------------------------------------------------------------------------
bool core::handle_incoming_block(const blobdata& block_blob, const block *b,
block_verification_context& bvc, pool_supplement& extra_block_txs, bool update_miner_blocktemplate)
{
TRY_ENTRY();
@ -1665,7 +1398,7 @@ namespace cryptonote
}
b = &lb;
}
add_new_block(*b, bvc);
add_new_block(*b, bvc, extra_block_txs);
if(update_miner_blocktemplate && bvc.m_added_to_main_chain)
update_miner_block_template();
return true;
@ -1673,6 +1406,39 @@ namespace cryptonote
CATCH_ENTRY_L0("core::handle_incoming_block()", false);
}
//-----------------------------------------------------------------------------------------------
bool core::handle_single_incoming_block(const blobdata& block_blob,
const block *b,
block_verification_context& bvc,
pool_supplement& extra_block_txs,
bool update_miner_blocktemplate)
{
// Note: this estimate can be quite far off since fluffy blocks won't contain all their
// transactions in the payload, but also this value doesn't *need* to be super precise. It
// is used to trigger database backing store syncing once it hits a threshold, and since
// we under-count the byte size here, it might result in under-syncing the backing store.
// If forced syncing is enabled, though (an option users turn on when they want every block
// saved promptly), then it doesn't matter either way: cleanup_handle_incoming_blocks()
// always triggers a sync.
size_t block_total_bytes = block_blob.size();
for (const auto &t : extra_block_txs.txs_by_txid)
block_total_bytes += t.second.second.size();
CRITICAL_REGION_LOCAL(m_incoming_tx_lock);
// Match each call to prepare_handle_incoming_block_no_preprocess() with a call to
// cleanup_handle_incoming_blocks()
m_blockchain_storage.prepare_handle_incoming_block_no_preprocess(block_total_bytes);
const auto auto_cleanup = epee::misc_utils::create_scope_leave_handler([this](){
this->m_blockchain_storage.cleanup_handle_incoming_blocks();
});
return handle_incoming_block(block_blob,
b,
bvc,
extra_block_txs,
update_miner_blocktemplate);
}
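
A hypothetical caller sketch (not from the PR): submitting one block together with any transactions the pool might be missing, without the prepare/cleanup bracketing required by handle_incoming_block(). `m_core` is an assumed cryptonote::core reference.

    cryptonote::block_verification_context bvc{};
    cryptonote::pool_supplement extra{};
    // extra.txs_by_txid[txid] = {tx, tx_blob};   // txs that may not be in the mempool yet
    const bool ok = m_core.handle_single_incoming_block(block_blob, /*b=*/nullptr, bvc, extra,
                                                        /*update_miner_blocktemplate=*/true);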
//-----------------------------------------------------------------------------------------------
// Used by the RPC server to check the size of an incoming
// block_blob
bool core::check_incoming_block_size(const blobdata& block_blob) const
@ -1714,16 +1480,6 @@ namespace cryptonote
return m_blockchain_storage.have_block(id, where);
}
//-----------------------------------------------------------------------------------------------
bool core::parse_tx_from_blob(transaction& tx, crypto::hash& tx_hash, const blobdata& blob) const
{
return parse_and_validate_tx_from_blob(blob, tx, tx_hash);
}
//-----------------------------------------------------------------------------------------------
bool core::check_tx_syntax(const transaction& tx) const
{
return true;
}
//-----------------------------------------------------------------------------------------------
bool core::get_pool_transactions_info(const std::vector<crypto::hash>& txids, std::vector<std::pair<crypto::hash, tx_memory_pool::tx_details>>& txs, bool include_sensitive_txes) const
{
return m_mempool.get_transactions_info(txids, txs, include_sensitive_txes);
@ -2074,14 +1830,6 @@ namespace cryptonote
return true;
}
//-----------------------------------------------------------------------------------------------
void core::flush_bad_txs_cache()
{
bad_semantics_txes_lock.lock();
for (int idx = 0; idx < 2; ++idx)
bad_semantics_txes[idx].clear();
bad_semantics_txes_lock.unlock();
}
//-----------------------------------------------------------------------------------------------
void core::flush_invalid_blocks()
{
m_blockchain_storage.flush_invalid_blocks();

View File

@ -125,60 +125,63 @@ namespace cryptonote
*
* @return true if the transaction was accepted, false otherwise
*/
bool handle_incoming_tx(const tx_blob_entry& tx_blob, tx_verification_context& tvc, relay_method tx_relay, bool relayed);
bool handle_incoming_tx(const blobdata& tx_blob, tx_verification_context& tvc, relay_method tx_relay, bool relayed);
/**
* @brief handles a list of incoming transactions
*
* Parses incoming transactions and, if nothing is obviously wrong,
* passes them along to the transaction pool
*
* @pre `tx_blobs.size() == tvc.size()`
*
* @param tx_blobs the txs to handle
* @param tvc metadata about the transactions' validity
* @param tx_relay how the transaction was received.
* @param relayed whether or not the transactions were relayed to us
*
* @return true if the transactions were accepted, false otherwise
*/
bool handle_incoming_txs(epee::span<const tx_blob_entry> tx_blobs, epee::span<tx_verification_context> tvc, relay_method tx_relay, bool relayed);
/**
* @brief handles a list of incoming transactions
*
* Parses incoming transactions and, if nothing is obviously wrong,
* passes them along to the transaction pool
*
* @param tx_blobs the txs to handle
* @param tvc metadata about the transactions' validity
* @param tx_relay how the transaction was received.
* @param relayed whether or not the transactions were relayed to us
*
* @return true if the transactions were accepted, false otherwise
*/
bool handle_incoming_txs(const std::vector<tx_blob_entry>& tx_blobs, std::vector<tx_verification_context>& tvc, relay_method tx_relay, bool relayed)
{
tvc.resize(tx_blobs.size());
return handle_incoming_txs(epee::to_span(tx_blobs), epee::to_mut_span(tvc), tx_relay, relayed);
}
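A hedged usage sketch for the vector overload (assuming the Monero types declared here; the incoming blob list and helper name are hypothetical). The overload resizes the tvc vector itself, so callers only inspect the per-tx flags afterwards:
#include <vector>
#include "cryptonote_core/cryptonote_core.h"
#include "crypto/hash.h"
// Hypothetical helper: hand a batch of received tx blobs to core and count failures.
size_t submit_txs(cryptonote::core &core,
                  const std::vector<cryptonote::blobdata> &blobs,
                  cryptonote::relay_method tx_relay)
{
  std::vector<cryptonote::tx_blob_entry> entries;
  entries.reserve(blobs.size());
  for (const auto &blob : blobs)
    entries.push_back({blob, crypto::null_hash}); // unpruned entries
  std::vector<cryptonote::tx_verification_context> tvc; // sized by the overload
  core.handle_incoming_txs(entries, tvc, tx_relay, /*relayed=*/true);
  size_t failed = 0;
  for (const auto &c : tvc)
    failed += c.m_verifivation_failed ? 1 : 0;
  return failed;
}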
/**
* @brief handles an incoming block
/**
* @brief handles a single incoming block
*
* periodic update to checkpoints is triggered here
* Attempts to add the block to the Blockchain and, on success,
* optionally updates the miner's block template.
*
* Unlike handle_incoming_block(), a write transaction is created in this method, which means
* the caller doesn't have to call prepare_handle_incoming_blocks() nor
* cleanup_handle_incoming_blocks() surrounding this call.
*
* @param block_blob the block to be added
* @param block the block to be added, or NULL
* @param bvc return-by-reference metadata context about the block's validity
* @param extra_block_txs txs belonging to this block that may not be in the mempool
* @param update_miner_blocktemplate whether or not to update the miner's block template
*
* @return false if loading new checkpoints fails, or the block is not
* added, otherwise true
*/
bool handle_incoming_block(const blobdata& block_blob, const block *b, block_verification_context& bvc, bool update_miner_blocktemplate = true);
bool handle_single_incoming_block(const blobdata& block_blob,
const block *b,
block_verification_context& bvc,
pool_supplement& extra_block_txs,
bool update_miner_blocktemplate = true);
/**
* @brief handles an incoming block as part of a batch
*
* periodic update to checkpoints is triggered here
* Attempts to add the block to the Blockchain and, on success,
* optionally updates the miner's block template.
*
* Prerequisite: There must be an active write transaction for the blockchain storage on this
* thread. Typically, this is done by calling prepare_handle_incoming_blocks() on
* this thread before calls to handle_incoming_block(). Then, after calls to
* handle_incoming_block(), a call to cleanup_handle_incoming_blocks() is made
* on this thread to either abort or commit the write transaction.
*
* @param block_blob the block to be added
* @param block the block to be added, or NULL
* @param bvc return-by-reference metadata context about the block's validity
* @param extra_block_txs txs belonging to this block that may not be in the mempool
* @param update_miner_blocktemplate whether or not to update the miner's block template
*
* @return false if loading new checkpoints fails, or the block is not
* added, otherwise true
*/
bool handle_incoming_block(const blobdata& block_blob, const block *b,
block_verification_context& bvc,
bool update_miner_blocktemplate = true);
bool handle_incoming_block(const blobdata& block_blob, const block *b,
block_verification_context& bvc, pool_supplement& extra_block_txs,
bool update_miner_blocktemplate = true);
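A hedged sketch of the batch pattern described above (assuming the core API in this header; the caller and its inputs are hypothetical). prepare/cleanup bracket the write transaction, while each block carries its own pool_supplement:
#include <vector>
#include "cryptonote_core/cryptonote_core.h"
#include "cryptonote_core/tx_verification_utils.h"
// Hypothetical helper: add a span of downloaded blocks inside one write transaction.
// Assumes supplements.size() == entries.size().
bool add_block_batch(cryptonote::core &core,
                     const std::vector<cryptonote::block_complete_entry> &entries,
                     std::vector<cryptonote::pool_supplement> &supplements)
{
  std::vector<cryptonote::block> parsed; // optionally pre-parsed blocks
  if (!core.prepare_handle_incoming_blocks(entries, parsed))
    return false;
  bool ok = true;
  for (size_t i = 0; i < entries.size(); ++i)
  {
    cryptonote::block_verification_context bvc = {};
    core.handle_incoming_block(entries[i].block,
                               parsed.empty() ? NULL : &parsed[i],
                               bvc,
                               supplements[i],
                               /*update_miner_blocktemplate=*/false);
    ok &= !bvc.m_verifivation_failed;
  }
  // Commit or abort the write transaction opened by prepare_handle_incoming_blocks().
  return core.cleanup_handle_incoming_blocks() && ok;
}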
/**
* @copydoc Blockchain::prepare_handle_incoming_blocks
@ -463,13 +466,6 @@ namespace cryptonote
*/
void set_enforce_dns_checkpoints(bool enforce_dns);
/**
* @brief set a listener for txes being added to the txpool
*
* @param callable to notify, or empty function to disable.
*/
void set_txpool_listener(boost::function<void(std::vector<txpool_event>)> zmq_pub);
/**
* @brief set whether or not to enable or disable DNS checkpoints
*
@ -826,13 +822,6 @@ namespace cryptonote
*/
bool is_update_available() const { return m_update_available; }
/**
* @brief get whether fluffy blocks are enabled
*
* @return whether fluffy blocks are enabled
*/
bool fluffy_blocks_enabled() const { return m_fluffy_blocks_enabled; }
/**
* @brief check a set of hashes against the precompiled hash set
*
@ -896,11 +885,6 @@ namespace cryptonote
*/
bool has_block_weights(uint64_t height, uint64_t nblocks) const;
/**
* @brief flushes the bad txs cache
*/
void flush_bad_txs_cache();
/**
* @brief flushes the invalid block cache
*/
@ -915,6 +899,55 @@ namespace cryptonote
*/
bool get_txpool_complement(const std::vector<crypto::hash> &hashes, std::vector<cryptonote::blobdata> &txes);
/**
* @brief validates some simple properties of a transaction
*
* Currently checks: tx has inputs,
* tx inputs all of supported type(s),
* tx outputs valid (type, key, amount),
* input and output total amounts don't overflow,
* output amount <= input amount,
* tx not too large,
* each input has a different key image.
*
* @param tx the transaction to check
* @param tvc tx verification context where extra fail flags are stored
* @param hf_version hard fork version
*
* @return true if all the checks pass, otherwise false
*/
static bool check_tx_semantic(const transaction& tx, tx_verification_context& tvc,
uint8_t hf_version);
/**
* @brief verify that each input key image in a transaction is unique
*
* @param tx the transaction to check
*
* @return false if any key image is repeated, otherwise true
*/
static bool check_tx_inputs_keyimages_diff(const transaction& tx);
/**
* @brief verify that each ring uses distinct members
*
* @param tx the transaction to check
* @param hf_version the hard fork version rules to use
*
* @return false if any ring uses duplicate members, true otherwise
*/
static bool check_tx_inputs_ring_members_diff(const transaction& tx, const uint8_t hf_version);
/**
* @brief verify that each input key image in a transaction is in
* the valid domain
*
* @param tx the transaction to check
*
* @return false if any key image is not in the valid domain, otherwise true
*/
static bool check_tx_inputs_keyimages_domain(const transaction& tx);
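For orientation, a plausible sketch of what a check like check_tx_inputs_keyimages_diff() boils down to (an illustration under the assumption that non-key inputs are simply skipped, not the exact implementation):
#include <unordered_set>
#include <typeinfo>
#include "cryptonote_basic/cryptonote_basic.h"
// Sketch: return false as soon as two inputs share a key image.
static bool keyimages_are_distinct(const cryptonote::transaction &tx)
{
  std::unordered_set<crypto::key_image> seen;
  for (const auto &in : tx.vin)
  {
    if (in.type() != typeid(cryptonote::txin_to_key))
      continue; // only key inputs carry a key image
    const auto &to_key = boost::get<cryptonote::txin_to_key>(in);
    if (!seen.insert(to_key.k_image).second)
      return false; // duplicate key image within the same tx
  }
  return true;
}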
private:
/**
@ -950,7 +983,8 @@ namespace cryptonote
*
* @note see Blockchain::add_new_block
*/
bool add_new_block(const block& b, block_verification_context& bvc);
bool add_new_block(const block& b, block_verification_context& bvc,
pool_supplement& extra_block_txs);
/**
* @brief load any core state stored on disk
@ -961,49 +995,6 @@ namespace cryptonote
*/
bool load_state_data();
/**
* @copydoc parse_tx_from_blob(transaction&, crypto::hash&, crypto::hash&, const blobdata&) const
*
* @note see parse_tx_from_blob(transaction&, crypto::hash&, crypto::hash&, const blobdata&) const
*/
bool parse_tx_from_blob(transaction& tx, crypto::hash& tx_hash, const blobdata& blob) const;
/**
* @brief check a transaction's syntax
*
* For now this does nothing, but it may check something about the tx
* in the future.
*
* @param tx the transaction to check
*
* @return true
*/
bool check_tx_syntax(const transaction& tx) const;
/**
* @brief validates some simple properties of a transaction
*
* Currently checks: tx has inputs,
* tx inputs all of supported type(s),
* tx outputs valid (type, key, amount),
* input and output total amounts don't overflow,
* output amount <= input amount,
* tx not too large,
* each input has a different key image.
*
* @param tx the transaction to check
* @param keeped_by_block if the transaction has been in a block
*
* @return true if all the checks pass, otherwise false
*/
bool check_tx_semantic(const transaction& tx, bool keeped_by_block) const;
void set_semantics_failed(const crypto::hash &tx_hash);
bool handle_incoming_tx_pre(const tx_blob_entry& tx_blob, tx_verification_context& tvc, cryptonote::transaction &tx, crypto::hash &tx_hash);
bool handle_incoming_tx_post(const tx_blob_entry& tx_blob, tx_verification_context& tvc, cryptonote::transaction &tx, crypto::hash &tx_hash);
struct tx_verification_batch_info { const cryptonote::transaction *tx; crypto::hash tx_hash; tx_verification_context &tvc; bool &result; };
bool handle_incoming_tx_accumulated_batch(std::vector<tx_verification_batch_info> &tx_info, bool keeped_by_block);
/**
* @copydoc miner::on_block_chain_update
*
@ -1022,35 +1013,6 @@ namespace cryptonote
*/
bool handle_command_line(const boost::program_options::variables_map& vm);
/**
* @brief verify that each input key image in a transaction is unique
*
* @param tx the transaction to check
*
* @return false if any key image is repeated, otherwise true
*/
bool check_tx_inputs_keyimages_diff(const transaction& tx) const;
/**
* @brief verify that each ring uses distinct members
*
* @param tx the transaction to check
* @param hf_version the hard fork version rules to use
*
* @return false if any ring uses duplicate members, true otherwise
*/
bool check_tx_inputs_ring_members_diff(const transaction& tx, const uint8_t hf_version) const;
/**
* @brief verify that each input key image in a transaction is in
* the valid domain
*
* @param tx the transaction to check
*
* @return false if any key image is not in the valid domain, otherwise true
*/
bool check_tx_inputs_keyimages_domain(const transaction& tx) const;
/**
* @brief attempts to relay any transactions in the mempool which need it
*
@ -1139,9 +1101,6 @@ namespace cryptonote
time_t start_time;
std::unordered_set<crypto::hash> bad_semantics_txes[2];
boost::mutex bad_semantics_txes_lock;
enum {
UPDATES_DISABLED,
UPDATES_NOTIFY,
@ -1153,15 +1112,9 @@ namespace cryptonote
size_t m_last_update_length;
boost::mutex m_update_mutex;
bool m_fluffy_blocks_enabled;
bool m_offline;
/* `boost::function` is used because the implementation never allocates if
the callable object holds a single `std::shared_ptr` or `std::weak_ptr`
internally, whereas the libstdc++ `std::function` will allocate. */
std::shared_ptr<tools::Notify> m_block_rate_notify;
boost::function<void(std::vector<txpool_event>)> m_zmq_pub;
};
}

View File

@ -37,6 +37,7 @@
#include "tx_pool.h"
#include "cryptonote_tx_utils.h"
#include "cryptonote_basic/cryptonote_boost_serialization.h"
#include "cryptonote_basic/events.h"
#include "cryptonote_config.h"
#include "blockchain.h"
#include "blockchain_db/locked_txn.h"
@ -44,6 +45,7 @@
#include "int-util.h"
#include "misc_language.h"
#include "misc_log_ex.h"
#include "tx_verification_utils.h"
#include "warnings.h"
#include "common/perf_timer.h"
#include "crypto/hash.h"
@ -111,15 +113,6 @@ namespace cryptonote
return amount * ACCEPT_THRESHOLD;
}
uint64_t get_transaction_weight_limit(uint8_t version)
{
// from v8, limit a tx to 50% of the minimum block weight
if (version >= 8)
return get_min_block_weight(version) / 2 - CRYPTONOTE_COINBASE_BLOB_RESERVED_SIZE;
else
return get_min_block_weight(version) - CRYPTONOTE_COINBASE_BLOB_RESERVED_SIZE;
}
// external lock must be held for the comparison+set to work properly
void set_if_less(std::atomic<time_t>& next_check, const time_t candidate) noexcept
{
@ -142,7 +135,10 @@ namespace cryptonote
// corresponding lists.
}
//---------------------------------------------------------------------------------
bool tx_memory_pool::add_tx(transaction &tx, /*const crypto::hash& tx_prefix_hash,*/ const crypto::hash &id, const cryptonote::blobdata &blob, size_t tx_weight, tx_verification_context& tvc, relay_method tx_relay, bool relayed, uint8_t version)
bool tx_memory_pool::add_tx(transaction &tx, /*const crypto::hash& tx_prefix_hash,*/
const crypto::hash &id, const cryptonote::blobdata &blob, size_t tx_weight,
tx_verification_context& tvc, relay_method tx_relay, bool relayed,
uint8_t version, uint8_t nic_verified_hf_version)
{
const bool kept_by_block = (tx_relay == relay_method::block);
@ -150,13 +146,6 @@ namespace cryptonote
CRITICAL_REGION_LOCAL(m_transactions_lock);
PERF_TIMER(add_tx);
if (tx.version == 0)
{
// v0 never accepted
LOG_PRINT_L1("transaction version 0 is invalid");
tvc.m_verifivation_failed = true;
return false;
}
// we do not accept transactions that timed out before, unless they're
// kept_by_block
@ -168,49 +157,24 @@ namespace cryptonote
return false;
}
if(!check_inputs_types_supported(tx))
if (version != nic_verified_hf_version && !cryptonote::ver_non_input_consensus(tx, tvc, version))
{
tvc.m_verifivation_failed = true;
tvc.m_invalid_input = true;
LOG_PRINT_L1("transaction " << id << " failed non-input consensus rule checks");
tvc.m_verifivation_failed = true; // should already be set, but just in case
return false;
}
// fee per kilobyte, size rounded up.
uint64_t fee;
if (tx.version == 1)
bool fee_good = false;
try
{
uint64_t inputs_amount = 0;
if(!get_inputs_money_amount(tx, inputs_amount))
{
tvc.m_verifivation_failed = true;
return false;
}
uint64_t outputs_amount = get_outs_money_amount(tx);
if(outputs_amount > inputs_amount)
{
LOG_PRINT_L1("transaction use more money than it has: use " << print_money(outputs_amount) << ", have " << print_money(inputs_amount));
tvc.m_verifivation_failed = true;
tvc.m_overspend = true;
return false;
}
else if(outputs_amount == inputs_amount)
{
LOG_PRINT_L1("transaction fee is zero: outputs_amount == inputs_amount, rejecting.");
tvc.m_verifivation_failed = true;
tvc.m_fee_too_low = true;
return false;
}
fee = inputs_amount - outputs_amount;
// get_tx_fee() can throw. It shouldn't throw because we check preconditions in
// ver_non_input_consensus(), but let's put it in a try block just in case.
fee = get_tx_fee(tx);
fee_good = kept_by_block || m_blockchain.check_fee(tx_weight, fee);
}
else
{
fee = tx.rct_signatures.txnFee;
}
if (!kept_by_block && !m_blockchain.check_fee(tx_weight, fee))
catch(...) {}
if (!fee_good) // if fee calculation failed or fee in relayed tx is too low...
{
tvc.m_verifivation_failed = true;
tvc.m_fee_too_low = true;
@ -218,15 +182,6 @@ namespace cryptonote
return false;
}
size_t tx_weight_limit = get_transaction_weight_limit(version);
if ((!kept_by_block || version >= HF_VERSION_PER_BYTE_FEE) && tx_weight > tx_weight_limit)
{
LOG_PRINT_L1("transaction is too heavy: " << tx_weight << " bytes, maximum weight: " << tx_weight_limit);
tvc.m_verifivation_failed = true;
tvc.m_too_big = true;
return false;
}
size_t tx_extra_size = tx.extra.size();
if (!kept_by_block && tx_extra_size > MAX_TX_EXTRA_SIZE)
{
@ -262,14 +217,6 @@ namespace cryptonote
}
}
if (!m_blockchain.check_tx_outputs(tx, tvc))
{
LOG_PRINT_L1("Transaction with id= "<< id << " has at least one invalid output");
tvc.m_verifivation_failed = true;
tvc.m_invalid_output = true;
return false;
}
// assume failure during verification steps until success is certain
tvc.m_verifivation_failed = true;
@ -383,13 +330,13 @@ namespace cryptonote
add_tx_to_transient_lists(id, meta.fee / (double)(tx_weight ? tx_weight : 1), receive_time);
}
lock.commit();
tvc.m_added_to_pool = !existing_tx;
}
catch (const std::exception &e)
{
MERROR("internal error: error adding transaction to txpool: " << e.what());
return false;
}
tvc.m_added_to_pool = true;
static_assert(unsigned(relay_method::none) == 0, "expected relay_method::none value to be zero");
if(meta.fee > 0 && tx_relay != relay_method::forward)
@ -408,14 +355,16 @@ namespace cryptonote
return true;
}
//---------------------------------------------------------------------------------
bool tx_memory_pool::add_tx(transaction &tx, tx_verification_context& tvc, relay_method tx_relay, bool relayed, uint8_t version)
bool tx_memory_pool::add_tx(transaction &tx, tx_verification_context& tvc, relay_method tx_relay,
bool relayed, uint8_t version, uint8_t nic_verified_hf_version)
{
crypto::hash h = null_hash;
cryptonote::blobdata bl;
t_serializable_object_to_blob(tx, bl);
if (bl.size() == 0 || !get_transaction_hash(tx, h))
return false;
return add_tx(tx, h, bl, get_transaction_weight(tx, bl.size()), tvc, tx_relay, relayed, version);
return add_tx(tx, h, bl, get_transaction_weight(tx, bl.size()), tvc, tx_relay, relayed, version,
nic_verified_hf_version);
}
//---------------------------------------------------------------------------------
size_t tx_memory_pool::get_txpool_weight() const
@ -581,7 +530,7 @@ namespace cryptonote
return true;
}
//---------------------------------------------------------------------------------
bool tx_memory_pool::take_tx(const crypto::hash &id, transaction &tx, cryptonote::blobdata &txblob, size_t& tx_weight, uint64_t& fee, bool &relayed, bool &do_not_relay, bool &double_spend_seen, bool &pruned)
bool tx_memory_pool::take_tx(const crypto::hash &id, transaction &tx, cryptonote::blobdata &txblob, size_t& tx_weight, uint64_t& fee, bool &relayed, bool &do_not_relay, bool &double_spend_seen, bool &pruned, const bool suppress_missing_msgs)
{
CRITICAL_REGION_LOCAL(m_transactions_lock);
CRITICAL_REGION_LOCAL1(m_blockchain);
@ -593,7 +542,10 @@ namespace cryptonote
txpool_tx_meta_t meta;
if (!m_blockchain.get_txpool_tx_meta(id, meta))
{
MERROR("Failed to find tx_meta in txpool");
if (!suppress_missing_msgs)
{
MERROR("Failed to find tx_meta in txpool");
}
return false;
}
txblob = m_blockchain.get_txpool_tx_blob(id, relay_category::all);
@ -1463,44 +1415,21 @@ namespace cryptonote
bool parsed;
} lazy_tx(txblob, txid, tx);
//not the best implementation at this time, sorry :(
//check is ring_signature already checked ?
if(txd.max_used_block_id == null_hash)
{//not checked, lets try to check
const std::uint64_t top_block_height{m_blockchain.get_current_blockchain_height() - 1};
const crypto::hash top_block_hash{m_blockchain.get_block_id_by_height(top_block_height)};
if(txd.last_failed_id != null_hash && m_blockchain.get_current_blockchain_height() > txd.last_failed_height && txd.last_failed_id == m_blockchain.get_block_id_by_height(txd.last_failed_height))
return false;//we already sure that this tx is broken for this height
if (txd.last_failed_id == top_block_hash)
return false; // we already know this tx doesn't pass on top of this exact chain
tx_verification_context tvc;
if(!check_tx_inputs([&lazy_tx]()->cryptonote::transaction&{ return lazy_tx(); }, txid, txd.max_used_block_height, txd.max_used_block_id, tvc))
{
txd.last_failed_height = m_blockchain.get_current_blockchain_height()-1;
txd.last_failed_id = m_blockchain.get_block_id_by_height(txd.last_failed_height);
return false;
}
}else
tx_verification_context tvc{};
if (!check_tx_inputs([&lazy_tx]()->cryptonote::transaction&{ return lazy_tx(); },
txid,
txd.max_used_block_height,
txd.max_used_block_id,
tvc))
{
if(txd.max_used_block_height >= m_blockchain.get_current_blockchain_height())
return false;
if(true)
{
//if we already failed on this height and id, skip actual ring signature check
if(txd.last_failed_id == m_blockchain.get_block_id_by_height(txd.last_failed_height))
return false;
//check ring signature again, it is possible (with very small chance) that this transaction become again valid
tx_verification_context tvc;
if(!check_tx_inputs([&lazy_tx]()->cryptonote::transaction&{ return lazy_tx(); }, txid, txd.max_used_block_height, txd.max_used_block_id, tvc))
{
txd.last_failed_height = m_blockchain.get_current_blockchain_height()-1;
txd.last_failed_id = m_blockchain.get_block_id_by_height(txd.last_failed_height);
return false;
}
}
}
//if we here, transaction seems valid, but, anyway, check for key_images collisions with blockchain, just to be sure
if(m_blockchain.have_tx_keyimges_as_spent(lazy_tx()))
{
txd.double_spend_seen = true;
txd.last_failed_height = top_block_height;
txd.last_failed_id = top_block_hash;
return false;
}

View File

@ -117,7 +117,9 @@ namespace cryptonote
* @tx_relay how the transaction was received
* @param tx_weight the transaction's weight
*/
bool add_tx(transaction &tx, const crypto::hash &id, const cryptonote::blobdata &blob, size_t tx_weight, tx_verification_context& tvc, relay_method tx_relay, bool relayed, uint8_t version);
bool add_tx(transaction &tx, const crypto::hash &id, const cryptonote::blobdata &blob,
size_t tx_weight, tx_verification_context& tvc, relay_method tx_relay, bool relayed,
uint8_t version, uint8_t nic_verified_hf_version = 0);
/**
* @brief add a transaction to the transaction pool
@ -132,10 +134,18 @@ namespace cryptonote
* @tx_relay how the transaction was received
* @param relayed was this transaction from the network or a local client?
* @param version the version used to create the transaction
* @param nic_verified_hf_version hard fork version for which "tx" is known to pass the non-input consensus tests
*
* If the "nic_verified_hf_version" parameter is equal to the "version" parameter, then we skip
* asserting `ver_non_input_consensus(tx)`, which greatly speeds up block popping and returning
* txs to the mempool when we already know they will pass the test. If nothing is known about
* whether "tx" passes the non-input consensus tests (e.g. for newly received relayed txs), leave
* "nic_verified_hf_version" at its default value of 0 (there is no v0 fork).
*
* @return true if the transaction passes validations, otherwise false
*/
bool add_tx(transaction &tx, tx_verification_context& tvc, relay_method tx_relay, bool relayed, uint8_t version);
bool add_tx(transaction &tx, tx_verification_context& tvc, relay_method tx_relay, bool relayed,
uint8_t version, uint8_t nic_verified_hf_version = 0);
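A hedged sketch of the intended fast path (the pop-block loop around it is hypothetical; only the add_tx() signature above is taken from this header). Passing the same hard fork version as nic_verified_hf_version tells the pool the txs already passed the non-input consensus checks inside the popped block:
#include <vector>
#include "cryptonote_core/tx_pool.h"
// Sketch: return txs from a popped block to the mempool without re-running
// the non-input consensus checks they already passed.
void return_popped_txs(cryptonote::tx_memory_pool &pool,
                       std::vector<cryptonote::transaction> &popped_txs,
                       uint8_t hf_version)
{
  for (auto &tx : popped_txs)
  {
    cryptonote::tx_verification_context tvc{};
    pool.add_tx(tx, tvc, cryptonote::relay_method::block, /*relayed=*/true,
                hf_version, /*nic_verified_hf_version=*/hf_version);
  }
}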
/**
* @brief takes a transaction with the given hash from the pool
@ -149,10 +159,11 @@ namespace cryptonote
* @param do_not_relay return-by-reference is transaction not to be relayed to the network?
* @param double_spend_seen return-by-reference was a double spend seen for that transaction?
* @param pruned return-by-reference is the tx pruned
* @param suppress_missing_msgs suppress warning msgs when txid is missing (optional, defaults to `false`)
*
* @return true unless the transaction cannot be found in the pool
*/
bool take_tx(const crypto::hash &id, transaction &tx, cryptonote::blobdata &txblob, size_t& tx_weight, uint64_t& fee, bool &relayed, bool &do_not_relay, bool &double_spend_seen, bool &pruned);
bool take_tx(const crypto::hash &id, transaction &tx, cryptonote::blobdata &txblob, size_t& tx_weight, uint64_t& fee, bool &relayed, bool &do_not_relay, bool &double_spend_seen, bool &pruned, bool suppress_missing_msgs = false);
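A small hedged usage sketch (the calling context and helper name are hypothetical): when removing txs that may legitimately be absent from the pool, e.g. txs that arrived attached to a block rather than through relay, the new flag silences the "not found" error spam:
#include "cryptonote_core/tx_pool.h"
// Sketch: best-effort removal of a tx that might not be in the pool.
void drop_if_pooled(cryptonote::tx_memory_pool &pool, const crypto::hash &txid)
{
  cryptonote::transaction tx;
  cryptonote::blobdata blob;
  size_t weight = 0;
  uint64_t fee = 0;
  bool relayed = false, do_not_relay = false, double_spend_seen = false, pruned = false;
  // suppress_missing_msgs = true: a miss here is expected, not an error.
  pool.take_tx(txid, tx, blob, weight, fee, relayed, do_not_relay,
               double_spend_seen, pruned, /*suppress_missing_msgs=*/true);
}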
/**
* @brief checks if the pool has a transaction with the given hash

View File

@ -26,8 +26,12 @@
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <boost/iterator/transform_iterator.hpp>
#include "cryptonote_core/blockchain.h"
#include "cryptonote_core/cryptonote_core.h"
#include "cryptonote_core/tx_verification_utils.h"
#include "hardforks/hardforks.h"
#include "ringct/rctSigs.h"
#undef MONERO_DEFAULT_LOG_CATEGORY
@ -105,11 +109,104 @@ static crypto::hash calc_tx_mixring_hash(const transaction& tx, const rct::ctkey
return tx_and_mixring_hash;
}
static bool is_canonical_bulletproof_layout(const std::vector<rct::Bulletproof> &proofs)
{
if (proofs.size() != 1)
return false;
const size_t sz = proofs[0].V.size();
if (sz == 0 || sz > BULLETPROOF_MAX_OUTPUTS)
return false;
return true;
}
static bool is_canonical_bulletproof_plus_layout(const std::vector<rct::BulletproofPlus> &proofs)
{
if (proofs.size() != 1)
return false;
const size_t sz = proofs[0].V.size();
if (sz == 0 || sz > BULLETPROOF_PLUS_MAX_OUTPUTS)
return false;
return true;
}
template <class TxForwardIt>
static bool ver_non_input_consensus_templated(TxForwardIt tx_begin, TxForwardIt tx_end,
tx_verification_context& tvc, std::uint8_t hf_version)
{
std::vector<const rct::rctSig*> rvv;
rvv.reserve(static_cast<size_t>(std::distance(tx_begin, tx_end)));
const size_t max_tx_version = hf_version < HF_VERSION_DYNAMIC_FEE ? 1 : 2;
const size_t tx_weight_limit = get_transaction_weight_limit(hf_version);
for (; tx_begin != tx_end; ++tx_begin)
{
const transaction& tx = *tx_begin;
const uint64_t blob_size = get_transaction_blob_size(tx);
// Rule 1
if (blob_size > get_max_tx_size())
{
tvc.m_verifivation_failed = true;
tvc.m_too_big = true;
return false;
}
// Rule 2 & 3
if (tx.version == 0 || tx.version > max_tx_version)
{
tvc.m_verifivation_failed = true;
return false;
}
// Rule 4
const size_t tx_weight = get_transaction_weight(tx, blob_size);
if (hf_version >= HF_VERSION_PER_BYTE_FEE && tx_weight > tx_weight_limit)
{
tvc.m_verifivation_failed = true;
tvc.m_too_big = true;
return false;
}
// Rule 5
if (!core::check_tx_semantic(tx, tvc, hf_version))
return false;
// Rule 6
if (!Blockchain::check_tx_outputs(tx, tvc, hf_version) || tvc.m_verifivation_failed)
return false;
// We only want to check RingCT semantics if this is actually a RingCT transaction
if (tx.version >= 2)
rvv.push_back(&tx.rct_signatures);
}
// Rule 7
if (!ver_mixed_rct_semantics(std::move(rvv)))
{
tvc.m_verifivation_failed = true;
tvc.m_invalid_input = true;
return false;
}
return true;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
namespace cryptonote
{
uint64_t get_transaction_weight_limit(const uint8_t hf_version)
{
// from v8, limit a tx to 50% of the minimum block weight
if (hf_version >= HF_VERSION_PER_BYTE_FEE)
return get_min_block_weight(hf_version) / 2 - CRYPTONOTE_COINBASE_BLOB_RESERVED_SIZE;
else
return get_min_block_weight(hf_version) - CRYPTONOTE_COINBASE_BLOB_RESERVED_SIZE;
}
bool ver_rct_non_semantics_simple_cached
(
transaction& tx,
@ -164,4 +261,104 @@ bool ver_rct_non_semantics_simple_cached
return true;
}
bool ver_mixed_rct_semantics(std::vector<const rct::rctSig*> rvv)
{
size_t batch_rv_size = 0; // acts as an "end" index one past the last simple, batchable sig ptr
for (size_t i = 0; i < rvv.size(); ++i)
{
const rct::rctSig& rv = *rvv[i];
bool is_batchable_rv = false;
switch (rv.type)
{
case rct::RCTTypeNull:
// coinbase txs are the only legitimate RCTTypeNull users and they should not come through here, so reject
MERROR("Unexpected Null rctSig type");
return false;
break;
case rct::RCTTypeSimple:
if (!rct::verRctSemanticsSimple(rv))
{
MERROR("rct signature semantics check failed: type simple");
return false;
}
break;
case rct::RCTTypeFull:
if (!rct::verRct(rv, /*semantics=*/true))
{
MERROR("rct signature semantics check failed: type full");
return false;
}
break;
case rct::RCTTypeBulletproof:
case rct::RCTTypeBulletproof2:
case rct::RCTTypeCLSAG:
if (!is_canonical_bulletproof_layout(rv.p.bulletproofs))
{
MERROR("Bulletproof does not have canonical form");
return false;
}
is_batchable_rv = true;
break;
case rct::RCTTypeBulletproofPlus:
if (!is_canonical_bulletproof_plus_layout(rv.p.bulletproofs_plus))
{
MERROR("Bulletproof_plus does not have canonical form");
return false;
}
is_batchable_rv = true;
break;
default:
MERROR("Unknown rct type: " << rv.type);
return false;
break;
}
// Save this ring sig for later, as we will attempt simple RCT semantics batch verification
if (is_batchable_rv)
rvv[batch_rv_size++] = rvv[i];
}
if (batch_rv_size) // if any simple, batchable ring sigs...
{
rvv.resize(batch_rv_size);
if (!rct::verRctSemanticsSimple(rvv))
{
MERROR("rct signature semantics check failed: simple-style batch verification failed");
return false;
}
}
return true;
}
bool ver_non_input_consensus(const transaction& tx, tx_verification_context& tvc,
std::uint8_t hf_version)
{
return ver_non_input_consensus_templated(&tx, &tx + 1, tvc, hf_version);
}
bool ver_non_input_consensus(const pool_supplement& ps, tx_verification_context& tvc,
const std::uint8_t hf_version)
{
// We already verified the pool supplement for this hard fork version! Yippee!
if (ps.nic_verified_hf_version == hf_version)
return true;
const auto it_transform = [] (const decltype(ps.txs_by_txid)::value_type& in)
-> const transaction& { return in.second.first; };
const auto tx_begin = boost::make_transform_iterator(ps.txs_by_txid.cbegin(), it_transform);
const auto tx_end = boost::make_transform_iterator(ps.txs_by_txid.cend(), it_transform);
// Perform the checks...
const bool verified = ver_non_input_consensus_templated(tx_begin, tx_end, tvc, hf_version);
// Cache the hard fork version on success
if (verified)
ps.nic_verified_hf_version = hf_version;
return verified;
}
} // namespace cryptonote

View File

@ -30,10 +30,19 @@
#include "common/data_cache.h"
#include "cryptonote_basic/cryptonote_basic.h"
#include "cryptonote_basic/verification_context.h"
namespace cryptonote
{
/**
* @brief Get the maximum transaction weight for a given hardfork
*
* @param hf_version hard fork version
* @return the maximum unconditional transaction weight
*/
uint64_t get_transaction_weight_limit(uint8_t hf_version);
// Modifying this value should not affect consensus. You can adjust it for performance needs
static constexpr const size_t RCT_VER_CACHE_SIZE = 8192;
@ -75,4 +84,57 @@ bool ver_rct_non_semantics_simple_cached
std::uint8_t rct_type_to_cache
);
/**
* @brief Verify the semantics of a group of RingCT signatures as a batch (if applicable)
*
* Coinbase txs, or any other transactions with a RingCT type of RCTTypeNull, will fail to verify.
*
* @param rvv list of signatures to verify
* @return true if all signatures verified semantics successfully, false otherwise
*/
bool ver_mixed_rct_semantics(std::vector<const rct::rctSig*> rvv);
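A hedged sketch of how the batch entry point is meant to be fed (the surrounding collection loop and helper name are hypothetical; only the declaration above is assumed):
#include <vector>
#include "cryptonote_basic/cryptonote_basic.h"
#include "cryptonote_core/tx_verification_utils.h"
// Sketch: batch-verify RingCT semantics for a group of non-coinbase txs.
static bool batch_verify_rct(const std::vector<cryptonote::transaction> &txs)
{
  std::vector<const rct::rctSig*> rvv;
  rvv.reserve(txs.size());
  for (const auto &tx : txs)
    if (tx.version >= 2) // RCTTypeNull (coinbase) entries would make the batch fail
      rvv.push_back(&tx.rct_signatures);
  return cryptonote::ver_mixed_rct_semantics(std::move(rvv));
}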
/**
* @brief Provides transaction info to block handling code, allowing txs to skip the mempool
*/
struct pool_supplement
{
// Map of supplemental tx info that we might need to validate a block
// Maps TXID -> transaction and blob
std::unordered_map<crypto::hash, std::pair<transaction, blobdata>> txs_by_txid;
// If non-zero, then consider all the txs' non-input consensus (NIC) rules verified for this
// hard fork. User: If you add an unverified transaction to txs_by_txid, set this field to zero!
mutable std::uint8_t nic_verified_hf_version = 0;
};
/**
* @brief Verify every non-input consensus rule for a group of non-coinbase transactions
*
* List of checks that we do for each transaction:
* 1. Check tx blob size < get_max_tx_size()
* 2. Check tx version != 0
* 3. Check tx version is less than maximum for given hard fork version
* 4. Check tx weight < get_transaction_weight_limit()
* 5. Passes core::check_tx_semantic()
* 6. Passes Blockchain::check_tx_outputs()
* 7. Passes ver_mixed_rct_semantics() [Uses batch RingCT verification when applicable]
*
* For pool_supplement input:
* We assume the structure of the pool supplement is already correct: for each value entry, the
* cryptonote::transaction matches its corresponding blobdata and the TXID map key is correctly
* calculated for that transaction. We use the .nic_verified_hf_version field to skip verification
* for the pool supplement if hf_version matches, and we cache that version on success.
*
* @param tx single transaction to verify
* @param pool_supplement pool supplement to verify
* @param tvc relevant flags will be set indicating if/why verification failed
* @param hf_version Hard fork version to run rules against
* @return true if all relevant transactions verify, false otherwise
*/
bool ver_non_input_consensus(const transaction& tx, tx_verification_context& tvc,
std::uint8_t hf_version);
bool ver_non_input_consensus(const pool_supplement& ps, tx_verification_context& tvc,
std::uint8_t hf_version);
} // namespace cryptonote
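A hedged end-to-end sketch of the pool_supplement flow (the blob list and helper name are hypothetical; only the APIs declared in this header and in cryptonote_format_utils are assumed). On success the supplement caches hf_version, so downstream callers can skip the re-check:
#include <vector>
#include "cryptonote_core/tx_verification_utils.h"
#include "cryptonote_basic/cryptonote_format_utils.h"
// Sketch: verify the non-input consensus rules for a hand-built pool supplement.
static bool verify_supplement(const std::vector<cryptonote::blobdata> &tx_blobs,
                              uint8_t hf_version)
{
  cryptonote::pool_supplement ps{};
  for (const auto &blob : tx_blobs)
  {
    cryptonote::transaction tx;
    crypto::hash txid;
    if (!cryptonote::parse_and_validate_tx_from_blob(blob, tx, txid))
      return false;
    ps.txs_by_txid.emplace(txid, std::make_pair(std::move(tx), blob));
  }
  ps.nic_verified_hf_version = 0; // nothing verified yet
  cryptonote::tx_verification_context tvc{};
  const bool ok = cryptonote::ver_non_input_consensus(ps, tvc, hf_version);
  // ok implies ps.nic_verified_hf_version == hf_version, so a second call is free.
  return ok;
}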

View File

@ -146,7 +146,7 @@ namespace cryptonote
int handle_notify_get_txpool_complement(int command, NOTIFY_GET_TXPOOL_COMPLEMENT::request& arg, cryptonote_connection_context& context);
//----------------- i_bc_protocol_layout ---------------------------------------
virtual bool relay_block(NOTIFY_NEW_BLOCK::request& arg, cryptonote_connection_context& exclude_context);
virtual bool relay_block(NOTIFY_NEW_FLUFFY_BLOCK::request& arg, cryptonote_connection_context& exclude_context);
virtual bool relay_transactions(NOTIFY_NEW_TRANSACTIONS::request& arg, const boost::uuids::uuid& source, epee::net_utils::zone zone, relay_method tx_relay);
//----------------------------------------------------------------------------------
//bool get_payload_sync_data(HANDSHAKE_DATA::request& hshd, cryptonote_connection_context& context);

View File

@ -77,7 +77,77 @@
namespace cryptonote
{
template <class CryptoHashContainer>
inline bool make_pool_supplement_from_block_entry(
const std::vector<cryptonote::tx_blob_entry>& tx_entries,
const CryptoHashContainer& blk_tx_hashes,
cryptonote::pool_supplement& pool_supplement)
{
pool_supplement.nic_verified_hf_version = 0;
if (tx_entries.size() > blk_tx_hashes.size())
{
MERROR("Failed to make pool supplement: Too many transaction blobs!");
return false;
}
for (const auto& tx_entry: tx_entries)
{
if (tx_entry.blob.size() > get_max_tx_size())
{
MERROR("Transaction blob of length " << tx_entry.blob.size() << " is too large to unpack!");
return false;
}
cryptonote::transaction tx;
crypto::hash tx_hash;
if (!cryptonote::parse_and_validate_tx_from_blob(tx_entry.blob, tx, tx_hash)
|| !blk_tx_hashes.count(tx_hash)
|| tx.pruned)
{
MERROR("failed to parse and/or validate unpruned transaction as inside block: "
<< epee::string_tools::buff_to_hex_nodelimer(tx_entry.blob)
);
return false;
}
pool_supplement.txs_by_txid.emplace(tx_hash, std::make_pair(std::move(tx), tx_entry.blob));
}
return true;
}
inline bool make_full_pool_supplement_from_block_entry(
const cryptonote::block_complete_entry& blk_entry,
cryptonote::pool_supplement& pool_supplement)
{
cryptonote::block blk;
if (!cryptonote::parse_and_validate_block_from_blob(blk_entry.block, blk))
{
MERROR("sent bad block: failed to parse and/or validate block: "
<< epee::string_tools::buff_to_hex_nodelimer(blk_entry.block)
);
return false;
}
const std::unordered_set<crypto::hash> blk_tx_hashes(blk.tx_hashes.cbegin(), blk.tx_hashes.cend());
if (blk_tx_hashes.size() != blk_entry.txs.size())
{
MERROR("sent bad block entry: number of hashes is not equal number of tx blobs: "
<< epee::string_tools::buff_to_hex_nodelimer(blk_entry.block)
);
return false;
}
else if (blk_tx_hashes.size() != blk.tx_hashes.size())
{
MERROR("sent bad block entry: there are duplicate tx hashes in parsed block: "
<< epee::string_tools::buff_to_hex_nodelimer(blk_entry.block));
return false;
}
return make_pool_supplement_from_block_entry(blk_entry.txs, blk_tx_hashes, pool_supplement);
}
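In condensed form, the flow these helpers enable (a hedged sketch, assuming the helpers above are visible where it is compiled; the real handlers below add size checks, logging, and peer punishment):
#include "cryptonote_core/cryptonote_core.h"
#include "cryptonote_core/tx_verification_utils.h"
// Sketch: turn a full block entry into a pool supplement and feed it to core.
template <class t_core>
static bool add_complete_block_entry(t_core &core,
                                     const cryptonote::block_complete_entry &entry)
{
  cryptonote::pool_supplement block_txs{};
  if (!cryptonote::make_full_pool_supplement_from_block_entry(entry, block_txs))
    return false; // malformed blobs, duplicate hashes, or pruned txs
  cryptonote::block_verification_context bvc = {};
  return core.handle_single_incoming_block(entry.block, NULL, bvc, block_txs)
      && !bvc.m_verifivation_failed;
}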
//-----------------------------------------------------------------------------------------------------------------------
@ -475,7 +545,21 @@ namespace cryptonote
template<class t_core>
int t_cryptonote_protocol_handler<t_core>::handle_notify_new_block(int command, NOTIFY_NEW_BLOCK::request& arg, cryptonote_connection_context& context)
{
// @TODO: Eventually drop support for this endpoint
MLOGIF_P2P_MESSAGE(crypto::hash hash; cryptonote::block b; bool ret = cryptonote::parse_and_validate_block_from_blob(arg.b.block, b, &hash);, ret, context << "Received NOTIFY_NEW_BLOCK " << hash << " (height " << arg.current_blockchain_height << ", " << arg.b.txs.size() << " txes)");
// Redirect this request to the fluffy block handling path
NOTIFY_NEW_FLUFFY_BLOCK::request fluffy_arg;
fluffy_arg.b = std::move(arg.b);
fluffy_arg.current_blockchain_height = arg.current_blockchain_height;
return handle_notify_new_fluffy_block(command, fluffy_arg, context);
}
//------------------------------------------------------------------------------------------------------------------------
template<class t_core>
int t_cryptonote_protocol_handler<t_core>::handle_notify_new_fluffy_block(int command, NOTIFY_NEW_FLUFFY_BLOCK::request& arg, cryptonote_connection_context& context)
{
// If we are synchronizing the node or setting up this connection, then do nothing
if(context.m_state != cryptonote_connection_context::state_normal)
return 1;
if(!is_synchronized()) // can happen if a peer connection goes to normal but another thread still hasn't finished adding queued blocks
@ -483,59 +567,151 @@ namespace cryptonote
LOG_DEBUG_CC(context, "Received new block while syncing, ignored");
return 1;
}
m_core.pause_mine();
std::vector<block_complete_entry> blocks;
blocks.push_back(arg.b);
std::vector<block> pblocks;
if (!m_core.prepare_handle_incoming_blocks(blocks, pblocks))
// sanity check block blob size
if (!m_core.check_incoming_block_size(arg.b.block))
{
LOG_PRINT_CCONTEXT_L1("Block verification failed: prepare_handle_incoming_blocks failed, dropping connection");
drop_connection(context, false, false);
m_core.resume_mine();
return 1;
}
for(auto tx_blob_it = arg.b.txs.begin(); tx_blob_it!=arg.b.txs.end();tx_blob_it++)
// Parse and quick hash incoming block, dropping the connection on failure
block new_block;
crypto::hash new_block_hash;
if (!parse_and_validate_block_from_blob(arg.b.block, new_block, &new_block_hash))
{
cryptonote::tx_verification_context tvc = AUTO_VAL_INIT(tvc);
m_core.handle_incoming_tx(*tx_blob_it, tvc, relay_method::block, true);
if(tvc.m_verifivation_failed)
LOG_ERROR_CCONTEXT
(
"sent wrong block: failed to parse and validate block: "
<< epee::string_tools::buff_to_hex_nodelimer(arg.b.block)
<< ", dropping connection"
);
drop_connection(context, false, false);
return 1;
}
// Log block info
MLOG_P2P_MESSAGE(context << "Received NOTIFY_NEW_FLUFFY_BLOCK " << new_block_hash << " (height "
<< arg.current_blockchain_height << ", " << arg.b.txs.size() << " txes)");
// Pause mining and resume after block verification to prevent wasted mining cycles while
// validating the next block. Needs more research into whether this is a DoS vector. Invalid
// block validation will cause disconnects and bans, so it might not be that bad.
m_core.pause_mine();
const auto resume_mine_on_leave = epee::misc_utils::create_scope_leave_handler([this](){ m_core.resume_mine(); });
// This set allows us to quickly sanity check that the block binds all txs contained in this
// fluffy payload, which means that no extra stowaway txs can be harbored. In the case of a
// deterministic block verification failure, the peer will be punished accordingly. In other
// cases, valid PoW will be required before we perform the expensive consensus checks for the txs
// inside the block.
std::unordered_map<crypto::hash, uint64_t> blk_txids_set;
for (size_t tx_idx = 0; tx_idx < new_block.tx_hashes.size(); ++tx_idx)
blk_txids_set.emplace(new_block.tx_hashes[tx_idx], tx_idx);
// Check for duplicate txids in parsed block blob
if (blk_txids_set.size() != new_block.tx_hashes.size())
{
MERROR("sent bad block entry: there are duplicate tx hashes in parsed block: "
<< epee::string_tools::buff_to_hex_nodelimer(arg.b.block));
drop_connection(context, false, false);
return 1;
}
// Keeping a map of the full transactions provided in this payload allows us to pass them
// directly to core::handle_single_incoming_block() -> Blockchain::add_block(), which means we
// can skip the mempool for faster block propagation. Later in the function, we will erase all
// transactions from the relayed block.
pool_supplement extra_block_txs;
if (!make_pool_supplement_from_block_entry(arg.b.txs, blk_txids_set, extra_block_txs))
{
LOG_ERROR_CCONTEXT
(
"Failed to parse one or more transactions in fluffy block with ID " << new_block_hash <<
", dropping connection"
);
drop_connection(context, false, false);
return 1;
}
// try adding block to the blockchain
block_verification_context bvc = {};
const bool handle_block_res = m_core.handle_single_incoming_block(arg.b.block,
&new_block,
bvc,
extra_block_txs);
// handle result of attempted block add
if (!handle_block_res || bvc.m_verifivation_failed)
{
if (bvc.m_missing_txs)
{
LOG_PRINT_CCONTEXT_L1("Block verification failed: transaction verification failed, dropping connection");
drop_connection(context, false, false);
m_core.cleanup_handle_incoming_blocks();
m_core.resume_mine();
// Block verification failed b/c of missing transactions, so request fluffy block again with
// missing transactions (including the ones newly discovered in this fluffy block). Note that
// PoW checking happens before missing transactions checks, so if bvc.m_missing_txs is true,
// then that means that we passed PoW checking, so a peer can't get us to re-request fluffy
// blocks for free.
// Instead of requesting missing transactions by hash like BTC,
// we do it by index (thanks to a suggestion from moneromooo) because
// we're way cooler .. and also because they're smaller than hashes.
std::vector<uint64_t> need_tx_indices;
need_tx_indices.reserve(new_block.tx_hashes.size());
// Collect need_tx_indices by polling blockchain storage and mempool storage
for (size_t tx_idx = 0; tx_idx < new_block.tx_hashes.size(); ++tx_idx)
{
const crypto::hash &tx_hash = new_block.tx_hashes[tx_idx];
std::vector<cryptonote::blobdata> tx_blobs;
std::vector<crypto::hash> missed_txs;
bool need_tx = !m_core.pool_has_tx(tx_hash);
need_tx = need_tx && (!m_core.get_transactions({tx_hash}, tx_blobs, missed_txs, /*pruned=*/true)
|| !missed_txs.empty());
if (need_tx)
need_tx_indices.push_back(tx_idx);
}
// Make request form
MDEBUG("We are missing " << need_tx_indices.size() << " txes for this fluffy block");
for (auto txidx: need_tx_indices)
MDEBUG(" tx " << new_block.tx_hashes[txidx]);
NOTIFY_REQUEST_FLUFFY_MISSING_TX::request missing_tx_req;
missing_tx_req.block_hash = new_block_hash;
missing_tx_req.current_blockchain_height = arg.current_blockchain_height;
missing_tx_req.missing_tx_indices = std::move(need_tx_indices);
// Post NOTIFY_REQUEST_FLUFFY_MISSING_TX request to peer
MLOG_P2P_MESSAGE("-->>NOTIFY_REQUEST_FLUFFY_MISSING_TX: missing_tx_indices.size()=" << missing_tx_req.missing_tx_indices.size() );
post_notify<NOTIFY_REQUEST_FLUFFY_MISSING_TX>(missing_tx_req, context);
}
else // failure for some other reason besides missing txs...
{
// drop connection and punish peer
LOG_PRINT_CCONTEXT_L0("Block verification failed, dropping connection");
drop_connection_with_score(context, bvc.m_bad_pow ? P2P_IP_FAILS_BEFORE_BLOCK : 1, false);
return 1;
}
}
block_verification_context bvc = {};
m_core.handle_incoming_block(arg.b.block, pblocks.empty() ? NULL : &pblocks[0], bvc); // got block from handle_notify_new_block
if (!m_core.cleanup_handle_incoming_blocks(true))
else if( bvc.m_added_to_main_chain )
{
LOG_PRINT_CCONTEXT_L0("Failure in cleanup_handle_incoming_blocks");
m_core.resume_mine();
return 1;
}
m_core.resume_mine();
if(bvc.m_verifivation_failed)
{
LOG_PRINT_CCONTEXT_L0("Block verification failed, dropping connection");
drop_connection_with_score(context, bvc.m_bad_pow ? P2P_IP_FAILS_BEFORE_BLOCK : 1, false);
return 1;
}
if(bvc.m_added_to_main_chain)
{
//TODO: Add here announce protocol usage
// Relay an empty block
arg.b.txs.clear();
relay_block(arg, context);
}else if(bvc.m_marked_as_orphaned)
}
else if( bvc.m_marked_as_orphaned )
{
context.m_needed_objects.clear();
context.m_state = cryptonote_connection_context::state_synchronizing;
NOTIFY_REQUEST_CHAIN::request r = {};
context.m_expect_height = m_core.get_current_blockchain_height();
m_core.get_short_chain_history(r.block_ids);
r.prune = m_sync_pruned_blocks;
handler_request_blocks_history( r.block_ids ); // change the limit(?), sleep(?)
r.prune = m_sync_pruned_blocks;
context.m_last_request_time = boost::posix_time::microsec_clock::universal_time();
context.m_expect_response = NOTIFY_RESPONSE_CHAIN_ENTRY::ID;
MLOG_P2P_MESSAGE("-->>NOTIFY_REQUEST_CHAIN: m_block_ids.size()=" << r.block_ids.size() );
@ -547,309 +723,6 @@ namespace cryptonote
// and verify them with respect to what blocks we already have
CHECK_AND_ASSERT_MES(m_core.update_checkpoints(), 1, "One or more checkpoints loaded from json or dns conflicted with existing checkpoints.");
return 1;
}
//------------------------------------------------------------------------------------------------------------------------
template<class t_core>
int t_cryptonote_protocol_handler<t_core>::handle_notify_new_fluffy_block(int command, NOTIFY_NEW_FLUFFY_BLOCK::request& arg, cryptonote_connection_context& context)
{
MLOGIF_P2P_MESSAGE(crypto::hash hash; cryptonote::block b; bool ret = cryptonote::parse_and_validate_block_from_blob(arg.b.block, b, &hash);, ret, context << "Received NOTIFY_NEW_FLUFFY_BLOCK " << hash << " (height " << arg.current_blockchain_height << ", " << arg.b.txs.size() << " txes)");
if(context.m_state != cryptonote_connection_context::state_normal)
return 1;
if(!is_synchronized()) // can happen if a peer connection goes to normal but another thread still hasn't finished adding queued blocks
{
LOG_DEBUG_CC(context, "Received new block while syncing, ignored");
return 1;
}
m_core.pause_mine();
block new_block;
transaction miner_tx;
if(parse_and_validate_block_from_blob(arg.b.block, new_block))
{
// This is a second notification, we must have asked for some missing tx
if(!context.m_requested_objects.empty())
{
// What we asked for != to what we received ..
if(context.m_requested_objects.size() != arg.b.txs.size())
{
LOG_ERROR_CCONTEXT
(
"NOTIFY_NEW_FLUFFY_BLOCK -> request/response mismatch, "
<< "block = " << epee::string_tools::pod_to_hex(get_blob_hash(arg.b.block))
<< ", requested = " << context.m_requested_objects.size()
<< ", received = " << new_block.tx_hashes.size()
<< ", dropping connection"
);
drop_connection(context, false, false);
m_core.resume_mine();
return 1;
}
}
std::vector<tx_blob_entry> have_tx;
have_tx.reserve(new_block.tx_hashes.size());
// Instead of requesting missing transactions by hash like BTC,
// we do it by index (thanks to a suggestion from moneromooo) because
// we're way cooler .. and also because they're smaller than hashes.
//
// Also, remember to pepper some whitespace changes around to bother
// moneromooo ... only because I <3 him.
std::vector<uint64_t> need_tx_indices;
need_tx_indices.reserve(new_block.tx_hashes.size());
transaction tx;
crypto::hash tx_hash;
for(auto& tx_blob: arg.b.txs)
{
if(parse_and_validate_tx_from_blob(tx_blob.blob, tx))
{
try
{
if(!get_transaction_hash(tx, tx_hash))
{
LOG_PRINT_CCONTEXT_L1
(
"NOTIFY_NEW_FLUFFY_BLOCK: get_transaction_hash failed"
<< ", dropping connection"
);
drop_connection(context, false, false);
m_core.resume_mine();
return 1;
}
}
catch(...)
{
LOG_PRINT_CCONTEXT_L1
(
"NOTIFY_NEW_FLUFFY_BLOCK: get_transaction_hash failed"
<< ", exception thrown"
<< ", dropping connection"
);
drop_connection(context, false, false);
m_core.resume_mine();
return 1;
}
// hijacking m_requested objects in connection context to patch up
// a possible DOS vector pointed out by @monero-moo where peers keep
// sending (0...n-1) transactions.
// If requested objects is not empty, then we must have asked for
// some missing transacionts, make sure that they're all there.
//
// Can I safely re-use this field? I think so, but someone check me!
if(!context.m_requested_objects.empty())
{
auto req_tx_it = context.m_requested_objects.find(tx_hash);
if(req_tx_it == context.m_requested_objects.end())
{
LOG_ERROR_CCONTEXT
(
"Peer sent wrong transaction (NOTIFY_NEW_FLUFFY_BLOCK): "
<< "transaction with id = " << tx_hash << " wasn't requested, "
<< "dropping connection"
);
drop_connection(context, false, false);
m_core.resume_mine();
return 1;
}
context.m_requested_objects.erase(req_tx_it);
}
// we might already have the tx that the peer
// sent in our pool, so don't verify again..
if(!m_core.pool_has_tx(tx_hash))
{
MDEBUG("Incoming tx " << tx_hash << " not in pool, adding");
cryptonote::tx_verification_context tvc = AUTO_VAL_INIT(tvc);
if(!m_core.handle_incoming_tx(tx_blob, tvc, relay_method::block, true) || tvc.m_verifivation_failed)
{
LOG_PRINT_CCONTEXT_L1("Block verification failed: transaction verification failed, dropping connection");
drop_connection(context, false, false);
m_core.resume_mine();
return 1;
}
//
// future todo:
// tx should only not be added to pool if verification failed, but
// maybe in the future could not be added for other reasons
// according to monero-moo so keep track of these separately ..
//
}
}
else
{
LOG_ERROR_CCONTEXT
(
"sent wrong tx: failed to parse and validate transaction: "
<< epee::string_tools::buff_to_hex_nodelimer(tx_blob.blob)
<< ", dropping connection"
);
drop_connection(context, false, false);
m_core.resume_mine();
return 1;
}
}
// The initial size equality check could have been fooled if the sender
// gave us the number of transactions we asked for, but not the right
// ones. This check make sure the transactions we asked for were the
// ones we received.
if(context.m_requested_objects.size())
{
MERROR
(
"NOTIFY_NEW_FLUFFY_BLOCK: peer sent the number of transaction requested"
<< ", but not the actual transactions requested"
<< ", context.m_requested_objects.size() = " << context.m_requested_objects.size()
<< ", dropping connection"
);
drop_connection(context, false, false);
m_core.resume_mine();
return 1;
}
size_t tx_idx = 0;
for(auto& tx_hash: new_block.tx_hashes)
{
cryptonote::blobdata txblob;
if(m_core.get_pool_transaction(tx_hash, txblob, relay_category::broadcasted))
{
have_tx.push_back({txblob, crypto::null_hash});
}
else
{
std::vector<crypto::hash> tx_ids;
std::vector<transaction> txes;
std::vector<crypto::hash> missing;
tx_ids.push_back(tx_hash);
if (m_core.get_transactions(tx_ids, txes, missing) && missing.empty())
{
if (txes.size() == 1)
{
have_tx.push_back({tx_to_blob(txes.front()), crypto::null_hash});
}
else
{
MERROR("1 tx requested, none not found, but " << txes.size() << " returned");
m_core.resume_mine();
return 1;
}
}
else
{
MDEBUG("Tx " << tx_hash << " not found in pool");
need_tx_indices.push_back(tx_idx);
}
}
++tx_idx;
}
if(!need_tx_indices.empty()) // drats, we don't have everything..
{
// request non-mempool txs
MDEBUG("We are missing " << need_tx_indices.size() << " txes for this fluffy block");
for (auto txidx: need_tx_indices)
MDEBUG(" tx " << new_block.tx_hashes[txidx]);
NOTIFY_REQUEST_FLUFFY_MISSING_TX::request missing_tx_req;
missing_tx_req.block_hash = get_block_hash(new_block);
missing_tx_req.current_blockchain_height = arg.current_blockchain_height;
missing_tx_req.missing_tx_indices = std::move(need_tx_indices);
m_core.resume_mine();
MLOG_P2P_MESSAGE("-->>NOTIFY_REQUEST_FLUFFY_MISSING_TX: missing_tx_indices.size()=" << missing_tx_req.missing_tx_indices.size() );
post_notify<NOTIFY_REQUEST_FLUFFY_MISSING_TX>(missing_tx_req, context);
}
else // whoo-hoo we've got em all ..
{
MDEBUG("We have all needed txes for this fluffy block");
block_complete_entry b;
b.block = arg.b.block;
b.txs = have_tx;
std::vector<block_complete_entry> blocks;
blocks.push_back(b);
std::vector<block> pblocks;
if (!m_core.prepare_handle_incoming_blocks(blocks, pblocks))
{
LOG_PRINT_CCONTEXT_L0("Failure in prepare_handle_incoming_blocks");
m_core.resume_mine();
return 1;
}
block_verification_context bvc = {};
m_core.handle_incoming_block(arg.b.block, pblocks.empty() ? NULL : &pblocks[0], bvc); // got block from handle_notify_new_block
if (!m_core.cleanup_handle_incoming_blocks(true))
{
LOG_PRINT_CCONTEXT_L0("Failure in cleanup_handle_incoming_blocks");
m_core.resume_mine();
return 1;
}
m_core.resume_mine();
if( bvc.m_verifivation_failed )
{
LOG_PRINT_CCONTEXT_L0("Block verification failed, dropping connection");
drop_connection_with_score(context, bvc.m_bad_pow ? P2P_IP_FAILS_BEFORE_BLOCK : 1, false);
return 1;
}
if( bvc.m_added_to_main_chain )
{
//TODO: Add here announce protocol usage
NOTIFY_NEW_BLOCK::request reg_arg = AUTO_VAL_INIT(reg_arg);
reg_arg.current_blockchain_height = arg.current_blockchain_height;
reg_arg.b = b;
relay_block(reg_arg, context);
}
else if( bvc.m_marked_as_orphaned )
{
context.m_needed_objects.clear();
context.m_state = cryptonote_connection_context::state_synchronizing;
NOTIFY_REQUEST_CHAIN::request r = {};
context.m_expect_height = m_core.get_current_blockchain_height();
m_core.get_short_chain_history(r.block_ids);
handler_request_blocks_history( r.block_ids ); // change the limit(?), sleep(?)
r.prune = m_sync_pruned_blocks;
context.m_last_request_time = boost::posix_time::microsec_clock::universal_time();
context.m_expect_response = NOTIFY_RESPONSE_CHAIN_ENTRY::ID;
MLOG_P2P_MESSAGE("-->>NOTIFY_REQUEST_CHAIN: m_block_ids.size()=" << r.block_ids.size() );
post_notify<NOTIFY_REQUEST_CHAIN>(r, context);
MLOG_PEER_STATE("requesting chain");
}
// load json & DNS checkpoints every 10min/hour respectively,
// and verify them with respect to what blocks we already have
CHECK_AND_ASSERT_MES(m_core.update_checkpoints(), 1, "One or more checkpoints loaded from json or dns conflicted with existing checkpoints.");
}
}
else
{
LOG_ERROR_CCONTEXT
(
"sent wrong block: failed to parse and validate block: "
<< epee::string_tools::buff_to_hex_nodelimer(arg.b.block)
<< ", dropping connection"
);
m_core.resume_mine();
drop_connection(context, false, false);
return 1;
}
return 1;
}
//------------------------------------------------------------------------------------------------------------------------
@ -1036,7 +909,7 @@ namespace cryptonote
for (auto& tx : arg.txs)
{
tx_verification_context tvc{};
if (!m_core.handle_incoming_tx({tx, crypto::null_hash}, tvc, tx_relay, true) && !tvc.m_no_drop_offense)
if (!m_core.handle_incoming_tx(tx, tvc, tx_relay, true) && !tvc.m_no_drop_offense)
{
LOG_PRINT_CCONTEXT_L1("Tx verification failed, dropping connection");
drop_connection(context, false, false);
@ -1588,38 +1461,14 @@ namespace cryptonote
// process transactions
TIME_MEASURE_START(transactions_process_time);
num_txs += block_entry.txs.size();
std::vector<tx_verification_context> tvc;
m_core.handle_incoming_txs(block_entry.txs, tvc, relay_method::block, true);
if (tvc.size() != block_entry.txs.size())
pool_supplement block_txs;
if (!make_full_pool_supplement_from_block_entry(block_entry, block_txs))
{
LOG_ERROR_CCONTEXT("Internal error: tvc.size() != block_entry.txs.size()");
if (!m_core.cleanup_handle_incoming_blocks())
{
LOG_PRINT_CCONTEXT_L0("Failure in cleanup_handle_incoming_blocks");
return 1;
}
return 1;
}
std::vector<tx_blob_entry>::const_iterator it = block_entry.txs.begin();
for (size_t i = 0; i < tvc.size(); ++i, ++it)
{
if(tvc[i].m_verifivation_failed)
{
drop_connections(span_origin);
if (!m_p2p->for_connection(span_connection_id, [&](cryptonote_connection_context& context, nodetool::peerid_type peer_id, uint32_t f)->bool{
cryptonote::transaction tx;
crypto::hash txid;
if (it->prunable_hash == crypto::null_hash)
{
parse_and_validate_tx_from_blob(it->blob, tx, txid); // must succeed if we got here
}
else
{
parse_and_validate_tx_base_from_blob(it->blob, tx); // must succeed if we got here
txid = get_pruned_transaction_hash(tx, it->prunable_hash);
}
LOG_ERROR_CCONTEXT("transaction verification failed on NOTIFY_RESPONSE_GET_OBJECTS, tx_id = "
<< epee::string_tools::pod_to_hex(txid) << ", dropping connection");
LOG_ERROR_CCONTEXT("transaction parsing failed for 1 or more txs in NOTIFY_RESPONSE_GET_OBJECTS,"
"dropping connections");
drop_connection(context, false, true);
return 1;
}))
@ -1633,7 +1482,6 @@ namespace cryptonote
// in case the peer had dropped beforehand, remove the span anyway so other threads can wake up and get it
m_block_queue.remove_spans(span_connection_id, start_height);
return 1;
}
}
TIME_MEASURE_FINISH(transactions_process_time);
transactions_process_time_full += transactions_process_time;
@ -1643,7 +1491,11 @@ namespace cryptonote
TIME_MEASURE_START(block_process_time);
block_verification_context bvc = {};
m_core.handle_incoming_block(block_entry.block, pblocks.empty() ? NULL : &pblocks[blockidx], bvc, false); // <--- process block
m_core.handle_incoming_block(block_entry.block,
pblocks.empty() ? NULL : &pblocks[blockidx],
bvc,
block_txs,
false); // <--- process block
if(bvc.m_verifivation_failed)
{
@ -2761,31 +2613,17 @@ skip:
}
//------------------------------------------------------------------------------------------------------------------------
template<class t_core>
bool t_cryptonote_protocol_handler<t_core>::relay_block(NOTIFY_NEW_BLOCK::request& arg, cryptonote_connection_context& exclude_context)
bool t_cryptonote_protocol_handler<t_core>::relay_block(NOTIFY_NEW_FLUFFY_BLOCK::request& arg, cryptonote_connection_context& exclude_context)
{
NOTIFY_NEW_FLUFFY_BLOCK::request fluffy_arg = AUTO_VAL_INIT(fluffy_arg);
fluffy_arg.current_blockchain_height = arg.current_blockchain_height;
std::vector<tx_blob_entry> fluffy_txs;
fluffy_arg.b = arg.b;
fluffy_arg.b.txs = fluffy_txs;
// sort peers between fluffy ones and others
std::vector<std::pair<epee::net_utils::zone, boost::uuids::uuid>> fullConnections, fluffyConnections;
m_p2p->for_each_connection([this, &exclude_context, &fullConnections, &fluffyConnections](connection_context& context, nodetool::peerid_type peer_id, uint32_t support_flags)
std::vector<std::pair<epee::net_utils::zone, boost::uuids::uuid>> fluffyConnections;
m_p2p->for_each_connection([this, &exclude_context, &fluffyConnections](connection_context& context, nodetool::peerid_type peer_id, uint32_t support_flags)
{
// peer_id also filters out connections before handshake
if (peer_id && exclude_context.m_connection_id != context.m_connection_id && context.m_remote_address.get_zone() == epee::net_utils::zone::public_)
{
if(m_core.fluffy_blocks_enabled() && (support_flags & P2P_SUPPORT_FLAG_FLUFFY_BLOCKS))
{
LOG_DEBUG_CC(context, "PEER SUPPORTS FLUFFY BLOCKS - RELAYING THIN/COMPACT WHATEVER BLOCK");
fluffyConnections.push_back({context.m_remote_address.get_zone(), context.m_connection_id});
}
else
{
LOG_DEBUG_CC(context, "PEER DOESN'T SUPPORT FLUFFY BLOCKS - RELAYING FULL BLOCK");
fullConnections.push_back({context.m_remote_address.get_zone(), context.m_connection_id});
}
LOG_DEBUG_CC(context, "RELAYING FLUFFY BLOCK TO PEER");
fluffyConnections.push_back({context.m_remote_address.get_zone(), context.m_connection_id});
}
return true;
});
@ -2794,15 +2632,9 @@ skip:
if (!fluffyConnections.empty())
{
epee::levin::message_writer fluffyBlob{32 * 1024};
epee::serialization::store_t_to_binary(fluffy_arg, fluffyBlob.buffer);
epee::serialization::store_t_to_binary(arg, fluffyBlob.buffer);
m_p2p->relay_notify_to_list(NOTIFY_NEW_FLUFFY_BLOCK::ID, std::move(fluffyBlob), std::move(fluffyConnections));
}
if (!fullConnections.empty())
{
epee::levin::message_writer fullBlob{128 * 1024};
epee::serialization::store_t_to_binary(arg, fullBlob.buffer);
m_p2p->relay_notify_to_list(NOTIFY_NEW_BLOCK::ID, std::move(fullBlob), std::move(fullConnections));
}
return true;
}
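With the full-block branch gone, relay_block() only ever serializes the NOTIFY_NEW_FLUFFY_BLOCK::request it is handed, so callers are expected to pass a block_complete_entry that carries the block blob but no transaction blobs. A minimal caller sketch under that assumption (make_fluffy_notification is a hypothetical helper name; the field names follow the hunks above and block_to_blob() is the existing serializer in cryptonote_format_utils):

#include "cryptonote_basic/cryptonote_format_utils.h"
#include "cryptonote_protocol/cryptonote_protocol_defs.h"

// Sketch only, not part of this diff: build a tx-less (fluffy) notification
// suitable for the simplified relay_block() above.
static cryptonote::NOTIFY_NEW_FLUFFY_BLOCK::request
make_fluffy_notification(const cryptonote::block &b, uint64_t chain_height)
{
  cryptonote::NOTIFY_NEW_FLUFFY_BLOCK::request r{};
  r.b.block = cryptonote::block_to_blob(b); // header, miner tx and tx hash list only
  r.b.txs.clear();                          // peers fetch any txs they are missing themselves
  r.current_blockchain_height = chain_height;
  return r;
}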

View File

@ -41,7 +41,7 @@ namespace cryptonote
struct i_cryptonote_protocol
{
virtual bool is_synchronized() const = 0;
virtual bool relay_block(NOTIFY_NEW_BLOCK::request& arg, cryptonote_connection_context& exclude_context)=0;
virtual bool relay_block(NOTIFY_NEW_FLUFFY_BLOCK::request& arg, cryptonote_connection_context& exclude_context)=0;
virtual bool relay_transactions(NOTIFY_NEW_TRANSACTIONS::request& arg, const boost::uuids::uuid& source, epee::net_utils::zone zone, relay_method tx_relay)=0;
//virtual bool request_objects(NOTIFY_REQUEST_GET_OBJECTS::request& arg, cryptonote_connection_context& context)=0;
};
@ -55,7 +55,7 @@ namespace cryptonote
{
return false;
}
virtual bool relay_block(NOTIFY_NEW_BLOCK::request& arg, cryptonote_connection_context& exclude_context)
virtual bool relay_block(NOTIFY_NEW_FLUFFY_BLOCK::request& arg, cryptonote_connection_context& exclude_context)
{
return false;
}

View File

@ -1063,7 +1063,7 @@ bool t_command_parser_executor::set_bootstrap_daemon(const std::vector<std::stri
bool t_command_parser_executor::flush_cache(const std::vector<std::string>& args)
{
bool bad_txs = false, bad_blocks = false;
bool bad_blocks = false;
std::string arg;
if (args.empty())
@ -1072,18 +1072,16 @@ bool t_command_parser_executor::flush_cache(const std::vector<std::string>& args
for (size_t i = 0; i < args.size(); ++i)
{
arg = args[i];
if (arg == "bad-txs")
bad_txs = true;
else if (arg == "bad-blocks")
if (arg == "bad-blocks")
bad_blocks = true;
else
goto show_list;
}
return m_executor.flush_cache(bad_txs, bad_blocks);
return m_executor.flush_cache(bad_blocks);
show_list:
std::cout << "Invalid cache type: " << arg << std::endl;
std::cout << "Cache types: bad-txs bad-blocks" << std::endl;
std::cout << "Cache types: bad-blocks" << std::endl;
return true;
}
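With the bad-txs cache removed, "flush_cache bad-blocks" is the only form the console command accepts; any other argument falls through to the cache-type listing above.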

View File

@ -119,9 +119,9 @@ public:
if (shared)
{
core.get().get_blockchain_storage().set_txpool_notify(cryptonote::listener::zmq_pub::txpool_add{shared});
core.get().get_blockchain_storage().add_block_notify(cryptonote::listener::zmq_pub::chain_main{shared});
core.get().get_blockchain_storage().add_miner_notify(cryptonote::listener::zmq_pub::miner_data{shared});
core.get().set_txpool_listener(cryptonote::listener::zmq_pub::txpool_add{shared});
}
}
else // if --no-zmq specified

View File

@ -2447,14 +2447,13 @@ bool t_rpc_command_executor::set_bootstrap_daemon(
return true;
}
bool t_rpc_command_executor::flush_cache(bool bad_txs, bool bad_blocks)
bool t_rpc_command_executor::flush_cache(bool bad_blocks)
{
cryptonote::COMMAND_RPC_FLUSH_CACHE::request req;
cryptonote::COMMAND_RPC_FLUSH_CACHE::response res;
std::string fail_message = "Unsuccessful";
epee::json_rpc::error error_resp;
req.bad_txs = bad_txs;
req.bad_blocks = bad_blocks;
if (m_is_rpc)

View File

@ -172,7 +172,7 @@ public:
bool rpc_payments();
bool flush_cache(bool bad_txs, bool invalid_blocks);
bool flush_cache(bool invalid_blocks);
};
} // namespace daemonize

View File

@ -1387,7 +1387,7 @@ namespace cryptonote
if (!skip_validation)
{
tx_verification_context tvc{};
if(!m_core.handle_incoming_tx({tx_blob, crypto::null_hash}, tvc, (req.do_not_relay ? relay_method::none : relay_method::local), false) || tvc.m_verifivation_failed)
if(!m_core.handle_incoming_tx(tx_blob, tvc, (req.do_not_relay ? relay_method::none : relay_method::local), false) || tvc.m_verifivation_failed)
{
res.status = "Failed";
std::string reason = "";
@ -3579,8 +3579,6 @@ namespace cryptonote
bool core_rpc_server::on_flush_cache(const COMMAND_RPC_FLUSH_CACHE::request& req, COMMAND_RPC_FLUSH_CACHE::response& res, epee::json_rpc::error& error_resp, const connection_context *ctx)
{
RPC_TRACKER(flush_cache);
if (req.bad_txs)
m_core.flush_bad_txs_cache();
if (req.bad_blocks)
m_core.flush_invalid_blocks();
res.status = CORE_RPC_STATUS_OK;

View File

@ -2778,12 +2778,10 @@ inline const std::string get_rpc_status(const bool trusted_daemon, const std::st
{
struct request_t: public rpc_request_base
{
bool bad_txs;
bool bad_blocks;
BEGIN_KV_SERIALIZE_MAP()
KV_SERIALIZE_PARENT(rpc_request_base)
KV_SERIALIZE_OPT(bad_txs, false)
KV_SERIALIZE_OPT(bad_blocks, false)
END_KV_SERIALIZE_MAP()
};

View File

@ -370,7 +370,7 @@ namespace rpc
tx_verification_context tvc = AUTO_VAL_INIT(tvc);
if(!m_core.handle_incoming_tx({tx_blob, crypto::null_hash}, tvc, (relay ? relay_method::local : relay_method::none), false) || tvc.m_verifivation_failed)
if(!m_core.handle_incoming_tx(tx_blob, tvc, (relay ? relay_method::local : relay_method::none), false) || tvc.m_verifivation_failed)
{
if (tvc.m_verifivation_failed)
{

View File

@ -598,7 +598,7 @@ public:
cryptonote::tx_verification_context tvc = AUTO_VAL_INIT(tvc);
size_t pool_size = m_c.get_pool_transactions_count();
m_c.handle_incoming_tx({t_serializable_object_to_blob(tx), crypto::null_hash}, tvc, m_tx_relay, false);
m_c.handle_incoming_tx(t_serializable_object_to_blob(tx), tvc, m_tx_relay, false);
bool tx_added = pool_size + 1 == m_c.get_pool_transactions_count();
bool r = m_validator.check_tx_verification_context(tvc, tx_added, m_ev_index, tx);
CHECK_AND_NO_ASSERT_MES(r, false, "tx verification context check failed");
@ -609,16 +609,17 @@ public:
{
log_event("cryptonote::transaction");
std::vector<cryptonote::tx_blob_entry> tx_blobs;
std::vector<cryptonote::blobdata> tx_blobs;
std::vector<cryptonote::tx_verification_context> tvcs;
cryptonote::tx_verification_context tvc0 = AUTO_VAL_INIT(tvc0);
for (const auto &tx: txs)
{
tx_blobs.push_back({t_serializable_object_to_blob(tx)});
tx_blobs.emplace_back(t_serializable_object_to_blob(tx));
tvcs.push_back(tvc0);
}
size_t pool_size = m_c.get_pool_transactions_count();
m_c.handle_incoming_txs(tx_blobs, tvcs, m_tx_relay, false);
for (size_t i = 0; i < tx_blobs.size(); ++i)
m_c.handle_incoming_tx(tx_blobs[i], tvcs[i], m_tx_relay, false);
size_t tx_added = m_c.get_pool_transactions_count() - pool_size;
bool r = m_validator.check_tx_verification_context_array(tvcs, tx_added, m_ev_index, txs);
CHECK_AND_NO_ASSERT_MES(r, false, "tx verification context check failed");

View File

@ -43,7 +43,10 @@ class P2PTest():
self.create()
self.mine(80)
self.test_p2p_reorg()
self.test_p2p_tx_propagation()
txid = self.test_p2p_tx_propagation()
self.test_p2p_block_propagation_shared(txid)
txid = self.test_p2p_tx_propagation()
self.test_p2p_block_propagation_new(txid)
def reset(self):
print('Resetting blockchain')
@ -157,7 +160,6 @@ class P2PTest():
loops -= 1
assert loops >= 0
def test_p2p_tx_propagation(self):
print('Testing P2P tx propagation')
daemons = (Daemon(idx=2), Daemon(idx=3))
@ -201,6 +203,107 @@ class P2PTest():
npending = len(pending_daemons)
assert npending == 0, '%d daemons pending' % npending
return txid
def test_p2p_block_propagation_shared(self, mempool_txid):
print('Testing P2P block propagation with shared TX')
daemon2 = Daemon(idx = 2)
daemon3 = Daemon(idx = 3)
# check precondition: txid in daemon2's and daemon3's mempool
res = daemon2.get_transaction_pool_hashes()
assert mempool_txid in res.get('tx_hashes', [])
res = daemon3.get_transaction_pool_hashes()
assert mempool_txid in res.get('tx_hashes', [])
# mine block on daemon2
res = daemon2.generateblocks('42ey1afDFnn4886T7196doS9GPMzexD9gXpsZJDwVjeRVdFCSoHnv7KPbBeGpzJBzHRCAs9UxqeoyFQMYbqSWYTfJJQAWDm', 1)
block_height = res.height
# wait until both are synced, or 5 seconds, whichever comes first.
# the timeout should be very low here since block propagation, unlike tx propagation, is designed
# to be as fast as possible.
# and since both daemons already have the tx in their mempools, a notification of a new fluffy block, which is
# pushed out immediately upon being mined, should result in the block being added to the chain instantly,
# without any round trips.
# this test should only fail if you're running it on a potato where PoW verification + check_tx_inputs() takes
# more than 5 seconds.
deadline = time.monotonic() + 5
result = None
while result is None:
res2 = daemon2.get_info()
res3 = daemon3.get_info()
if res2.top_block_hash == res3.top_block_hash:
result = True
elif time.monotonic() > deadline:
result = False
else:
time.sleep(.25)
assert result, "Shared tx block propagation timed out"
# check the tx has moved to both daemons' blockchains at the top block
for daemon in [daemon2, daemon3]:
res = daemon.get_transaction_pool_hashes()
assert not 'tx_hashes' in res or len(res.tx_hashes) == 0
res = daemon.get_transactions([mempool_txid])
assert len(res.get('txs', [])) == 1
tx_details = res.txs[0]
assert ('in_pool' not in tx_details) or (not tx_details.in_pool)
assert tx_details.block_height == block_height
def test_p2p_block_propagation_new(self, mempool_txid):
# there's a big problem with this testcase: there's not yet a way to prevent the daemons from syncing their
# mempools while still allowing block propagation. so there's a good chance that the transaction will be synced
# between daemons between when daemon2's mempool is flushed and when daemon3 mines a new block. in this
# scenario, this testcase basically just degenerates into test_p2p_block_propagation_shared(). however, if this
# one ever fails while test_p2p_block_propagation_shared() passes, then we might have actually caught a problem
# with block propagation when one of the daemons is missing one or more txs
print('Testing P2P block propagation with (possibly) new TX')
daemon2 = Daemon(idx = 2)
daemon3 = Daemon(idx = 3)
# check precondition: txid in daemon3's mempool
res = daemon3.get_transaction_pool_hashes()
assert mempool_txid in res.get('tx_hashes', [])
# flush daemon2 mempool
daemon2.flush_txpool()
# mine block on daemon3
res = daemon3.generateblocks('42ey1afDFnn4886T7196doS9GPMzexD9gXpsZJDwVjeRVdFCSoHnv7KPbBeGpzJBzHRCAs9UxqeoyFQMYbqSWYTfJJQAWDm', 1)
block_height = res.height
# wait until both are synced, or 5 seconds, whichever comes first.
# the timeout should be very low here since block propagation, unlike tx propagation, is designed
# to be as fast as possible. however, it might have to be raised if the daemon actually does make a round trip
# to request a missing tx in a fluffy block.
deadline = time.monotonic() + 5
result = None
while result is None:
res2 = daemon2.get_info()
res3 = daemon3.get_info()
if res2.top_block_hash == res3.top_block_hash:
result = True
elif time.monotonic() > deadline:
result = False
else:
time.sleep(.25)
assert result, "New tx block propagation timed out"
# check the tx has moved to both daemons' blockchains at the top block
for daemon in [daemon2, daemon3]:
res = daemon.get_transaction_pool_hashes()
assert not 'tx_hashes' in res or len(res.tx_hashes) == 0
res = daemon.get_transactions([mempool_txid])
assert len(res.get('txs', [])) == 1
tx_details = res.txs[0]
assert ('in_pool' not in tx_details) or (not tx_details.in_pool)
assert tx_details.block_height == block_height
if __name__ == '__main__':
P2PTest().run_test()
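The timeout caveats in the comments above refer to the fluffy-block recovery path: a daemon that receives a fluffy block but cannot resolve all of its transactions locally asks the sending peer for the missing ones by index instead of rejecting the block. A rough sketch of that request, assuming the long-standing NOTIFY_REQUEST_FLUFFY_MISSING_TX message from the protocol definitions (not touched by this diff); how missing_indices is computed is left to the caller:

#include <cstdint>
#include <utility>
#include <vector>
#include "cryptonote_protocol/cryptonote_protocol_defs.h"

// Sketch only, not part of this diff: request the txs of a fluffy block that
// were found neither in the notification nor in the local tx pool.
static cryptonote::NOTIFY_REQUEST_FLUFFY_MISSING_TX::request
make_missing_tx_request(const crypto::hash &block_hash,
                        uint64_t chain_height,
                        std::vector<uint64_t> missing_indices)
{
  cryptonote::NOTIFY_REQUEST_FLUFFY_MISSING_TX::request r{};
  r.block_hash = block_hash;                          // hash of the fluffy block we received
  r.current_blockchain_height = chain_height;         // our current chain height
  r.missing_tx_indices = std::move(missing_indices);  // positions in the block's tx hash list
  return r;
}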

View File

@ -61,9 +61,10 @@ public:
bool have_block(const crypto::hash& id, int *where = NULL) const {return false;}
bool have_block_unlocked(const crypto::hash& id, int *where = NULL) const {return false;}
void get_blockchain_top(uint64_t& height, crypto::hash& top_id)const{height=0;top_id=crypto::null_hash;}
bool handle_incoming_tx(const cryptonote::tx_blob_entry& tx_blob, cryptonote::tx_verification_context& tvc, cryptonote::relay_method tx_relay, bool relayed) { return true; }
bool handle_incoming_txs(const std::vector<cryptonote::tx_blob_entry>& tx_blob, std::vector<cryptonote::tx_verification_context>& tvc, cryptonote::relay_method tx_relay, bool relayed) { return true; }
bool handle_incoming_tx(const cryptonote::blobdata& tx_blob, cryptonote::tx_verification_context& tvc, cryptonote::relay_method tx_relay, bool relayed) { return true; }
bool handle_single_incoming_block(const cryptonote::blobdata& block_blob, const cryptonote::block *b, cryptonote::block_verification_context& bvc, cryptonote::pool_supplement& extra_block_txs, bool update_miner_blocktemplate = true) { return true; }
bool handle_incoming_block(const cryptonote::blobdata& block_blob, const cryptonote::block *block, cryptonote::block_verification_context& bvc, bool update_miner_blocktemplate = true) { return true; }
bool handle_incoming_block(const cryptonote::blobdata& block_blob, const cryptonote::block *block, cryptonote::block_verification_context& bvc, cryptonote::pool_supplement& extra_block_txs, bool update_miner_blocktemplate = true) { return true; }
void pause_mine(){}
void resume_mine(){}
bool on_idle(){return true;}
@ -74,6 +75,7 @@ public:
bool get_test_drop_download_height() const {return true;}
bool prepare_handle_incoming_blocks(const std::vector<cryptonote::block_complete_entry> &blocks_entry, std::vector<cryptonote::block> &blocks) { return true; }
bool cleanup_handle_incoming_blocks(bool force_sync = false) { return true; }
bool check_incoming_block_size(const cryptonote::blobdata& block_blob) const { return true; }
bool update_checkpoints(const bool skip_dns = false) { return true; }
uint64_t get_target_blockchain_height() const { return 1; }
size_t get_block_sync_size(uint64_t height) const { return BLOCKS_SYNCHRONIZING_DEFAULT_COUNT; }
@ -82,6 +84,7 @@ public:
bool get_pool_transaction(const crypto::hash& id, cryptonote::blobdata& tx_blob, cryptonote::relay_category tx_category) const { return false; }
bool pool_has_tx(const crypto::hash &txid) const { return false; }
bool get_blocks(uint64_t start_offset, size_t count, std::vector<std::pair<cryptonote::blobdata, cryptonote::block>>& blocks, std::vector<cryptonote::blobdata>& txs) const { return false; }
bool get_transactions(const std::vector<crypto::hash>& txs_ids, std::vector<cryptonote::blobdata>& txs, std::vector<crypto::hash>& missed_txs, bool pruned = false) const { return false; }
bool get_transactions(const std::vector<crypto::hash>& txs_ids, std::vector<cryptonote::transaction>& txs, std::vector<crypto::hash>& missed_txs) const { return false; }
bool get_block_by_hash(const crypto::hash &h, cryptonote::block &blk, bool *orphan = NULL) const { return false; }
uint8_t get_ideal_hard_fork_version() const { return 0; }
@ -89,7 +92,6 @@ public:
uint8_t get_hard_fork_version(uint64_t height) const { return 0; }
uint64_t get_earliest_ideal_height_for_version(uint8_t version) const { return 0; }
cryptonote::difficulty_type get_block_cumulative_difficulty(uint64_t height) const { return 0; }
bool fluffy_blocks_enabled() const { return false; }
uint64_t prevalidate_block_hashes(uint64_t height, const std::vector<crypto::hash> &hashes, const std::vector<uint64_t> &weights) { return 0; }
bool pad_transactions() { return false; }
uint32_t get_blockchain_pruning_seed() const { return 0; }