Implemented growing the tree on sync + lots of cleaning

- validate output and commitment in tuple conversion function
- function to get_unlock_height from height in chain + unlock_time
- tx_outs_to_leaf_tuples function
- cleaned up trim impl (reduced num params in instructions and
conditional complexity)
- renamed locked_outputs table to locked_leaves (clearer tie to
merkle tree)
- size_t -> uint64_t for db compatibility across 32-bit and 64-bit
machines
- added hash_grow tests
j-berman 2024-07-24 12:13:39 -07:00
parent 8a89c20f3b
commit 306488b690
17 changed files with 622 additions and 372 deletions
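Note: the get_unlock_height helper referenced above is defined at the end of this diff (namespace cryptonote), but the diff is truncated there. A minimal standalone sketch of the same logic, assuming Monero's usual constants (spendable age 10, max block number 500000000, 120-second block target) plus the hard fork v15 anchor values hardcoded in the diff:

#include <cstdint>

// Assumed constants; values mirror Monero's defaults and the diff below.
constexpr uint64_t CRYPTONOTE_DEFAULT_TX_SPENDABLE_AGE = 10;
constexpr uint64_t CRYPTONOTE_MAX_BLOCK_NUMBER = 500000000;
constexpr uint64_t DIFFICULTY_TARGET_V2 = 120; // seconds per block

uint64_t get_unlock_height(uint64_t unlock_time, uint64_t height_included_in_chain)
{
  // No explicit timelock: spendable after the default age
  if (unlock_time == 0)
    return height_included_in_chain + CRYPTONOTE_DEFAULT_TX_SPENDABLE_AGE;

  // Small values are interpreted as a block height
  if (unlock_time < CRYPTONOTE_MAX_BLOCK_NUMBER)
    return unlock_time;

  // Large values are interpreted as a unix timestamp; convert to a height
  // using the hard fork v15 time/height anchor (mainnet values from the diff)
  const uint64_t hf_v15_time = 1656629118;
  const uint64_t hf_v15_height = 2689608;
  if (hf_v15_time > unlock_time)
    return hf_v15_height - (hf_v15_time - unlock_time) / DIFFICULTY_TARGET_V2;
  return hf_v15_height + (unlock_time - hf_v15_time) / DIFFICULTY_TARGET_V2;
}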

View File

@@ -294,9 +294,31 @@ uint64_t BlockchainDB::add_block( const std::pair<block, blobdata>& blck
   TIME_MEASURE_FINISH(time1);
   time_add_transaction += time1;
 
+  // When adding a block, we also need to add all the leaf tuples included in
+  // the block to a table keeping track of locked leaf tuples. Once those leaf
+  // tuples unlock, we use them to grow the tree.
+  std::multimap<uint64_t, fcmp::curve_trees::CurveTreesV1::LeafTuple> leaf_tuples_by_unlock_height;
+
+  // Get miner tx's leaf tuples
+  fcmp::curve_trees::curve_trees_v1.tx_outs_to_leaf_tuples(
+    blk.miner_tx,
+    prev_height,
+    true/*miner_tx*/,
+    leaf_tuples_by_unlock_height);
+
+  // Get all other txs' leaf tuples
+  for (const auto &txp : txs)
+  {
+    fcmp::curve_trees::curve_trees_v1.tx_outs_to_leaf_tuples(
+      txp.first,
+      prev_height,
+      false/*miner_tx*/,
+      leaf_tuples_by_unlock_height);
+  }
+
   // call out to subclass implementation to add the block & metadata
   time1 = epee::misc_utils::get_tick_count();
-  add_block(blk, block_weight, long_term_block_weight, cumulative_difficulty, coins_generated, num_rct_outs, blk_hash);
+  add_block(blk, block_weight, long_term_block_weight, cumulative_difficulty, coins_generated, num_rct_outs, blk_hash, leaf_tuples_by_unlock_height);
   TIME_MEASURE_FINISH(time1);
   time_add_block1 += time1;
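A std::multimap is the natural container here: several outputs can unlock at the same height, and the consumer drains one height at a time. A toy illustration of that container choice (LeafTuple below is a placeholder stand-in for fcmp::curve_trees::CurveTreesV1::LeafTuple, not the real type):

#include <cstdint>
#include <iostream>
#include <map>
#include <vector>

struct LeafTuple { uint64_t O_x, I_x, C_x; }; // placeholder fields

int main()
{
  std::multimap<uint64_t, LeafTuple> leaf_tuples_by_unlock_height;

  // Outputs from different txs in a block can unlock at the same height
  leaf_tuples_by_unlock_height.emplace(110, LeafTuple{1, 2, 3});
  leaf_tuples_by_unlock_height.emplace(110, LeafTuple{4, 5, 6});
  leaf_tuples_by_unlock_height.emplace(125, LeafTuple{7, 8, 9});

  // When block 110 is added, collect everything that unlocks at that height
  std::vector<LeafTuple> unlocked;
  const auto range = leaf_tuples_by_unlock_height.equal_range(110);
  for (auto it = range.first; it != range.second; ++it)
    unlocked.push_back(it->second);

  std::cout << unlocked.size() << " leaf tuples unlock at height 110\n"; // 2
  return 0;
}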

View File

@@ -399,6 +399,7 @@ private:
    * @param cumulative_difficulty the accumulated difficulty after this block
    * @param coins_generated the number of coins generated total after this block
    * @param blk_hash the hash of the block
+   * @param leaf_tuples_by_unlock_height the leaves from this block to add to the merkle tree
    */
   virtual void add_block( const block& blk
                 , size_t block_weight
@@ -407,6 +408,7 @@ private:
                 , const uint64_t& coins_generated
                 , uint64_t num_rct_outs
                 , const crypto::hash& blk_hash
+                , const std::multimap<uint64_t, fcmp::curve_trees::CurveTreesV1::LeafTuple>& leaf_tuples_by_unlock_height
                 ) = 0;
 
   /**
@@ -1394,6 +1396,17 @@ public:
    */
   virtual uint64_t get_num_outputs(const uint64_t& amount) const = 0;
 
+  // returns the total number of global outputs
+  /**
+   * @brief fetches the total number of global outputs
+   *
+   * The subclass should return a count of all outputs,
+   * or zero if there are none.
+   *
+   * @return the number of global outputs
+   */
+  virtual uint64_t get_num_global_outputs() const = 0;
+
   /**
    * @brief return index of the first element (should be hidden, but isn't)
    *
@@ -1769,10 +1782,10 @@ public:
   virtual void grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees,
     const std::vector<fcmp::curve_trees::CurveTreesV1::LeafTuple> &new_leaves) = 0;
 
-  virtual void trim_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const std::size_t trim_n_leaf_tuples) = 0;
+  virtual void trim_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const uint64_t trim_n_leaf_tuples) = 0;
 
   // TODO: description
-  virtual bool audit_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const std::size_t expected_n_leaf_tuples) const = 0;
+  virtual bool audit_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const uint64_t expected_n_leaf_tuples) const = 0;
 
   //
   // Hard fork related storage

View File

@@ -216,7 +216,7 @@ namespace
  *
  * spent_keys       input hash       -
  *
- * locked_outputs   block ID         [{leaf tuple}...]
+ * locked_leaves    block ID         [{leaf tuple}...]
  * leaves           leaf_idx         {leaf tuple}
  * layers           layer_idx        [{child_chunk_idx, child_chunk_hash}...]
  *
@@ -249,8 +249,8 @@ const char* const LMDB_OUTPUT_TXS = "output_txs";
 const char* const LMDB_OUTPUT_AMOUNTS = "output_amounts";
 const char* const LMDB_SPENT_KEYS = "spent_keys";
 
-// Curve trees tree types
-const char* const LMDB_LOCKED_OUTPUTS = "locked_outputs";
+// Curve trees merkle tree tables
+const char* const LMDB_LOCKED_LEAVES = "locked_leaves";
 const char* const LMDB_LEAVES = "leaves";
 const char* const LMDB_LAYERS = "layers";
@@ -817,7 +817,7 @@ estim:
 }
 
 void BlockchainLMDB::add_block(const block& blk, size_t block_weight, uint64_t long_term_block_weight, const difficulty_type& cumulative_difficulty, const uint64_t& coins_generated,
-    uint64_t num_rct_outs, const crypto::hash& blk_hash)
+    uint64_t num_rct_outs, const crypto::hash& blk_hash, const std::multimap<uint64_t, fcmp::curve_trees::CurveTreesV1::LeafTuple> &leaf_tuples_by_unlock_height)
 {
   LOG_PRINT_L3("BlockchainLMDB::" << __func__);
   check_open();
@@ -845,6 +845,14 @@ void BlockchainLMDB::add_block(const block& blk, size_t block_weight, uint64_t l
     throw0(BLOCK_PARENT_DNE("Top block is not new block's parent"));
   }
 
+  // Grow the tree with outputs that unlock at this block height
+  const auto unlocked_leaf_tuples = this->get_locked_leaf_tuples_at_height(m_height);
+
+  // TODO: double check consistent order for inserting outputs into the tree
+  this->grow_tree(fcmp::curve_trees::curve_trees_v1, unlocked_leaf_tuples);
+
+  // TODO: remove unlocked_leaf_tuples from the locked outputs table
+
   int result = 0;
 
   MDB_val_set(key, m_height);
@@ -878,6 +886,8 @@ void BlockchainLMDB::add_block(const block& blk, size_t block_weight, uint64_t l
     bi.bi_cum_rct += bi_prev->bi_cum_rct;
   }
   bi.bi_long_term_block_weight = long_term_block_weight;
+  bi.bi_n_leaf_tuples = this->get_num_leaf_tuples();
+  bi.bi_tree_root = this->get_tree_root();
 
   MDB_val_set(val, bi);
   result = mdb_cursor_put(m_cur_block_info, (MDB_val *)&zerokval, &val, MDB_APPENDDUP);
@@ -888,6 +898,21 @@ void BlockchainLMDB::add_block(const block& blk, size_t block_weight, uint64_t l
   if (result)
     throw0(DB_ERROR(lmdb_error("Failed to add block height by hash to db transaction: ", result).c_str()));
 
+  CURSOR(locked_leaves)
+
+  // Add the locked leaf tuples from this block to the locked outputs table
+  for (const auto &locked_tuple : leaf_tuples_by_unlock_height)
+  {
+    MDB_val_set(k_height, locked_tuple.first);
+    MDB_val_set(v_tuple, locked_tuple.second);
+
+    // MDB_NODUPDATA because no benefit to having duplicate outputs in the tree, only 1 can be spent
+    // Can't use MDB_APPENDDUP because outputs aren't inserted in order sorted by unlock height
+    result = mdb_cursor_put(m_cur_locked_leaves, &k_height, &v_tuple, MDB_NODUPDATA);
+    if (result != MDB_SUCCESS && result != MDB_KEYEXIST)
+      throw0(DB_ERROR(lmdb_error("Failed to add locked output: ", result).c_str()));
+  }
+
   // we use weight as a proxy for size, since we don't have size but weight is >= size
   // and often actually equal
   m_cum_size += block_weight;
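The MDB_NODUPDATA flag above makes the put fail with MDB_KEYEXIST when the exact key/value pair is already present, which is why MDB_KEYEXIST is tolerated rather than thrown. A sketch of that pattern against the raw LMDB C API (Record and put_once are hypothetical stand-ins; env/db setup omitted):

#include <cstdint>
#include <lmdb.h>

struct Record { uint64_t a, b, c; }; // stand-in for a fixed-size leaf tuple

int put_once(MDB_cursor *cur, uint64_t unlock_height, Record rec)
{
  MDB_val k{sizeof(unlock_height), &unlock_height};
  MDB_val v{sizeof(rec), &rec};

  const int rc = mdb_cursor_put(cur, &k, &v, MDB_NODUPDATA);
  if (rc != MDB_SUCCESS && rc != MDB_KEYEXIST)
    return rc;        // real error
  return MDB_SUCCESS; // inserted, or exact duplicate already tracked
}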
@@ -1347,10 +1372,12 @@ void BlockchainLMDB::grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tree
   check_open();
   mdb_txn_cursors *m_cursors = &m_wcursors;
 
+  CHECK_AND_ASSERT_THROW_MES(m_write_txn != nullptr, "Must have m_write_txn set to grow tree");
+
   CURSOR(leaves)
 
   // Get the number of leaf tuples that exist in the tree
-  const std::size_t old_n_leaf_tuples = this->get_num_leaf_tuples();
+  const uint64_t old_n_leaf_tuples = this->get_num_leaf_tuples();
 
   // Read every layer's last hashes
   const auto last_hashes = this->get_tree_last_hashes();
@@ -1361,9 +1388,9 @@ void BlockchainLMDB::grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tree
   // Insert the leaves
   // TODO: grow_leaves
   const auto &leaves = tree_extension.leaves;
-  for (std::size_t i = 0; i < leaves.tuples.size(); ++i)
+  for (uint64_t i = 0; i < leaves.tuples.size(); ++i)
   {
-    MDB_val_copy<std::size_t> k(i + leaves.start_leaf_tuple_idx);
+    MDB_val_copy<uint64_t> k(i + leaves.start_leaf_tuple_idx);
     MDB_val_set(v, leaves.tuples[i]);
 
     // TODO: according to the docs, MDB_APPENDDUP isn't supposed to perform any key comparisons to maximize efficiency.
@@ -1381,11 +1408,11 @@ void BlockchainLMDB::grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tree
   CHECK_AND_ASSERT_THROW_MES(!c2_extensions.empty(), "empty c2 extensions");
 
   bool use_c2 = true;
-  std::size_t c2_idx = 0;
-  std::size_t c1_idx = 0;
-  for (std::size_t i = 0; i < (c2_extensions.size() + c1_extensions.size()); ++i)
+  uint64_t c2_idx = 0;
+  uint64_t c1_idx = 0;
+  for (uint64_t i = 0; i < (c2_extensions.size() + c1_extensions.size()); ++i)
   {
-    const std::size_t layer_idx = c2_idx + c1_idx;
+    const uint64_t layer_idx = c2_idx + c1_idx;
     MDEBUG("Growing layer " << layer_idx);
 
     if (use_c2)
@@ -1422,8 +1449,8 @@ void BlockchainLMDB::grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tree
 template<typename C>
 void BlockchainLMDB::grow_layer(const C &curve,
   const std::vector<fcmp::curve_trees::LayerExtension<C>> &layer_extensions,
-  const std::size_t ext_idx,
-  const std::size_t layer_idx)
+  const uint64_t ext_idx,
+  const uint64_t layer_idx)
 {
   LOG_PRINT_L3("BlockchainLMDB::" << __func__);
   check_open();
@@ -1438,7 +1465,7 @@ void BlockchainLMDB::grow_layer(const C &curve,
   // TODO: make sure ext.start_idx lines up with the end of the layer
 
-  MDB_val_copy<std::size_t> k(layer_idx);
+  MDB_val_copy<uint64_t> k(layer_idx);
 
   if (ext.update_existing_last_hash)
   {
@@ -1456,7 +1483,7 @@ void BlockchainLMDB::grow_layer(const C &curve,
   }
 
   // Now add all the new hashes found in the extension
-  for (std::size_t i = ext.update_existing_last_hash ? 1 : 0; i < ext.hashes.size(); ++i)
+  for (uint64_t i = ext.update_existing_last_hash ? 1 : 0; i < ext.hashes.size(); ++i)
   {
     layer_val<C> lv;
     lv.child_chunk_idx = i + ext.start_idx;
@@ -1472,7 +1499,7 @@ void BlockchainLMDB::grow_layer(const C &curve,
   }
 }
 
-void BlockchainLMDB::trim_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const std::size_t trim_n_leaf_tuples)
+void BlockchainLMDB::trim_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const uint64_t trim_n_leaf_tuples)
 {
   // TODO: block_wtxn_start like pop_block, then call BlockchainDB::trim_tree
   LOG_PRINT_L3("BlockchainLMDB::" << __func__);
@@ -1484,7 +1511,7 @@ void BlockchainLMDB::trim_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tree
 
   CHECK_AND_ASSERT_THROW_MES(trim_n_leaf_tuples > 0, "must be trimming some leaves");
 
-  const std::size_t old_n_leaf_tuples = this->get_num_leaf_tuples();
+  const uint64_t old_n_leaf_tuples = this->get_num_leaf_tuples();
   CHECK_AND_ASSERT_THROW_MES(old_n_leaf_tuples > trim_n_leaf_tuples, "cannot trim more leaves than exist");
 
   const auto trim_instructions = curve_trees.get_trim_instructions(old_n_leaf_tuples, trim_n_leaf_tuples);
@@ -1507,11 +1534,11 @@ void BlockchainLMDB::trim_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tree
 
   // Trim the leaves
   // TODO: trim_leaves
-  for (std::size_t i = 0; i < trim_n_leaf_tuples; ++i)
+  for (uint64_t i = 0; i < trim_n_leaf_tuples; ++i)
   {
-    std::size_t last_leaf_tuple_idx = (old_n_leaf_tuples - 1 - i);
-    MDB_val_copy<std::size_t> k(last_leaf_tuple_idx);
+    uint64_t last_leaf_tuple_idx = (old_n_leaf_tuples - 1 - i);
+    MDB_val_copy<uint64_t> k(last_leaf_tuple_idx);
     MDB_val v;
     int result = mdb_cursor_get(m_cur_leaves, &k, &v, MDB_SET);
     if (result == MDB_NOTFOUND)
@@ -1533,9 +1560,9 @@ void BlockchainLMDB::trim_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tree
   CHECK_AND_ASSERT_THROW_MES(!c2_layer_reductions.empty(), "empty c2 layer reductions");
 
   bool use_c2 = true;
-  std::size_t c2_idx = 0;
-  std::size_t c1_idx = 0;
-  for (std::size_t i = 0; i < (c2_layer_reductions.size() + c1_layer_reductions.size()); ++i)
+  uint64_t c2_idx = 0;
+  uint64_t c1_idx = 0;
+  for (uint64_t i = 0; i < (c2_layer_reductions.size() + c1_layer_reductions.size()); ++i)
   {
     if (use_c2)
     {
@@ -1557,7 +1584,7 @@ void BlockchainLMDB::trim_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tree
 
   // Trim any remaining layers in layers after the root
   // TODO: trim_leftovers_after_root
-  const std::size_t expected_root_idx = c2_layer_reductions.size() + c1_layer_reductions.size() - 1;
+  const uint64_t expected_root_idx = c2_layer_reductions.size() + c1_layer_reductions.size() - 1;
   while (1)
   {
     MDB_val k, v;
@@ -1565,7 +1592,7 @@ void BlockchainLMDB::trim_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tree
     if (result != MDB_SUCCESS)
       throw0(DB_ERROR(lmdb_error("Failed to get last elem: ", result).c_str()));
 
-    const std::size_t last_layer_idx = *(std::size_t *)k.mv_data;
+    const uint64_t last_layer_idx = *(uint64_t *)k.mv_data;
     if (last_layer_idx > expected_root_idx)
     {
@@ -1586,7 +1613,7 @@ void BlockchainLMDB::trim_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tree
 template<typename C>
 void BlockchainLMDB::trim_layer(const fcmp::curve_trees::LayerReduction<C> &layer_reduction,
-  const std::size_t layer_idx)
+  const uint64_t layer_idx)
 {
   LOG_PRINT_L3("BlockchainLMDB::" << __func__);
   check_open();
@@ -1594,11 +1621,11 @@ void BlockchainLMDB::trim_layer(const fcmp::curve_trees::LayerReduction<C> &laye
 
   CURSOR(layers)
 
-  MDB_val_copy<std::size_t> k(layer_idx);
+  MDB_val_copy<uint64_t> k(layer_idx);
 
   // Get the number of existing elements in the layer
   // TODO: get_num_elems_in_layer
-  std::size_t old_n_elems_in_layer = 0;
+  uint64_t old_n_elems_in_layer = 0;
   {
     // Get the first record in a layer so we can then get the last record
     MDB_val v;
@@ -1606,8 +1633,6 @@ void BlockchainLMDB::trim_layer(const fcmp::curve_trees::LayerReduction<C> &laye
     if (result != MDB_SUCCESS)
       throw0(DB_ERROR(lmdb_error("Failed to get first record in layer: ", result).c_str()));
 
-    // TODO: why can't I just use MDB_LAST_DUP once and get the last record?
-
     result = mdb_cursor_get(m_cur_layers, &k, &v, MDB_LAST_DUP);
     if (result != MDB_SUCCESS)
       throw0(DB_ERROR(lmdb_error("Failed to get last elem: ", result).c_str()));
@@ -1618,12 +1643,12 @@ void BlockchainLMDB::trim_layer(const fcmp::curve_trees::LayerReduction<C> &laye
 
   CHECK_AND_ASSERT_THROW_MES(old_n_elems_in_layer >= layer_reduction.new_total_parents,
     "unexpected old n elems in layer");
-  const std::size_t trim_n_elems_in_layer = old_n_elems_in_layer - layer_reduction.new_total_parents;
+  const uint64_t trim_n_elems_in_layer = old_n_elems_in_layer - layer_reduction.new_total_parents;
 
   // Delete the elements
-  for (std::size_t i = 0; i < trim_n_elems_in_layer; ++i)
+  for (uint64_t i = 0; i < trim_n_elems_in_layer; ++i)
   {
-    std::size_t last_elem_idx = (old_n_elems_in_layer - 1 - i);
+    uint64_t last_elem_idx = (old_n_elems_in_layer - 1 - i);
     MDB_val_set(v, last_elem_idx);
 
     int result = mdb_cursor_get(m_cur_layers, &k, &v, MDB_GET_BOTH);
@@ -1655,7 +1680,7 @@ void BlockchainLMDB::trim_layer(const fcmp::curve_trees::LayerReduction<C> &laye
   }
 }
 
-std::size_t BlockchainLMDB::get_num_leaf_tuples() const
+uint64_t BlockchainLMDB::get_num_leaf_tuples() const
 {
   LOG_PRINT_L3("BlockchainLMDB::" << __func__);
   check_open();
@@ -1672,7 +1697,7 @@ std::size_t BlockchainLMDB::get_num_leaf_tuples() const
     if (result == MDB_NOTFOUND)
       n_leaf_tuples = 0;
     else if (result == MDB_SUCCESS)
-      n_leaf_tuples = (1 + (*(const std::size_t*)k.mv_data));
+      n_leaf_tuples = (1 + (*(const uint64_t*)k.mv_data));
     else
       throw0(DB_ERROR(lmdb_error("Failed to get last leaf: ", result).c_str()));
   }
@@ -1697,7 +1722,7 @@ std::array<uint8_t, 32UL> BlockchainLMDB::get_tree_root() const
   int result = mdb_cursor_get(m_cur_layers, &k, &v, MDB_LAST);
   if (result == MDB_SUCCESS)
   {
-    const std::size_t layer_idx = *(std::size_t*)k.mv_data;
+    const uint64_t layer_idx = *(uint64_t*)k.mv_data;
     if ((layer_idx % 2) == 0)
     {
       const auto *lv = (layer_val<fcmp::curve_trees::Selene> *)v.mv_data;
@@ -1731,10 +1756,10 @@ fcmp::curve_trees::CurveTreesV1::LastHashes BlockchainLMDB::get_tree_last_hashes
   auto &c2_last_hashes = last_hashes.c2_last_hashes;
 
   // Traverse the tree layer-by-layer starting at the layer closest to leaf layer
-  std::size_t layer_idx = 0;
+  uint64_t layer_idx = 0;
   while (1)
   {
-    MDB_val_copy<std::size_t> k(layer_idx);
+    MDB_val_copy<uint64_t> k(layer_idx);
     MDB_val v;
 
     // Get the first record in a layer so we can then get the last record
@@ -1744,8 +1769,6 @@ fcmp::curve_trees::CurveTreesV1::LastHashes BlockchainLMDB::get_tree_last_hashes
     if (result != MDB_SUCCESS)
       throw0(DB_ERROR(lmdb_error("Failed to get first record in layer: ", result).c_str()));
 
-    // TODO: why can't I just use MDB_LAST_DUP once and get the last record?
-
     // Get the last record in a layer
     result = mdb_cursor_get(m_cur_layers, &k, &v, MDB_LAST_DUP);
     if (result != MDB_SUCCESS)
@@ -1795,15 +1818,14 @@ fcmp::curve_trees::CurveTreesV1::LastChunkChildrenToTrim BlockchainLMDB::get_las
   std::vector<fcmp::curve_trees::Selene::Scalar> leaves_to_trim;
 
-  if (trim_leaf_layer_instructions.need_last_chunk_children_to_trim ||
-      (trim_leaf_layer_instructions.need_last_chunk_remaining_children && trim_leaf_layer_instructions.new_offset > 0))
+  if (trim_leaf_layer_instructions.end_trim_idx > trim_leaf_layer_instructions.start_trim_idx)
   {
-    std::size_t idx = trim_leaf_layer_instructions.start_trim_idx;
+    uint64_t idx = trim_leaf_layer_instructions.start_trim_idx;
     CHECK_AND_ASSERT_THROW_MES(idx % fcmp::curve_trees::CurveTreesV1::LEAF_TUPLE_SIZE == 0,
       "expected divisble by leaf tuple size");
-    const std::size_t leaf_tuple_idx = idx / fcmp::curve_trees::CurveTreesV1::LEAF_TUPLE_SIZE;
+    const uint64_t leaf_tuple_idx = idx / fcmp::curve_trees::CurveTreesV1::LEAF_TUPLE_SIZE;
 
-    MDB_val_copy<std::size_t> k(leaf_tuple_idx);
+    MDB_val_copy<uint64_t> k(leaf_tuple_idx);
     MDB_cursor_op leaf_op = MDB_SET;
     do
@@ -1833,18 +1855,17 @@ fcmp::curve_trees::CurveTreesV1::LastChunkChildrenToTrim BlockchainLMDB::get_las
   // Traverse the tree layer-by-layer starting at the layer closest to leaf layer, getting children to trim
   // TODO: separate function for layers
   bool parent_is_c1 = true;
-  for (std::size_t i = 1; i < trim_instructions.size(); ++i)
+  for (uint64_t i = 1; i < trim_instructions.size(); ++i)
   {
     const auto &trim_layer_instructions = trim_instructions[i];
 
     std::vector<fcmp::curve_trees::Helios::Scalar> c1_children;
     std::vector<fcmp::curve_trees::Selene::Scalar> c2_children;
 
-    if (trim_layer_instructions.need_last_chunk_children_to_trim ||
-        (trim_layer_instructions.need_last_chunk_remaining_children && trim_layer_instructions.new_offset > 0))
+    if (trim_layer_instructions.end_trim_idx > trim_layer_instructions.start_trim_idx)
     {
-      const std::size_t layer_idx = (i - 1);
-      std::size_t idx = trim_layer_instructions.start_trim_idx;
+      const uint64_t layer_idx = (i - 1);
+      uint64_t idx = trim_layer_instructions.start_trim_idx;
 
       MDB_val_set(k, layer_idx);
       MDB_val_set(v, idx);
@@ -1903,12 +1924,12 @@ fcmp::curve_trees::CurveTreesV1::LastHashes BlockchainLMDB::get_last_hashes_to_t
   fcmp::curve_trees::CurveTreesV1::LastHashes last_hashes_out;
 
   // Traverse the tree layer-by-layer starting at the layer closest to leaf layer
-  std::size_t layer_idx = 0;
+  uint64_t layer_idx = 0;
   for (const auto &trim_layer_instructions : trim_instructions)
   {
-    const std::size_t new_last_idx = trim_layer_instructions.new_total_parents - 1;
-    MDB_val_copy<std::size_t> k(layer_idx);
+    const uint64_t new_last_idx = trim_layer_instructions.new_total_parents - 1;
+    MDB_val_copy<uint64_t> k(layer_idx);
     MDB_val_set(v, new_last_idx);
 
     int result = mdb_cursor_get(m_cur_layers, &k, &v, MDB_GET_BOTH);
@@ -1937,7 +1958,7 @@ fcmp::curve_trees::CurveTreesV1::LastHashes BlockchainLMDB::get_last_hashes_to_t
 }
 
 bool BlockchainLMDB::audit_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees,
-  const std::size_t expected_n_leaf_tuples) const
+  const uint64_t expected_n_leaf_tuples) const
 {
   LOG_PRINT_L3("BlockchainLMDB::" << __func__);
   check_open();
@@ -1946,12 +1967,23 @@ bool BlockchainLMDB::audit_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tre
   RCURSOR(leaves)
   RCURSOR(layers)
 
-  const std::size_t actual_n_leaf_tuples = this->get_num_leaf_tuples();
+  const uint64_t actual_n_leaf_tuples = this->get_num_leaf_tuples();
   CHECK_AND_ASSERT_MES(actual_n_leaf_tuples == expected_n_leaf_tuples, false, "unexpected num leaf tuples");
 
+  if (actual_n_leaf_tuples == 0)
+  {
+    // Make sure layers table is also empty
+    MDB_stat db_stats;
+    int result = mdb_stat(m_txn, m_layers, &db_stats);
+    if (result)
+      throw0(DB_ERROR(lmdb_error("Failed to query m_layers: ", result).c_str()));
+    CHECK_AND_ASSERT_MES(db_stats.ms_entries == 0, false, "unexpected num layer entries");
+    return true;
+  }
+
   // Check chunks of leaves hash into first layer as expected
-  std::size_t layer_idx = 0;
-  std::size_t child_chunk_idx = 0;
+  uint64_t layer_idx = 0;
+  uint64_t child_chunk_idx = 0;
   MDB_cursor_op leaf_op = MDB_FIRST;
   while (1)
   {
@@ -1978,7 +2010,7 @@ bool BlockchainLMDB::audit_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tre
     }
 
     // Get the actual leaf chunk hash from the db
-    MDB_val_copy<std::size_t> k_parent(layer_idx);
+    MDB_val_copy<uint64_t> k_parent(layer_idx);
     MDB_val_set(v_parent, child_chunk_idx);
 
     MDEBUG("Getting leaf chunk hash starting at child_chunk_idx " << child_chunk_idx);
@@ -2004,11 +2036,12 @@ bool BlockchainLMDB::audit_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tre
     const fcmp::curve_trees::Selene::Chunk chunk{leaves.data(), leaves.size()};
 
     // Hash the chunk of leaves
-    for (std::size_t i = 0; i < leaves.size(); ++i)
+    for (uint64_t i = 0; i < leaves.size(); ++i)
       MDEBUG("Hashing " << curve_trees.m_c2.to_string(leaves[i]));
 
     const fcmp::curve_trees::Selene::Point chunk_hash = fcmp::curve_trees::get_new_parent(curve_trees.m_c2, chunk);
-    MDEBUG("chunk_hash " << curve_trees.m_c2.to_string(chunk_hash) << " (" << leaves.size() << " leaves)");
+    MDEBUG("chunk_hash " << curve_trees.m_c2.to_string(chunk_hash) << " , hash init point: "
+      << curve_trees.m_c2.to_string(curve_trees.m_c2.m_hash_init_point) << " (" << leaves.size() << " leaves)");
 
     // Now compare to value from the db
     const auto *lv = (layer_val<fcmp::curve_trees::Selene> *)v_parent.mv_data;
@@ -2064,10 +2097,10 @@ bool BlockchainLMDB::audit_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tre
 template<typename C_CHILD, typename C_PARENT>
 bool BlockchainLMDB::audit_layer(const C_CHILD &c_child,
   const C_PARENT &c_parent,
-  const std::size_t layer_idx,
-  const std::size_t child_start_idx,
-  const std::size_t child_chunk_idx,
-  const std::size_t chunk_width) const
+  const uint64_t layer_idx,
+  const uint64_t child_start_idx,
+  const uint64_t child_chunk_idx,
+  const uint64_t chunk_width) const
 {
   LOG_PRINT_L3("BlockchainLMDB::" << __func__);
   check_open();
@@ -2082,7 +2115,7 @@ bool BlockchainLMDB::audit_layer(const C_CHILD &c_child,
   std::vector<typename C_CHILD::Point> child_chunk;
   child_chunk.reserve(chunk_width);
 
-  MDB_val_copy<std::size_t> k_child(layer_idx);
+  MDB_val_copy<uint64_t> k_child(layer_idx);
   MDB_val_set(v_child, child_start_idx);
   MDB_cursor_op op_child = MDB_GET_BOTH;
   while (1)
@@ -2102,8 +2135,8 @@ bool BlockchainLMDB::audit_layer(const C_CHILD &c_child,
   }
 
   // Get the actual chunk hash from the db
-  const std::size_t parent_layer_idx = layer_idx + 1;
-  MDB_val_copy<std::size_t> k_parent(parent_layer_idx);
+  const uint64_t parent_layer_idx = layer_idx + 1;
+  MDB_val_copy<uint64_t> k_parent(parent_layer_idx);
   MDB_val_set(v_parent, child_chunk_idx);
 
   // Check for end conditions
@@ -2146,11 +2179,12 @@ bool BlockchainLMDB::audit_layer(const C_CHILD &c_child,
     child_scalars.emplace_back(c_child.point_to_cycle_scalar(child));
 
   const typename C_PARENT::Chunk chunk{child_scalars.data(), child_scalars.size()};
 
-  for (std::size_t i = 0; i < child_scalars.size(); ++i)
+  for (uint64_t i = 0; i < child_scalars.size(); ++i)
     MDEBUG("Hashing " << c_parent.to_string(child_scalars[i]));
 
   const auto chunk_hash = fcmp::curve_trees::get_new_parent(c_parent, chunk);
-  MDEBUG("chunk_hash " << c_parent.to_string(chunk_hash) << " (" << child_scalars.size() << " children)");
+  MDEBUG("chunk_hash " << c_parent.to_string(chunk_hash) << " , hash init point: "
+    << c_parent.to_string(c_parent.m_hash_init_point) << " (" << child_scalars.size() << " children)");
 
   const auto *lv = (layer_val<C_PARENT> *)v_parent.mv_data;
   MDEBUG("Actual chunk hash " << c_parent.to_string(lv->child_chunk_hash));
@@ -2169,6 +2203,57 @@ bool BlockchainLMDB::audit_layer(const C_CHILD &c_child,
     chunk_width);
 }
 
+std::vector<fcmp::curve_trees::CurveTreesV1::LeafTuple> BlockchainLMDB::get_locked_leaf_tuples_at_height(
+  const uint64_t height)
+{
+  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
+  check_open();
+
+  TXN_PREFIX_RDONLY();
+  RCURSOR(locked_leaves)
+
+  MDB_val_set(k_height, height);
+  MDB_val v_tuple;
+
+  // Get all the locked outputs at that height
+  std::vector<fcmp::curve_trees::CurveTreesV1::LeafTuple> leaf_tuples;
+
+  // TODO: double check this gets all leaf tuples when it does multiple iters
+  MDB_cursor_op op = MDB_SET;
+  while (1)
+  {
+    int result = mdb_cursor_get(m_cur_locked_leaves, &k_height, &v_tuple, op);
+    if (result == MDB_NOTFOUND)
+      break;
+    if (result != MDB_SUCCESS)
+      throw0(DB_ERROR(lmdb_error("Failed to get next locked outputs: ", result).c_str()));
+
+    op = MDB_NEXT_MULTIPLE;
+
+    const uint64_t h = *(const uint64_t*)k_height.mv_data;
+    if (h != height)
+      throw0(DB_ERROR(("Height " + std::to_string(h) + " not the expected " + std::to_string(height)).c_str()));
+
+    const auto range_begin = ((const fcmp::curve_trees::CurveTreesV1::LeafTuple*)v_tuple.mv_data);
+    const auto range_end = range_begin + v_tuple.mv_size / sizeof(fcmp::curve_trees::CurveTreesV1::LeafTuple);
+
+    auto it = range_begin;
+
+    // The first MDB_NEXT_MULTIPLE includes the val from MDB_SET, so skip it
+    if (leaf_tuples.size() == 1)
+      ++it;
+
+    while (it < range_end)
+    {
+      leaf_tuples.push_back(*it);
+      ++it;
+    }
+  }
+
+  TXN_POSTFIX_RDONLY();
+
+  return leaf_tuples;
+}
+
 BlockchainLMDB::~BlockchainLMDB()
 {
   LOG_PRINT_L3("BlockchainLMDB::" << __func__);
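For reference, the canonical LMDB pattern for bulk-reading a DUPFIXED table is MDB_SET to position on the key, then MDB_GET_MULTIPLE for the first page and MDB_NEXT_MULTIPLE for later pages; each returned MDB_val packs many fixed-size records back to back. A sketch under those assumptions (Record is a stand-in for the fixed-size leaf tuple). The function above instead starts paging with MDB_NEXT_MULTIPLE directly and skips the element already seen via MDB_SET, which its TODO flags for double checking:

#include <cstdint>
#include <vector>
#include <lmdb.h>

struct Record { uint64_t a, b, c; }; // stand-in for a fixed-size leaf tuple

std::vector<Record> read_all_dups(MDB_cursor *cur, uint64_t key)
{
  std::vector<Record> out;
  MDB_val k{sizeof(key), &key};
  MDB_val v;

  // Position on the key; failure here just means no records for this key
  if (mdb_cursor_get(cur, &k, &v, MDB_SET) != MDB_SUCCESS)
    return out;

  // First page via MDB_GET_MULTIPLE, later pages via MDB_NEXT_MULTIPLE,
  // so no record is returned twice
  MDB_cursor_op op = MDB_GET_MULTIPLE;
  while (mdb_cursor_get(cur, &k, &v, op) == MDB_SUCCESS)
  {
    const auto *begin = static_cast<const Record *>(v.mv_data);
    out.insert(out.end(), begin, begin + v.mv_size / sizeof(Record));
    op = MDB_NEXT_MULTIPLE;
  }
  return out;
}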
@@ -2318,7 +2403,7 @@ void BlockchainLMDB::open(const std::string& filename, const int db_flags)
   lmdb_db_open(txn, LMDB_SPENT_KEYS, MDB_INTEGERKEY | MDB_CREATE | MDB_DUPSORT | MDB_DUPFIXED, m_spent_keys, "Failed to open db handle for m_spent_keys");
 
-  lmdb_db_open(txn, LMDB_LOCKED_OUTPUTS, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_locked_outputs, "Failed to open db handle for m_locked_outputs");
+  lmdb_db_open(txn, LMDB_LOCKED_LEAVES, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_locked_leaves, "Failed to open db handle for m_locked_leaves");
   lmdb_db_open(txn, LMDB_LEAVES, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_leaves, "Failed to open db handle for m_leaves");
   lmdb_db_open(txn, LMDB_LAYERS, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_layers, "Failed to open db handle for m_layers");
@@ -2341,7 +2426,7 @@ void BlockchainLMDB::open(const std::string& filename, const int db_flags)
   mdb_set_dupsort(txn, m_block_heights, compare_hash32);
   mdb_set_dupsort(txn, m_tx_indices, compare_hash32);
   mdb_set_dupsort(txn, m_output_amounts, compare_uint64);
-  mdb_set_dupsort(txn, m_locked_outputs, compare_uint64);
+  mdb_set_dupsort(txn, m_locked_leaves, compare_uint64);
   mdb_set_dupsort(txn, m_leaves, compare_uint64);
   mdb_set_dupsort(txn, m_layers, compare_uint64);
   mdb_set_dupsort(txn, m_output_txs, compare_uint64);
@@ -2521,8 +2606,8 @@ void BlockchainLMDB::reset()
     throw0(DB_ERROR(lmdb_error("Failed to drop m_output_amounts: ", result).c_str()));
   if (auto result = mdb_drop(txn, m_spent_keys, 0))
     throw0(DB_ERROR(lmdb_error("Failed to drop m_spent_keys: ", result).c_str()));
-  if (auto result = mdb_drop(txn, m_locked_outputs, 0))
-    throw0(DB_ERROR(lmdb_error("Failed to drop m_locked_outputs: ", result).c_str()));
+  if (auto result = mdb_drop(txn, m_locked_leaves, 0))
+    throw0(DB_ERROR(lmdb_error("Failed to drop m_locked_leaves: ", result).c_str()));
   if (auto result = mdb_drop(txn, m_leaves, 0))
     throw0(DB_ERROR(lmdb_error("Failed to drop m_leaves: ", result).c_str()));
   if (auto result = mdb_drop(txn, m_layers, 0))
@@ -4314,6 +4399,27 @@ uint64_t BlockchainLMDB::get_num_outputs(const uint64_t& amount) const
   return num_elems;
 }
 
+uint64_t BlockchainLMDB::get_num_global_outputs() const
+{
+  LOG_PRINT_L3("BlockchainLMDB:: " << __func__);
+  check_open();
+
+  TXN_PREFIX_RDONLY();
+  RCURSOR(output_amounts);
+
+  MDB_stat db_stats;
+  int result = mdb_stat(m_txn, m_output_amounts, &db_stats);
+  uint64_t count = 0;
+  if (result != MDB_NOTFOUND)
+  {
+    if (result)
+      throw0(DB_ERROR(lmdb_error("Failed to query m_output_amounts: ", result).c_str()));
+    count = db_stats.ms_entries;
+  }
+
+  TXN_POSTFIX_RDONLY();
+
+  return count;
+}
+
 output_data_t BlockchainLMDB::get_output_key(const uint64_t& amount, const uint64_t& index, bool include_commitmemt) const
 {
   LOG_PRINT_L3("BlockchainLMDB::" << __func__);
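This count works without iterating because output_amounts is a DUPSORT table with one data item per output, and MDB_stat's ms_entries field counts data items, duplicates included. The core of the pattern, as a sketch:

#include <cstdint>
#include <lmdb.h>

uint64_t count_entries(MDB_txn *txn, MDB_dbi dbi)
{
  MDB_stat st;
  if (mdb_stat(txn, dbi, &st) != MDB_SUCCESS)
    return 0; // the function above throws on error instead
  return st.ms_entries; // includes duplicate data items in DUPSORT tables
}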
@@ -6569,15 +6675,18 @@ void BlockchainLMDB::migrate_5_6()
   MGINFO_YELLOW("Migrating blockchain from DB version 5 to 6 - this may take a while:");
 
-  // Reset the locked outputs table since not sure of a simple way to continue from where it left off (outputs aren't inserted in order)
+  // Reset all updated tables from migration since not sure of a simple and efficient way to continue if the migration
+  // stops before it's finished (outputs aren't inserted in order)
   MDB_dbi dbi;
-  DELETE_DB("locked_outputs");
+  DELETE_DB("locked_leaves");
   DELETE_DB("leaves");
   DELETE_DB("layers");
   DELETE_DB("block_infn");
   // TODO: if I instead iterate over every block's outputs and go in order that way, I'd know where to leave off based on
   // the new block_infn table. Problem is that's less efficient (read block tx hashes, use tx hashes to read output ID's, read outputs)
+  // ... Could also require outputs be inserted all-or-nothing first, and then can pick up where left off for the tree
+  // if any of leaves, layers, or block_infn tables exist, then locked_leaves migration should be complete
 
   do
   {
@@ -6588,8 +6697,8 @@ void BlockchainLMDB::migrate_5_6()
     result = mdb_txn_begin(m_env, NULL, 0, txn);
     if (result)
       throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str()));
-    lmdb_db_open(txn, LMDB_LOCKED_OUTPUTS, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_locked_outputs, "Failed to open db handle for m_locked_outputs");
-    mdb_set_dupsort(txn, m_locked_outputs, compare_uint64);
+    lmdb_db_open(txn, LMDB_LOCKED_LEAVES, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_locked_leaves, "Failed to open db handle for m_locked_leaves");
+    mdb_set_dupsort(txn, m_locked_leaves, compare_uint64);
     txn.commit();
 
     if (!m_batch_transactions)
@@ -6597,11 +6706,13 @@ void BlockchainLMDB::migrate_5_6()
     batch_start(1000);
     txn.m_txn = m_write_txn->m_txn;
 
-    MDB_cursor *c_output_amounts, *c_locked_outputs;
+    MDB_cursor *c_output_amounts, *c_locked_leaves;
     MDB_val k, v;
 
     MDB_cursor_op op = MDB_FIRST;
 
+    const uint64_t n_outputs = this->get_num_global_outputs();
+
     i = 0;
     while (1)
     {
@@ -6627,7 +6738,7 @@ void BlockchainLMDB::migrate_5_6()
         if (result)
           throw0(DB_ERROR(lmdb_error("Failed to open a cursor for output amounts: ", result).c_str()));
-        result = mdb_cursor_open(txn, m_locked_outputs, &c_locked_outputs);
+        result = mdb_cursor_open(txn, m_locked_leaves, &c_locked_leaves);
         if (result)
           throw0(DB_ERROR(lmdb_error("Failed to open a cursor for locked outputs: ", result).c_str()));
@@ -6664,70 +6775,22 @@ void BlockchainLMDB::migrate_5_6()
           output_data.commitment = rct::zeroCommit(amount);
         }
 
-        // Only valid keys can be used to construct fcmp's
-        if (!check_key(output_data.pubkey))
-        {
-          MERROR("Invalid output pub key: " << output_data.pubkey);
-          continue;
-        }
-
-        // Torsion clear the output pub key
-        // TODO: don't need to decompress and recompress points, can be optimized
-        rct::key torsion_cleared_pubkey = rct::scalarmultKey(rct::pk2rct(output_data.pubkey), rct::INV_EIGHT);
-        torsion_cleared_pubkey = rct::scalarmult8(torsion_cleared_pubkey);
-
-        // Get the block in which the output will unlock
-        // TODO: separate function that should also be used when syncing
-        uint64_t unlock_height;
-        // TODO: double triple check off by 1
-        if (output_data.unlock_time == 0)
-        {
-          unlock_height = output_data.height + CRYPTONOTE_DEFAULT_TX_SPENDABLE_AGE;
-        }
-        else if (output_data.unlock_time < CRYPTONOTE_MAX_BLOCK_NUMBER)
-        {
-          unlock_height = output_data.unlock_time;
-        }
-        else
-        {
-          // Interpret the output_data.unlock_time as time
-          // TODO: hardcode correct times for each network and take in nettype
-          const auto hf_v15_time = 1656629118;
-          const auto hf_v15_height = 2689608;
-
-          // Use the last hard fork's time and block combo to convert the time-based timelock into an unlock block
-          // TODO: consider taking into account 60s block times when that was consensus
-          if (hf_v15_time > output_data.unlock_time)
-          {
-            const auto seconds_since_unlock = hf_v15_time - output_data.unlock_time;
-            const auto blocks_since_unlock = seconds_since_unlock / DIFFICULTY_TARGET_V2;
-            CHECK_AND_ASSERT_THROW_MES(hf_v15_height > blocks_since_unlock, "unexpected blocks since unlock");
-            unlock_height = hf_v15_height - blocks_since_unlock;
-          }
-          else
-          {
-            const auto seconds_until_unlock = output_data.unlock_time - hf_v15_time;
-            const auto blocks_until_unlock = seconds_until_unlock / DIFFICULTY_TARGET_V2;
-            unlock_height = hf_v15_height + blocks_until_unlock;
-          }
-
-          /* Note: it's possible for the output to be spent before it reaches the unlock_height; this is ok. It can't
-            be spent again using an fcmp because it'll have a duplicate key image. It's possible for the output to
-            unlock by old rules, and then re-lock again. This is also ok, we just need to be sure that the new hf rules
-            use this unlock_height.
-          */
-
-          // TODO: double check the accuracy of this calculation
-          MDEBUG("unlock time: " << output_data.unlock_time << " , unlock_height: " << unlock_height);
-        }
-
-        // Get the leaf tuple
-        const auto leaf_tuple = fcmp::curve_trees::curve_trees_v1.output_to_leaf_tuple(
-          rct::rct2pk(torsion_cleared_pubkey),
-          rct::rct2pk(output_data.commitment));
-
-        if (unlock_height == 60)
-          MDEBUG(fcmp::curve_trees::curve_trees_v1.m_c2.to_string(leaf_tuple.O_x));
+        // Convert the output into a leaf tuple
+        fcmp::curve_trees::CurveTreesV1::LeafTuple leaf_tuple;
+        try
+        {
+          leaf_tuple = fcmp::curve_trees::curve_trees_v1.output_to_leaf_tuple(
+            output_data.pubkey,
+            rct::rct2pk(output_data.commitment));
+        }
+        catch(...)
+        {
+          // Invalid outputs can't be added to the tree
+          continue;
+        }
+
+        // Get the block in which the output will unlock
+        const uint64_t unlock_height = cryptonote::get_unlock_height(output_data.unlock_time, output_data.height);
 
         // Now add the leaf tuple to the locked outputs table
         MDB_val_set(k_height, unlock_height);
@@ -6735,8 +6798,7 @@ void BlockchainLMDB::migrate_5_6()
         // MDB_NODUPDATA because no benefit to having duplicate outputs in the tree, only 1 can be spent
         // Can't use MDB_APPENDDUP because outputs aren't inserted in order sorted by unlock height
-        // FIXME: if a dupe is removed from the locked outputs table and then re-inserted, the tree from a migration can look different than a tree constructed from syncing
-        result = mdb_cursor_put(c_locked_outputs, &k_height, &v_tuple, MDB_NODUPDATA);
+        result = mdb_cursor_put(c_locked_leaves, &k_height, &v_tuple, MDB_NODUPDATA);
         if (result != MDB_SUCCESS && result != MDB_KEYEXIST)
          throw0(DB_ERROR(lmdb_error("Failed to add locked output: ", result).c_str()));
        if (result == MDB_KEYEXIST)
@@ -6770,13 +6832,10 @@ void BlockchainLMDB::migrate_5_6()
     mdb_set_dupsort(txn, m_leaves, compare_uint64);
     mdb_set_dupsort(txn, m_layers, compare_uint64);
 
-    MDB_cursor *c_locked_outputs, *c_new_block_info, *c_old_block_info;
+    MDB_cursor *c_locked_leaves, *c_new_block_info, *c_old_block_info;
 
-    MDB_val k, v;
     MDB_val k_blk, v_blk;
 
-    MDB_cursor_op op = MDB_FIRST;
-
     const uint64_t n_blocks = height();
 
     i = 0;
@@ -6799,7 +6858,7 @@ void BlockchainLMDB::migrate_5_6()
         memset(&m_wcursors, 0, sizeof(m_wcursors));
       }
 
-      result = mdb_cursor_open(txn, m_locked_outputs, &c_locked_outputs);
+      result = mdb_cursor_open(txn, m_locked_leaves, &c_locked_leaves);
       if (result)
         throw0(DB_ERROR(lmdb_error("Failed to open a cursor for locked outputs: ", result).c_str()));
@@ -6819,53 +6878,12 @@ void BlockchainLMDB::migrate_5_6()
         }
       }
 
-      MDB_val_set(k_height, i);
-
       // Get all the locked outputs at that height
-      std::vector<fcmp::curve_trees::CurveTreesV1::LeafTuple> leaf_tuples;
-
-      // TODO: double check this gets all leaf tuples when it does multiple iters
-      MDB_cursor_op op = MDB_SET;
-      while (1)
-      {
-        result = mdb_cursor_get(c_locked_outputs, &k_height, &v, op);
-        if (result == MDB_NOTFOUND)
-          break;
-        if (result != MDB_SUCCESS)
-          throw0(DB_ERROR(lmdb_error("Failed to get next locked outputs: ", result).c_str()));
-
-        op = MDB_NEXT_MULTIPLE;
-
-        const uint64_t h = *(const uint64_t*)k_height.mv_data;
-        if (h != i)
-          throw0(DB_ERROR(("Height " + std::to_string(h) + " not the expected" + std::to_string(i)).c_str()));
-
-        const auto range_begin = ((const fcmp::curve_trees::CurveTreesV1::LeafTuple*)v.mv_data);
-        const auto range_end = range_begin + v.mv_size / sizeof(fcmp::curve_trees::CurveTreesV1::LeafTuple);
-
-        auto it = range_begin;
-
-        // The first MDB_NEXT_MULTIPLE includes the val from MDB_SET, so skip it
-        if (leaf_tuples.size() == 1)
-          ++it;
-
-        while (it < range_end)
-        {
-          leaf_tuples.push_back(*it);
-          ++it;
-        }
-      }
-
-      CHECK_AND_ASSERT_THROW_MES(m_write_txn != nullptr, "Must have m_write_txn set to grow tree");
+      const auto leaf_tuples = this->get_locked_leaf_tuples_at_height(i);
+
       this->grow_tree(fcmp::curve_trees::curve_trees_v1, leaf_tuples);
 
       // TODO: Remove locked outputs from the locked outputs table after adding them to tree
 
-      // Now update block info with num leaves in tree and new merkle root
-      const std::size_t n_leaf_tuples = this->get_num_leaf_tuples();
-      const auto root = this->get_tree_root();
-      MDEBUG("n_leaf_tuples: " << n_leaf_tuples);
-
       // Get old block_info and use it to set the new one with new values
       result = mdb_cursor_get(c_old_block_info, &k_blk, &v_blk, MDB_NEXT);
       if (result)
@@ -6881,8 +6899,10 @@ void BlockchainLMDB::migrate_5_6()
       bi.bi_hash = bi_old->bi_hash;
       bi.bi_cum_rct = bi_old->bi_cum_rct;
       bi.bi_long_term_block_weight = bi_old->bi_long_term_block_weight;
-      bi.bi_n_leaf_tuples = n_leaf_tuples;
-      bi.bi_tree_root = root;
+      bi.bi_n_leaf_tuples = this->get_num_leaf_tuples();
+      bi.bi_tree_root = this->get_tree_root();
+
+      MDEBUG("Height: " << i << " , n_leaf_tuples: " << bi.bi_n_leaf_tuples);
 
       MDB_val_set(nv, bi);
       result = mdb_cursor_put(c_new_block_info, (MDB_val *)&zerokval, &nv, MDB_APPENDDUP);

View File

@@ -27,6 +27,7 @@
 #pragma once
 
 #include <atomic>
+#include <map>
 
 #include "blockchain_db/blockchain_db.h"
 #include "cryptonote_basic/blobdatatype.h" // for type blobdata
@@ -64,7 +65,7 @@ typedef struct mdb_txn_cursors
   MDB_cursor *m_txc_spent_keys;
 
-  MDB_cursor *m_txc_locked_outputs;
+  MDB_cursor *m_txc_locked_leaves;
   MDB_cursor *m_txc_leaves;
   MDB_cursor *m_txc_layers;
 
@@ -91,7 +92,7 @@ typedef struct mdb_txn_cursors
 #define m_cur_tx_indices m_cursors->m_txc_tx_indices
 #define m_cur_tx_outputs m_cursors->m_txc_tx_outputs
 #define m_cur_spent_keys m_cursors->m_txc_spent_keys
-#define m_cur_locked_outputs m_cursors->m_txc_locked_outputs
+#define m_cur_locked_leaves m_cursors->m_txc_locked_leaves
 #define m_cur_leaves m_cursors->m_txc_leaves
 #define m_cur_layers m_cursors->m_txc_layers
 #define m_cur_txpool_meta m_cursors->m_txc_txpool_meta
@@ -116,7 +117,7 @@ typedef struct mdb_rflags
   bool m_rf_tx_indices;
   bool m_rf_tx_outputs;
   bool m_rf_spent_keys;
-  bool m_rf_locked_outputs;
+  bool m_rf_locked_leaves;
   bool m_rf_leaves;
   bool m_rf_layers;
   bool m_rf_txpool_meta;
@@ -277,6 +278,7 @@ public:
   virtual uint64_t get_tx_block_height(const crypto::hash& h) const;
 
   virtual uint64_t get_num_outputs(const uint64_t& amount) const;
+  virtual uint64_t get_num_global_outputs() const;
 
   virtual output_data_t get_output_key(const uint64_t& amount, const uint64_t& index, bool include_commitmemt) const;
   virtual void get_output_key(const epee::span<const uint64_t> &amounts, const std::vector<uint64_t> &offsets, std::vector<output_data_t> &outputs, bool allow_partial = false) const;
@@ -370,10 +372,10 @@ public:
   virtual void grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees,
     const std::vector<fcmp::curve_trees::CurveTreesV1::LeafTuple> &new_leaves);
 
-  virtual void trim_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const std::size_t trim_n_leaf_tuples);
+  virtual void trim_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const uint64_t trim_n_leaf_tuples);
 
   virtual bool audit_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees,
-    const std::size_t expected_n_leaf_tuples) const;
+    const uint64_t expected_n_leaf_tuples) const;
 
 private:
   void do_resize(uint64_t size_increase=0);
@@ -389,6 +391,7 @@ private:
                 , const uint64_t& coins_generated
                 , uint64_t num_rct_outs
                 , const crypto::hash& block_hash
+                , const std::multimap<uint64_t, fcmp::curve_trees::CurveTreesV1::LeafTuple>& leaf_tuples_by_unlock_height
                 );
 
   virtual void remove_block();
@@ -421,13 +424,13 @@ private:
   template<typename C>
   void grow_layer(const C &curve,
     const std::vector<fcmp::curve_trees::LayerExtension<C>> &layer_extensions,
-    const std::size_t c_idx,
-    const std::size_t layer_idx);
+    const uint64_t c_idx,
+    const uint64_t layer_idx);
 
   template<typename C>
-  void trim_layer(const fcmp::curve_trees::LayerReduction<C> &layer_reduction, const std::size_t layer_idx);
+  void trim_layer(const fcmp::curve_trees::LayerReduction<C> &layer_reduction, const uint64_t layer_idx);
 
-  std::size_t get_num_leaf_tuples() const;
+  uint64_t get_num_leaf_tuples() const;
 
   std::array<uint8_t, 32UL> get_tree_root() const;
 
@@ -443,10 +446,12 @@ private:
   template<typename C_CHILD, typename C_PARENT>
   bool audit_layer(const C_CHILD &c_child,
     const C_PARENT &c_parent,
-    const std::size_t layer_idx,
-    const std::size_t child_start_idx,
-    const std::size_t child_chunk_idx,
-    const std::size_t chunk_width) const;
+    const uint64_t layer_idx,
+    const uint64_t child_start_idx,
+    const uint64_t child_chunk_idx,
+    const uint64_t chunk_width) const;
+
+  std::vector<fcmp::curve_trees::CurveTreesV1::LeafTuple> get_locked_leaf_tuples_at_height(const uint64_t height);
 
   uint64_t num_outputs() const;
 
@@ -515,7 +520,7 @@ private:
   MDB_dbi m_spent_keys;
 
-  MDB_dbi m_locked_outputs;
+  MDB_dbi m_locked_leaves;
   MDB_dbi m_leaves;
   MDB_dbi m_layers;

View File

@@ -100,6 +100,7 @@ public:
   virtual std::vector<cryptonote::transaction> get_tx_list(const std::vector<crypto::hash>& hlist) const override { return std::vector<cryptonote::transaction>(); }
   virtual uint64_t get_tx_block_height(const crypto::hash& h) const override { return 0; }
   virtual uint64_t get_num_outputs(const uint64_t& amount) const override { return 1; }
+  virtual uint64_t get_num_global_outputs() const override { return 1; }
   virtual uint64_t get_indexing_base() const override { return 0; }
   virtual cryptonote::output_data_t get_output_key(const uint64_t& amount, const uint64_t& index, bool include_commitmemt) const override { return cryptonote::output_data_t(); }
   virtual cryptonote::tx_out_index get_output_tx_and_index_from_global(const uint64_t& index) const override { return cryptonote::tx_out_index(); }
@@ -118,8 +119,8 @@ public:
   virtual void remove_spent_key(const crypto::key_image& k_image) override {}
   virtual void grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees,
     const std::vector<fcmp::curve_trees::CurveTreesV1::LeafTuple> &new_leaves) override {};
-  virtual void trim_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const std::size_t trim_n_leaf_tuples) override {};
-  virtual bool audit_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const std::size_t expected_n_leaf_tuples) const override { return false; };
+  virtual void trim_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const uint64_t trim_n_leaf_tuples) override {};
+  virtual bool audit_tree(const fcmp::curve_trees::CurveTreesV1 &curve_trees, const uint64_t expected_n_leaf_tuples) const override { return false; };
   virtual bool for_all_key_images(std::function<bool(const crypto::key_image&)>) const override { return true; }
   virtual bool for_blocks_range(const uint64_t&, const uint64_t&, std::function<bool(uint64_t, const crypto::hash&, const cryptonote::block&)>) const override { return true; }
@@ -148,6 +149,7 @@ public:
                 , const uint64_t& coins_generated
                 , uint64_t num_rct_outs
                 , const crypto::hash& blk_hash
+                , const std::multimap<uint64_t, fcmp::curve_trees::CurveTreesV1::LeafTuple>& leaf_tuples_by_unlock_height
                 ) override { }
   virtual cryptonote::block get_block_from_height(const uint64_t& height) const override { return cryptonote::block(); }
   virtual void set_hard_fork_version(uint64_t height, uint8_t version) override {}
View File
@ -1644,4 +1644,57 @@ namespace cryptonote
sc_sub((unsigned char*)key.data, (const unsigned char*)key.data, (const unsigned char*)hash.data); sc_sub((unsigned char*)key.data, (const unsigned char*)key.data, (const unsigned char*)hash.data);
return key; return key;
} }
//---------------------------------------------------------------
// TODO: write tests for this func
uint64_t get_unlock_height(uint64_t unlock_time, uint64_t height_included_in_chain)
{
uint64_t unlock_height = 0;
const uint64_t default_unlock_height = height_included_in_chain + CRYPTONOTE_DEFAULT_TX_SPENDABLE_AGE;
// TODO: double- and triple-check for an off-by-one
if (unlock_time == 0)
{
unlock_height = default_unlock_height;
}
else if (unlock_time < CRYPTONOTE_MAX_BLOCK_NUMBER)
{
unlock_height = unlock_time;
}
else
{
// Interpret the unlock_time as a timestamp
// TODO: hardcode correct times for each network and take in nettype
const auto hf_v15_time = 1656629118;
const auto hf_v15_height = 2689608;
// Use the last hard fork's time and block combo to convert the time-based timelock into an unlock block
// TODO: consider taking into account 60s block times when that was consensus
if (hf_v15_time > unlock_time)
{
const auto seconds_since_unlock = hf_v15_time - unlock_time;
const auto blocks_since_unlock = seconds_since_unlock / DIFFICULTY_TARGET_V2;
CHECK_AND_ASSERT_THROW_MES(hf_v15_height > blocks_since_unlock, "unexpected blocks since unlock");
unlock_height = hf_v15_height - blocks_since_unlock;
}
else
{
const auto seconds_until_unlock = unlock_time - hf_v15_time;
const auto blocks_until_unlock = seconds_until_unlock / DIFFICULTY_TARGET_V2;
unlock_height = hf_v15_height + blocks_until_unlock;
}
/* Note: since this function was introduced for the hard fork that includes FCMPs, it's possible for an output
to be spent under the old rules before it reaches this unlock_height; this is fine. It can't be spent again,
because a second spend would have a duplicate key image. It's also possible for an output to unlock under the
old rules and then re-lock at the fork. This is also fine; we just need to be sure the new hard fork rules use
this unlock_height for FCMPs starting at the fork.
*/
// TODO: double check the accuracy of this calculation
MDEBUG("unlock time: " << unlock_time << " , unlock_height: " << unlock_height);
}
// Can't unlock earlier than the default unlock height
return std::max(unlock_height, default_unlock_height);
}
} }
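For reference, a minimal usage sketch of the conversion above. The expected values assume the mainnet constants (CRYPTONOTE_DEFAULT_TX_SPENDABLE_AGE = 10, DIFFICULTY_TARGET_V2 = 120, CRYPTONOTE_MAX_BLOCK_NUMBER = 500000000) and the hardcoded v15 anchor; these are illustrative, not test vectors:

#include "cryptonote_basic/cryptonote_format_utils.h"

void unlock_height_examples()
{
    // unlock_time == 0: unlocks at the default spendable age: 1000 + 10 = 1010
    const uint64_t h0 = cryptonote::get_unlock_height(0, 1000);

    // 0 < unlock_time < CRYPTONOTE_MAX_BLOCK_NUMBER: interpreted as a block height: 1500
    const uint64_t h1 = cryptonote::get_unlock_height(1500, 1000);

    // unlock_time >= CRYPTONOTE_MAX_BLOCK_NUMBER: interpreted as a unix timestamp and
    // converted via the v15 anchor: 1200s past the anchor => 2689608 + 10 = 2689618
    const uint64_t h2 = cryptonote::get_unlock_height(1656629118 + 1200, 2689608);

    (void)h0; (void)h1; (void)h2;
}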
View File
@ -265,6 +265,9 @@ namespace cryptonote
crypto::secret_key encrypt_key(crypto::secret_key key, const epee::wipeable_string &passphrase); crypto::secret_key encrypt_key(crypto::secret_key key, const epee::wipeable_string &passphrase);
crypto::secret_key decrypt_key(crypto::secret_key key, const epee::wipeable_string &passphrase); crypto::secret_key decrypt_key(crypto::secret_key key, const epee::wipeable_string &passphrase);
uint64_t get_unlock_height(uint64_t unlock_time, uint64_t height_included_in_chain);
#define CHECKED_GET_SPECIFIC_VARIANT(variant_var, specific_type, variable_name, fail_return_val) \ #define CHECKED_GET_SPECIFIC_VARIANT(variant_var, specific_type, variable_name, fail_return_val) \
CHECK_AND_ASSERT_MES(variant_var.type() == typeid(specific_type), fail_return_val, "wrong variant type: " << variant_var.type().name() << ", expected " << typeid(specific_type).name()); \ CHECK_AND_ASSERT_MES(variant_var.type() == typeid(specific_type), fail_return_val, "wrong variant type: " << variant_var.type().name() << ", expected " << typeid(specific_type).name()); \
specific_type& variable_name = boost::get<specific_type>(variant_var); specific_type& variable_name = boost::get<specific_type>(variant_var);
View File
@ -44,7 +44,9 @@ monero_add_library_with_deps(
target_link_libraries(fcmp target_link_libraries(fcmp
PUBLIC PUBLIC
crypto crypto
cryptonote_basic
epee epee
ringct
PRIVATE PRIVATE
${CMAKE_CURRENT_BINARY_DIR}/fcmp_rust/libfcmp_rust.a ${CMAKE_CURRENT_BINARY_DIR}/fcmp_rust/libfcmp_rust.a
${EXTRA_LIBRARIES}) ${EXTRA_LIBRARIES})
View File
@ -26,7 +26,9 @@
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "cryptonote_basic/cryptonote_format_utils.h"
#include "curve_trees.h" #include "curve_trees.h"
#include "ringct/rctOps.h"
namespace fcmp namespace fcmp
@ -92,10 +94,10 @@ template<typename C>
static LayerExtension<C> hash_children_chunks(const C &curve, static LayerExtension<C> hash_children_chunks(const C &curve,
const typename C::Scalar *old_last_child, const typename C::Scalar *old_last_child,
const typename C::Point *old_last_parent, const typename C::Point *old_last_parent,
const std::size_t start_offset, const uint64_t start_offset,
const std::size_t next_parent_start_index, const uint64_t next_parent_start_index,
const std::vector<typename C::Scalar> &new_child_scalars, const std::vector<typename C::Scalar> &new_child_scalars,
const std::size_t chunk_width) const uint64_t chunk_width)
{ {
LayerExtension<C> parents_out; LayerExtension<C> parents_out;
parents_out.start_idx = next_parent_start_index; parents_out.start_idx = next_parent_start_index;
@ -106,7 +108,7 @@ static LayerExtension<C> hash_children_chunks(const C &curve,
CHECK_AND_ASSERT_THROW_MES(chunk_width > start_offset, "start_offset must be smaller than chunk_width"); CHECK_AND_ASSERT_THROW_MES(chunk_width > start_offset, "start_offset must be smaller than chunk_width");
// See how many children we need to fill up the existing last chunk // See how many children we need to fill up the existing last chunk
std::size_t chunk_size = std::min(new_child_scalars.size(), chunk_width - start_offset); uint64_t chunk_size = std::min(new_child_scalars.size(), chunk_width - start_offset);
MDEBUG("First chunk_size: " << chunk_size << " , num new child scalars: " << new_child_scalars.size() MDEBUG("First chunk_size: " << chunk_size << " , num new child scalars: " << new_child_scalars.size()
<< " , start_offset: " << start_offset << " , parent layer start idx: " << parents_out.start_idx); << " , start_offset: " << start_offset << " , parent layer start idx: " << parents_out.start_idx);
@ -148,7 +150,7 @@ static LayerExtension<C> hash_children_chunks(const C &curve,
} }
// Hash chunks of child scalars to create the parent hashes // Hash chunks of child scalars to create the parent hashes
std::size_t chunk_start_idx = chunk_size; uint64_t chunk_start_idx = chunk_size;
while (chunk_start_idx < new_child_scalars.size()) while (chunk_start_idx < new_child_scalars.size())
{ {
chunk_size = std::min(chunk_width, new_child_scalars.size() - chunk_start_idx); chunk_size = std::min(chunk_width, new_child_scalars.size() - chunk_start_idx);
@ -177,9 +179,9 @@ static LayerExtension<C> hash_children_chunks(const C &curve,
return parents_out; return parents_out;
}; };
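A quick worked check of the chunking arithmetic above, with made-up values (chunk_width = 18, an existing last chunk already holding 4 children, 30 new child scalars arriving):

#include <algorithm>
#include <cstdint>

constexpr uint64_t chunk_width = 18, start_offset = 4, n_new_children = 30;
// Top up the existing last chunk first: min(30, 18 - 4) = 14 children,
// which rewrites the existing last parent hash.
constexpr uint64_t first_chunk_size = std::min(n_new_children, chunk_width - start_offset);
// Then hash the remainder: min(18, 30 - 14) = 16 children => one new partial parent.
constexpr uint64_t second_chunk_size = std::min(chunk_width, n_new_children - first_chunk_size);
static_assert(first_chunk_size == 14 && second_chunk_size == 16, "chunking example");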
//---------------------------------------------------------------------------------------------------------------------- //----------------------------------------------------------------------------------------------------------------------
static GrowLayerInstructions get_grow_layer_instructions(const std::size_t old_total_children, static GrowLayerInstructions get_grow_layer_instructions(const uint64_t old_total_children,
const std::size_t new_total_children, const uint64_t new_total_children,
const std::size_t parent_chunk_width, const uint64_t parent_chunk_width,
const bool last_child_will_change) const bool last_child_will_change)
{ {
// 1. Check pre-conditions on total number of children // 1. Check pre-conditions on total number of children
@ -198,10 +200,10 @@ static GrowLayerInstructions get_grow_layer_instructions(const std::size_t old_t
// 2. Calculate old and new total number of parents using totals for children // 2. Calculate old and new total number of parents using totals for children
// If there's only 1 child, then it must be the old root and thus it would have no old parents // If there's only 1 child, then it must be the old root and thus it would have no old parents
const std::size_t old_total_parents = old_total_children > 1 const uint64_t old_total_parents = old_total_children > 1
? (1 + ((old_total_children - 1) / parent_chunk_width)) ? (1 + ((old_total_children - 1) / parent_chunk_width))
: 0; : 0;
const std::size_t new_total_parents = 1 + ((new_total_children - 1) / parent_chunk_width); const uint64_t new_total_parents = 1 + ((new_total_children - 1) / parent_chunk_width);
// 3. Check pre-conditions on total number of parents // 3. Check pre-conditions on total number of parents
CHECK_AND_ASSERT_THROW_MES(new_total_parents >= old_total_parents, CHECK_AND_ASSERT_THROW_MES(new_total_parents >= old_total_parents,
@ -218,7 +220,7 @@ static GrowLayerInstructions get_grow_layer_instructions(const std::size_t old_t
// 4. Set the current offset in the last chunk // 4. Set the current offset in the last chunk
// - Note: this value starts at the last child in the last chunk, but it might need to be decremented by 1 if we're // - Note: this value starts at the last child in the last chunk, but it might need to be decremented by 1 if we're
// changing that last child // changing that last child
std::size_t offset = old_total_parents > 0 uint64_t offset = old_total_parents > 0
? (old_total_children % parent_chunk_width) ? (old_total_children % parent_chunk_width)
: 0; : 0;
@ -245,7 +247,7 @@ static GrowLayerInstructions get_grow_layer_instructions(const std::size_t old_t
const bool need_old_last_parent = need_old_last_child || adding_members_to_existing_last_chunk; const bool need_old_last_parent = need_old_last_child || adding_members_to_existing_last_chunk;
// 9. Set the next parent's start index // 9. Set the next parent's start index
std::size_t next_parent_start_index = old_total_parents; uint64_t next_parent_start_index = old_total_parents;
if (need_old_last_parent) if (need_old_last_parent)
{ {
// If we're updating the last parent, we need to bring the starting parent index back 1 // If we're updating the last parent, we need to bring the starting parent index back 1
@ -280,23 +282,21 @@ static GrowLayerInstructions get_grow_layer_instructions(const std::size_t old_t
}; };
//---------------------------------------------------------------------------------------------------------------------- //----------------------------------------------------------------------------------------------------------------------
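The 1 + ((n - 1) / width) expression used above (and in the leaf-layer variant below) is a ceiling division for n > 0; for example, with parent_chunk_width = 18:

static_assert(1 + ((18ULL - 1) / 18) == 1, "18 children hash into 1 parent");
static_assert(1 + ((19ULL - 1) / 18) == 2, "a 19th child spills into a 2nd parent");
static_assert(1 + ((37ULL - 1) / 18) == 3, "a 37th child needs a 3rd parent");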
static GrowLayerInstructions get_leaf_layer_grow_instructions(const std::size_t old_n_leaf_tuples, static GrowLayerInstructions get_leaf_layer_grow_instructions(const uint64_t old_n_leaf_tuples,
const std::size_t new_n_leaf_tuples, const uint64_t new_n_leaf_tuples,
const std::size_t leaf_tuple_size, const uint64_t leaf_tuple_size,
const std::size_t leaf_layer_chunk_width) const uint64_t leaf_layer_chunk_width)
{ {
// TODO: comments
// The leaf layer can never be the root layer // The leaf layer can never be the root layer
const bool setting_next_layer_after_old_root = false; const bool setting_next_layer_after_old_root = false;
const std::size_t old_total_children = old_n_leaf_tuples * leaf_tuple_size; const uint64_t old_total_children = old_n_leaf_tuples * leaf_tuple_size;
const std::size_t new_total_children = (old_n_leaf_tuples + new_n_leaf_tuples) * leaf_tuple_size; const uint64_t new_total_children = (old_n_leaf_tuples + new_n_leaf_tuples) * leaf_tuple_size;
const std::size_t old_total_parents = old_total_children > 0 const uint64_t old_total_parents = old_total_children > 0
? (1 + ((old_total_children - 1) / leaf_layer_chunk_width)) ? (1 + ((old_total_children - 1) / leaf_layer_chunk_width))
: 0; : 0;
const std::size_t new_total_parents = 1 + ((new_total_children - 1) / leaf_layer_chunk_width); const uint64_t new_total_parents = 1 + ((new_total_children - 1) / leaf_layer_chunk_width);
CHECK_AND_ASSERT_THROW_MES(new_total_children >= old_total_children, CHECK_AND_ASSERT_THROW_MES(new_total_children >= old_total_children,
"new_total_children must be >= old_total_children"); "new_total_children must be >= old_total_children");
@ -306,14 +306,14 @@ static GrowLayerInstructions get_leaf_layer_grow_instructions(const std::size_t
// Since leaf layer is append-only, no leaf can ever change and we'll never need an old leaf // Since leaf layer is append-only, no leaf can ever change and we'll never need an old leaf
const bool need_old_last_child = false; const bool need_old_last_child = false;
const std::size_t offset = old_total_children % leaf_layer_chunk_width; const uint64_t offset = old_total_children % leaf_layer_chunk_width;
const bool last_chunk_is_full = offset == 0; const bool last_chunk_is_full = offset == 0;
const bool adding_members_to_existing_last_chunk = old_total_parents > 0 && !last_chunk_is_full const bool adding_members_to_existing_last_chunk = old_total_parents > 0 && !last_chunk_is_full
&& new_total_children > old_total_children; && new_total_children > old_total_children;
const bool need_old_last_parent = adding_members_to_existing_last_chunk; const bool need_old_last_parent = adding_members_to_existing_last_chunk;
std::size_t next_parent_start_index = old_total_parents; uint64_t next_parent_start_index = old_total_parents;
if (need_old_last_parent) if (need_old_last_parent)
{ {
// If we're updating the last parent, we need to bring the starting parent index back 1 // If we're updating the last parent, we need to bring the starting parent index back 1
@ -356,8 +356,8 @@ static LayerExtension<C_PARENT> get_next_layer_extension(const C_CHILD &c_child,
const std::vector<typename C_CHILD::Point> &child_last_hashes, const std::vector<typename C_CHILD::Point> &child_last_hashes,
const std::vector<typename C_PARENT::Point> &parent_last_hashes, const std::vector<typename C_PARENT::Point> &parent_last_hashes,
const std::vector<LayerExtension<C_CHILD>> child_layer_extensions, const std::vector<LayerExtension<C_CHILD>> child_layer_extensions,
const std::size_t last_updated_child_idx, const uint64_t last_updated_child_idx,
const std::size_t last_updated_parent_idx) const uint64_t last_updated_parent_idx)
{ {
// TODO: comments // TODO: comments
const auto *child_last_hash = (last_updated_child_idx >= child_last_hashes.size()) const auto *child_last_hash = (last_updated_child_idx >= child_last_hashes.size())
@ -412,9 +412,9 @@ static LayerExtension<C_PARENT> get_next_layer_extension(const C_CHILD &c_child,
} }
//---------------------------------------------------------------------------------------------------------------------- //----------------------------------------------------------------------------------------------------------------------
static TrimLayerInstructions get_trim_layer_instructions( static TrimLayerInstructions get_trim_layer_instructions(
const std::size_t old_total_children, const uint64_t old_total_children,
const std::size_t new_total_children, const uint64_t new_total_children,
const std::size_t parent_chunk_width, const uint64_t parent_chunk_width,
const bool last_child_will_change) const bool last_child_will_change)
{ {
CHECK_AND_ASSERT_THROW_MES(new_total_children > 0, "new total children must be > 0"); CHECK_AND_ASSERT_THROW_MES(new_total_children > 0, "new total children must be > 0");
@ -422,19 +422,19 @@ static TrimLayerInstructions get_trim_layer_instructions(
"old_total_children must be >= new_total_children"); "old_total_children must be >= new_total_children");
// Calculate old and new total number of parents using totals for children // Calculate old and new total number of parents using totals for children
const std::size_t old_total_parents = 1 + ((old_total_children - 1) / parent_chunk_width); const uint64_t old_total_parents = 1 + ((old_total_children - 1) / parent_chunk_width);
const std::size_t new_total_parents = 1 + ((new_total_children - 1) / parent_chunk_width); const uint64_t new_total_parents = 1 + ((new_total_children - 1) / parent_chunk_width);
CHECK_AND_ASSERT_THROW_MES(old_total_parents >= new_total_parents, CHECK_AND_ASSERT_THROW_MES(old_total_parents >= new_total_parents,
"old_total_parents must be >= new_total_parents"); "old_total_parents must be >= new_total_parents");
CHECK_AND_ASSERT_THROW_MES(new_total_children > new_total_parents, CHECK_AND_ASSERT_THROW_MES(new_total_children > new_total_parents,
"new_total_children must be > new_total_parents"); "new_total_children must be > new_total_parents");
const std::size_t old_offset = old_total_children % parent_chunk_width; const uint64_t old_offset = old_total_children % parent_chunk_width;
std::size_t new_offset = new_total_children % parent_chunk_width; const uint64_t new_offset = new_total_children % parent_chunk_width;
// Get the number of existing children in what will become the new last chunk after trimming // Get the number of existing children in what will become the new last chunk after trimming
const std::size_t new_last_chunk_old_num_children = (old_total_parents > new_total_parents || old_offset == 0) const uint64_t new_last_chunk_old_num_children = (old_total_parents > new_total_parents || old_offset == 0)
? parent_chunk_width ? parent_chunk_width
: old_offset; : old_offset;
@ -444,7 +444,7 @@ static TrimLayerInstructions get_trim_layer_instructions(
"unexpected new_last_chunk_old_num_children"); "unexpected new_last_chunk_old_num_children");
// Get the number of children we'll be trimming from the new last chunk // Get the number of children we'll be trimming from the new last chunk
const std::size_t trim_n_children = new_offset == 0 const uint64_t trim_n_children = new_offset == 0
? 0 // The last chunk will remain full when the new_offset == 0 ? 0 // The last chunk will remain full when the new_offset == 0
: new_last_chunk_old_num_children - new_offset; : new_last_chunk_old_num_children - new_offset;
@ -457,43 +457,49 @@ static TrimLayerInstructions get_trim_layer_instructions(
CHECK_AND_ASSERT_THROW_MES(!(need_last_chunk_children_to_trim && need_last_chunk_remaining_children), CHECK_AND_ASSERT_THROW_MES(!(need_last_chunk_children_to_trim && need_last_chunk_remaining_children),
"cannot both need last children to trim and need the remaining children"); "cannot both need last children to trim and need the remaining children");
// TODO: cleaner conditional approach // If we're trimming from the new last chunk OR an element in the new last chunk will change, then we're going to
// TODO: comments // update the existing last hash, since its children are changing
const bool need_last_chunk_parent = !need_last_chunk_remaining_children && const bool update_existing_last_hash = trim_n_children > 0 || last_child_will_change;
(need_last_chunk_children_to_trim || last_child_will_change);
const bool update_existing_last_hash = need_last_chunk_remaining_children || need_last_chunk_parent; // If we're trimming using remaining children, then we're just going to call hash_grow as if the chunk is being
// hashed for the first time, and so we don't need the existing last hash in that case, even if the hash is updating
const bool need_existing_last_hash = update_existing_last_hash && !need_last_chunk_remaining_children;
std::size_t hash_offset = new_offset; // We need to decrement the offset we use to hash the chunk if the last child is changing
uint64_t hash_offset = new_offset;
if (last_child_will_change) if (last_child_will_change)
{ {
hash_offset = hash_offset == 0 ? (parent_chunk_width - 1) : (hash_offset - 1); hash_offset = hash_offset == 0
? (parent_chunk_width - 1) // chunk is full, so decrement full width by 1
if (need_last_chunk_children_to_trim || need_last_chunk_remaining_children) : (hash_offset - 1);
--new_offset;
} }
// Set the child index range so the caller knows which children to read from the tree
uint64_t start_trim_idx = 0;
uint64_t end_trim_idx = 0;
if (need_last_chunk_children_to_trim)
{
// We'll call hash_trim to trim the children between [offset, last chunk end]
const uint64_t chunk_boundary_start = (new_total_parents - 1) * parent_chunk_width;
const uint64_t chunk_boundary_end = chunk_boundary_start + parent_chunk_width;
start_trim_idx = chunk_boundary_start + hash_offset;
end_trim_idx = std::min(chunk_boundary_end, old_total_children);
}
else if (need_last_chunk_remaining_children)
{
// We'll call hash_grow with the remaining children between [0, offset]
CHECK_AND_ASSERT_THROW_MES(new_total_children >= hash_offset, "hash_offset is unexpectedly high");
start_trim_idx = new_total_children - hash_offset;
end_trim_idx = new_total_children;
}
// If we're trimming using remaining children, then we're just going to call hash_grow with offset 0
if (need_last_chunk_remaining_children) if (need_last_chunk_remaining_children)
{ {
hash_offset = 0; hash_offset = 0;
} }
std::size_t start_trim_idx = 0;
std::size_t end_trim_idx = 0;
if (need_last_chunk_children_to_trim)
{
const std::size_t chunk_boundary_start = (new_total_parents - 1) * parent_chunk_width;
const std::size_t chunk_boundary_end = chunk_boundary_start + parent_chunk_width;
start_trim_idx = chunk_boundary_start + new_offset;
end_trim_idx = std::min(chunk_boundary_end, old_total_children);
}
else if (need_last_chunk_remaining_children && new_offset > 0)
{
start_trim_idx = new_total_children - new_offset;
end_trim_idx = new_total_children;
}
MDEBUG("parent_chunk_width: " << parent_chunk_width MDEBUG("parent_chunk_width: " << parent_chunk_width
<< " , old_total_children: " << old_total_children << " , old_total_children: " << old_total_children
<< " , new_total_children: " << new_total_children << " , new_total_children: " << new_total_children
@ -501,10 +507,9 @@ static TrimLayerInstructions get_trim_layer_instructions(
<< " , new_total_parents: " << new_total_parents << " , new_total_parents: " << new_total_parents
<< " , need_last_chunk_children_to_trim: " << need_last_chunk_children_to_trim << " , need_last_chunk_children_to_trim: " << need_last_chunk_children_to_trim
<< " , need_last_chunk_remaining_children: " << need_last_chunk_remaining_children << " , need_last_chunk_remaining_children: " << need_last_chunk_remaining_children
<< " , need_last_chunk_parent: " << need_last_chunk_parent << " , need_existing_last_hash: " << need_existing_last_hash
<< " , need_new_last_child: " << last_child_will_change << " , need_new_last_child: " << last_child_will_change
<< " , update_existing_last_hash: " << update_existing_last_hash << " , update_existing_last_hash: " << update_existing_last_hash
<< " , new_offset: " << new_offset
<< " , hash_offset: " << hash_offset << " , hash_offset: " << hash_offset
<< " , start_trim_idx: " << start_trim_idx << " , start_trim_idx: " << start_trim_idx
<< " , end_trim_idx: " << end_trim_idx); << " , end_trim_idx: " << end_trim_idx);
@ -515,12 +520,11 @@ static TrimLayerInstructions get_trim_layer_instructions(
.new_total_children = new_total_children, .new_total_children = new_total_children,
.old_total_parents = old_total_parents, .old_total_parents = old_total_parents,
.new_total_parents = new_total_parents, .new_total_parents = new_total_parents,
.update_existing_last_hash = update_existing_last_hash,
.need_last_chunk_children_to_trim = need_last_chunk_children_to_trim, .need_last_chunk_children_to_trim = need_last_chunk_children_to_trim,
.need_last_chunk_remaining_children = need_last_chunk_remaining_children, .need_last_chunk_remaining_children = need_last_chunk_remaining_children,
.need_last_chunk_parent = need_last_chunk_parent, .need_existing_last_hash = need_existing_last_hash,
.need_new_last_child = last_child_will_change, .need_new_last_child = last_child_will_change,
.update_existing_last_hash = update_existing_last_hash,
.new_offset = new_offset,
.hash_offset = hash_offset, .hash_offset = hash_offset,
.start_trim_idx = start_trim_idx, .start_trim_idx = start_trim_idx,
.end_trim_idx = end_trim_idx, .end_trim_idx = end_trim_idx,
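A worked pass through the index math above, with made-up values: parent_chunk_width = 18, trimming a layer from 40 children down to 30 with an unchanged last child, so the children-to-trim path is taken and hash_offset == new_offset:

#include <algorithm>
#include <cstdint>

constexpr uint64_t parent_chunk_width = 18;
constexpr uint64_t old_total_children = 40, new_total_children = 30;
constexpr uint64_t old_total_parents = 1 + ((old_total_children - 1) / parent_chunk_width); // 3
constexpr uint64_t new_total_parents = 1 + ((new_total_children - 1) / parent_chunk_width); // 2
constexpr uint64_t new_offset = new_total_children % parent_chunk_width;                    // 12
// A whole parent disappears, so the new last chunk was full (18 children) and
// 18 - 12 = 6 of them must be trimmed away via hash_trim:
constexpr uint64_t chunk_boundary_start = (new_total_parents - 1) * parent_chunk_width;     // 18
constexpr uint64_t start_trim_idx = chunk_boundary_start + new_offset;                      // 30
constexpr uint64_t end_trim_idx =
    std::min(chunk_boundary_start + parent_chunk_width, old_total_children);                // 36
static_assert(end_trim_idx - start_trim_idx == 6, "6 children trimmed from the last chunk");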
@ -535,8 +539,8 @@ static typename fcmp::curve_trees::LayerReduction<C_PARENT> get_next_layer_reduc
const std::vector<typename C_PARENT::Point> &parent_last_hashes, const std::vector<typename C_PARENT::Point> &parent_last_hashes,
const std::vector<std::vector<typename C_PARENT::Scalar>> &children_to_trim, const std::vector<std::vector<typename C_PARENT::Scalar>> &children_to_trim,
const std::vector<typename C_CHILD::Point> &child_last_hashes, const std::vector<typename C_CHILD::Point> &child_last_hashes,
const std::size_t parent_layer_idx, const uint64_t parent_layer_idx,
const std::size_t child_layer_idx, const uint64_t child_layer_idx,
const std::vector<LayerReduction<C_CHILD>> &child_reductions) const std::vector<LayerReduction<C_CHILD>> &child_reductions)
{ {
LayerReduction<C_PARENT> layer_reduction_out; LayerReduction<C_PARENT> layer_reduction_out;
@ -544,18 +548,17 @@ static typename fcmp::curve_trees::LayerReduction<C_PARENT> get_next_layer_reduc
layer_reduction_out.new_total_parents = trim_layer_instructions.new_total_parents; layer_reduction_out.new_total_parents = trim_layer_instructions.new_total_parents;
layer_reduction_out.update_existing_last_hash = trim_layer_instructions.update_existing_last_hash; layer_reduction_out.update_existing_last_hash = trim_layer_instructions.update_existing_last_hash;
typename C_PARENT::Point existing_hash = c_parent.m_hash_init_point; if (trim_layer_instructions.need_existing_last_hash)
if (trim_layer_instructions.need_last_chunk_parent)
{
CHECK_AND_ASSERT_THROW_MES(parent_last_hashes.size() > parent_layer_idx, "missing last parent hash"); CHECK_AND_ASSERT_THROW_MES(parent_last_hashes.size() > parent_layer_idx, "missing last parent hash");
existing_hash = parent_last_hashes[parent_layer_idx];
} const typename C_PARENT::Point &existing_hash = trim_layer_instructions.need_existing_last_hash
? parent_last_hashes[parent_layer_idx]
: c_parent.m_hash_init_point;
std::vector<typename C_PARENT::Scalar> child_scalars; std::vector<typename C_PARENT::Scalar> child_scalars;
if (trim_layer_instructions.need_last_chunk_children_to_trim if (trim_layer_instructions.need_last_chunk_children_to_trim
|| trim_layer_instructions.need_last_chunk_remaining_children) || trim_layer_instructions.need_last_chunk_remaining_children)
{ {
// TODO: a clean way to do this without copying
CHECK_AND_ASSERT_THROW_MES(children_to_trim.size() > parent_layer_idx, "missing children to trim"); CHECK_AND_ASSERT_THROW_MES(children_to_trim.size() > parent_layer_idx, "missing children to trim");
child_scalars = children_to_trim[parent_layer_idx]; child_scalars = children_to_trim[parent_layer_idx];
} }
@ -576,8 +579,8 @@ static typename fcmp::curve_trees::LayerReduction<C_PARENT> get_next_layer_reduc
} }
else if (!trim_layer_instructions.need_last_chunk_children_to_trim) else if (!trim_layer_instructions.need_last_chunk_children_to_trim)
{ {
// TODO: cleaner conditional for this case // Falling to this conditional means we're not trimming at all, just updating the old last child
const std::size_t last_child_layer_idx = child_layer_idx - 1; const uint64_t last_child_layer_idx = child_layer_idx - 1;
CHECK_AND_ASSERT_THROW_MES(child_last_hashes.size() > last_child_layer_idx, "missing last child hash"); CHECK_AND_ASSERT_THROW_MES(child_last_hashes.size() > last_child_layer_idx, "missing last child hash");
const typename C_CHILD::Point &old_last_child = child_last_hashes[last_child_layer_idx]; const typename C_CHILD::Point &old_last_child = child_last_hashes[last_child_layer_idx];
@ -624,9 +627,29 @@ static typename fcmp::curve_trees::LayerReduction<C_PARENT> get_next_layer_reduc
//---------------------------------------------------------------------------------------------------------------------- //----------------------------------------------------------------------------------------------------------------------
template<> template<>
CurveTrees<Helios, Selene>::LeafTuple CurveTrees<Helios, Selene>::output_to_leaf_tuple( CurveTrees<Helios, Selene>::LeafTuple CurveTrees<Helios, Selene>::output_to_leaf_tuple(
const crypto::public_key &O, const crypto::public_key &output_pubkey,
const crypto::public_key &C) const const crypto::public_key &commitment) const
{ {
CHECK_AND_ASSERT_THROW_MES(crypto::check_key(output_pubkey), "invalid output pub key");
const auto clear_torsion = [](const crypto::public_key &key)
{
// TODO: don't need to decompress and recompress points, can be optimized
rct::key torsion_cleared_key = rct::scalarmultKey(rct::pk2rct(key), rct::INV_EIGHT);
torsion_cleared_key = rct::scalarmult8(torsion_cleared_key);
CHECK_AND_ASSERT_THROW_MES(torsion_cleared_key != rct::I, "cannot equal identity");
return torsion_cleared_key;
};
// Torsion clear the output pub key and commitment
const rct::key rct_O = clear_torsion(output_pubkey);
const rct::key rct_C = clear_torsion(commitment);
const crypto::public_key O = rct::rct2pk(rct_O);
const crypto::public_key C = rct::rct2pk(rct_C);
crypto::ec_point I; crypto::ec_point I;
crypto::derive_key_image_generator(O, I); crypto::derive_key_image_generator(O, I);
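For the record, the mul-by-INV_EIGHT-then-mul-by-8 round trip clears torsion because any ed25519 point decomposes as P = P_l + P_t, with P_l in the prime-order-l subgroup and P_t in the 8-torsion subgroup. The two multiplications compose to multiplication by 8·(8^{-1} mod l): on P_l this scalar is ≡ 1 (mod l), leaving P_l unchanged, while on P_t the factor of 8 already sends it to the identity. The result is exactly the torsion-free component of the input, so two keys differing only by torsion map to the same leaf.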
@ -654,9 +677,43 @@ std::vector<typename C2::Scalar> CurveTrees<C1, C2>::flatten_leaves(const std::v
return flattened_leaves; return flattened_leaves;
}; };
//---------------------------------------------------------------------------------------------------------------------- //----------------------------------------------------------------------------------------------------------------------
template <>
void CurveTrees<Helios, Selene>::tx_outs_to_leaf_tuples(const cryptonote::transaction &tx,
const uint64_t tx_height,
const bool miner_tx,
std::multimap<uint64_t, CurveTrees<Helios, Selene>::LeafTuple> &leaf_tuples_by_unlock_height_inout) const
{
const uint64_t unlock_height = cryptonote::get_unlock_height(tx.unlock_time, tx_height);
for (std::size_t i = 0; i < tx.vout.size(); ++i)
{
const auto &out = tx.vout[i];
crypto::public_key output_public_key;
if (!cryptonote::get_output_public_key(out, output_public_key))
throw std::runtime_error("Could not get an output public key from a tx output.");
const rct::key commitment = (miner_tx || tx.version < 2)
? rct::zeroCommit(out.amount)
: tx.rct_signatures.outPk[i].mask;
try
{
// Throws an error if output is invalid; we don't want leaf tuples from invalid outputs
auto leaf_tuple = output_to_leaf_tuple(
output_public_key,
rct::rct2pk(commitment));
leaf_tuples_by_unlock_height_inout.emplace(unlock_height, std::move(leaf_tuple));
}
catch (...)
{ /*continue*/ };
}
}
//----------------------------------------------------------------------------------------------------------------------
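For context on how the resulting multimap might be consumed while syncing (the function name, the db handle, and its grow_tree call below are illustrative, not APIs added by this commit):

#include <map>
#include <vector>
#include "fcmp/curve_trees.h"

// Hypothetical sketch: once the chain reaches chain_height, every leaf tuple
// keyed to that height has unlocked and can be appended to the tree.
void grow_tree_on_sync(
    const std::multimap<uint64_t, fcmp::curve_trees::CurveTreesV1::LeafTuple> &locked_leaves,
    const uint64_t chain_height)
{
    std::vector<fcmp::curve_trees::CurveTreesV1::LeafTuple> unlocked;
    const auto range = locked_leaves.equal_range(chain_height);
    for (auto it = range.first; it != range.second; ++it)
        unlocked.push_back(it->second);
    // e.g. m_db->grow_tree(fcmp::curve_trees::curve_trees_v1, unlocked);
}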
template<typename C1, typename C2> template<typename C1, typename C2>
typename CurveTrees<C1, C2>::TreeExtension CurveTrees<C1, C2>::get_tree_extension( typename CurveTrees<C1, C2>::TreeExtension CurveTrees<C1, C2>::get_tree_extension(
const std::size_t old_n_leaf_tuples, const uint64_t old_n_leaf_tuples,
const LastHashes &existing_last_hashes, const LastHashes &existing_last_hashes,
const std::vector<LeafTuple> &new_leaf_tuples) const const std::vector<LeafTuple> &new_leaf_tuples) const
{ {
@ -707,13 +764,13 @@ typename CurveTrees<C1, C2>::TreeExtension CurveTrees<C1, C2>::get_tree_extensio
// Alternate between hashing c2 children, c1 children, c2, c1, ... // Alternate between hashing c2 children, c1 children, c2, c1, ...
bool parent_is_c1 = true; bool parent_is_c1 = true;
std::size_t c1_last_idx = 0; uint64_t c1_last_idx = 0;
std::size_t c2_last_idx = 0; uint64_t c2_last_idx = 0;
while (grow_layer_instructions.new_total_parents > 1) while (grow_layer_instructions.new_total_parents > 1)
{ {
MDEBUG("Getting extension for layer " << (c1_last_idx + c2_last_idx + 1)); MDEBUG("Getting extension for layer " << (c1_last_idx + c2_last_idx + 1));
const std::size_t new_total_children = grow_layer_instructions.new_total_parents; const uint64_t new_total_children = grow_layer_instructions.new_total_parents;
grow_layer_instructions = this->set_next_layer_extension( grow_layer_instructions = this->set_next_layer_extension(
grow_layer_instructions, grow_layer_instructions,
@ -736,8 +793,8 @@ typename CurveTrees<C1, C2>::TreeExtension CurveTrees<C1, C2>::get_tree_extensio
//---------------------------------------------------------------------------------------------------------------------- //----------------------------------------------------------------------------------------------------------------------
template<typename C1, typename C2> template<typename C1, typename C2>
std::vector<TrimLayerInstructions> CurveTrees<C1, C2>::get_trim_instructions( std::vector<TrimLayerInstructions> CurveTrees<C1, C2>::get_trim_instructions(
const std::size_t old_n_leaf_tuples, const uint64_t old_n_leaf_tuples,
const std::size_t trim_n_leaf_tuples) const const uint64_t trim_n_leaf_tuples) const
{ {
CHECK_AND_ASSERT_THROW_MES(old_n_leaf_tuples > trim_n_leaf_tuples, "cannot trim more leaves than exist"); CHECK_AND_ASSERT_THROW_MES(old_n_leaf_tuples > trim_n_leaf_tuples, "cannot trim more leaves than exist");
CHECK_AND_ASSERT_THROW_MES(trim_n_leaf_tuples > 0, "must be trimming some leaves"); CHECK_AND_ASSERT_THROW_MES(trim_n_leaf_tuples > 0, "must be trimming some leaves");
@ -746,10 +803,10 @@ std::vector<TrimLayerInstructions> CurveTrees<C1, C2>::get_trim_instructions(
// Get trim instructions for the leaf layer // Get trim instructions for the leaf layer
{ {
const std::size_t old_total_leaves = old_n_leaf_tuples * LEAF_TUPLE_SIZE; const uint64_t old_total_leaves = old_n_leaf_tuples * LEAF_TUPLE_SIZE;
const std::size_t new_total_leaves = (old_n_leaf_tuples - trim_n_leaf_tuples) * LEAF_TUPLE_SIZE; const uint64_t new_total_leaves = (old_n_leaf_tuples - trim_n_leaf_tuples) * LEAF_TUPLE_SIZE;
const std::size_t parent_chunk_width = m_leaf_layer_chunk_width; const uint64_t parent_chunk_width = m_leaf_layer_chunk_width;
// Leaf layer's last child never changes since leaf layer is pop-/append-only // Leaf layer's last child never changes since leaf layer is pop-/append-only
const bool last_child_will_change = false; const bool last_child_will_change = false;
@ -789,12 +846,12 @@ typename CurveTrees<C1, C2>::TreeReduction CurveTrees<C1, C2>::get_tree_reductio
CHECK_AND_ASSERT_THROW_MES((trim_instructions[0].new_total_children % LEAF_TUPLE_SIZE) == 0, CHECK_AND_ASSERT_THROW_MES((trim_instructions[0].new_total_children % LEAF_TUPLE_SIZE) == 0,
"unexpected new total leaves"); "unexpected new total leaves");
const std::size_t new_total_leaf_tuples = trim_instructions[0].new_total_children / LEAF_TUPLE_SIZE; const uint64_t new_total_leaf_tuples = trim_instructions[0].new_total_children / LEAF_TUPLE_SIZE;
tree_reduction_out.new_total_leaf_tuples = new_total_leaf_tuples; tree_reduction_out.new_total_leaf_tuples = new_total_leaf_tuples;
bool use_c2 = true; bool use_c2 = true;
std::size_t c1_idx = 0; uint64_t c1_idx = 0;
std::size_t c2_idx = 0; uint64_t c2_idx = 0;
for (const auto &trim_layer_instructions : trim_instructions) for (const auto &trim_layer_instructions : trim_instructions)
{ {
@ -850,8 +907,8 @@ GrowLayerInstructions CurveTrees<C1, C2>::set_next_layer_extension(
const GrowLayerInstructions &prev_layer_instructions, const GrowLayerInstructions &prev_layer_instructions,
const bool parent_is_c1, const bool parent_is_c1,
const LastHashes &last_hashes, const LastHashes &last_hashes,
std::size_t &c1_last_idx_inout, uint64_t &c1_last_idx_inout,
std::size_t &c2_last_idx_inout, uint64_t &c2_last_idx_inout,
TreeExtension &tree_extension_inout) const TreeExtension &tree_extension_inout) const
{ {
const auto &c1_last_hashes = last_hashes.c1_last_hashes; const auto &c1_last_hashes = last_hashes.c1_last_hashes;
@ -860,7 +917,7 @@ GrowLayerInstructions CurveTrees<C1, C2>::set_next_layer_extension(
auto &c1_layer_extensions_out = tree_extension_inout.c1_layer_extensions; auto &c1_layer_extensions_out = tree_extension_inout.c1_layer_extensions;
auto &c2_layer_extensions_out = tree_extension_inout.c2_layer_extensions; auto &c2_layer_extensions_out = tree_extension_inout.c2_layer_extensions;
const std::size_t parent_chunk_width = parent_is_c1 ? m_c1_width : m_c2_width; const uint64_t parent_chunk_width = parent_is_c1 ? m_c1_width : m_c2_width;
const auto grow_layer_instructions = get_grow_layer_instructions( const auto grow_layer_instructions = get_grow_layer_instructions(
prev_layer_instructions.old_total_parents, prev_layer_instructions.old_total_parents,
View File
@ -28,10 +28,12 @@
#pragma once #pragma once
#include "cryptonote_basic/cryptonote_basic.h"
#include "crypto/crypto.h" #include "crypto/crypto.h"
#include "misc_log_ex.h" #include "misc_log_ex.h"
#include "tower_cycle.h" #include "tower_cycle.h"
#include <map>
#include <vector> #include <vector>
@ -50,7 +52,7 @@ typename C::Point get_new_parent(const C &curve, const typename C::Chunk &new_ch
template<typename C> template<typename C>
struct LayerExtension final struct LayerExtension final
{ {
std::size_t start_idx{0}; uint64_t start_idx{0};
bool update_existing_last_hash; bool update_existing_last_hash;
std::vector<typename C::Point> hashes; std::vector<typename C::Point> hashes;
}; };
@ -59,7 +61,7 @@ struct LayerExtension final
template<typename C> template<typename C>
struct LayerReduction final struct LayerReduction final
{ {
std::size_t new_total_parents{0}; uint64_t new_total_parents{0};
bool update_existing_last_hash; bool update_existing_last_hash;
typename C::Point new_last_hash; typename C::Point new_last_hash;
}; };
@ -68,15 +70,15 @@ struct LayerReduction final
struct GrowLayerInstructions final struct GrowLayerInstructions final
{ {
// The max chunk width of children used to hash into a parent // The max chunk width of children used to hash into a parent
std::size_t parent_chunk_width; uint64_t parent_chunk_width;
// Total children refers to the total number of elements in a layer // Total children refers to the total number of elements in a layer
std::size_t old_total_children; uint64_t old_total_children;
std::size_t new_total_children; uint64_t new_total_children;
// Total parents refers to the total number of hashes of chunks of children // Total parents refers to the total number of hashes of chunks of children
std::size_t old_total_parents; uint64_t old_total_parents;
std::size_t new_total_parents; uint64_t new_total_parents;
// When updating the tree, we use this boolean to know when we'll need to use the tree's existing old root in order // When updating the tree, we use this boolean to know when we'll need to use the tree's existing old root in order
// to set a new layer after that root // to set a new layer after that root
@ -88,37 +90,43 @@ struct GrowLayerInstructions final
bool need_old_last_parent; bool need_old_last_parent;
// The first chunk that needs to be updated's first child's offset within that chunk // The first chunk that needs to be updated's first child's offset within that chunk
std::size_t start_offset; uint64_t start_offset;
// The parent's starting index in the layer // The parent's starting index in the layer
std::size_t next_parent_start_index; uint64_t next_parent_start_index;
}; };
// Useful metadata for trimming a layer // Useful metadata for trimming a layer
struct TrimLayerInstructions final struct TrimLayerInstructions final
{ {
// The max chunk width of children used to hash into a parent // The max chunk width of children used to hash into a parent
std::size_t parent_chunk_width; uint64_t parent_chunk_width;
// Total children refers to the total number of elements in a layer // Total children refers to the total number of elements in a layer
std::size_t old_total_children; uint64_t old_total_children;
std::size_t new_total_children; uint64_t new_total_children;
// Total parents refers to the total number of hashes of chunks of children // Total parents refers to the total number of hashes of chunks of children
std::size_t old_total_parents; uint64_t old_total_parents;
std::size_t new_total_parents; uint64_t new_total_parents;
bool need_last_chunk_children_to_trim;
bool need_last_chunk_remaining_children;
bool need_last_chunk_parent;
bool need_new_last_child;
// True if the new last chunk's existing parent hash will need to be updated
bool update_existing_last_hash; bool update_existing_last_hash;
std::size_t new_offset; // Whether we need to explicitly trim children from the new last chunk
std::size_t hash_offset; bool need_last_chunk_children_to_trim;
// Whether we need to trim by growing using the remaining children from the new last chunk
bool need_last_chunk_remaining_children;
// Whether we need the new last chunk's existing parent hash in order to complete the trim
bool need_existing_last_hash;
// Whether we need the new last child from the new last chunk in order to complete the trim
bool need_new_last_child;
std::size_t start_trim_idx; // The offset to use when hashing the last chunk
std::size_t end_trim_idx; uint64_t hash_offset;
// The start and end indices of the children we'll need in order to trim the last chunk
uint64_t start_trim_idx;
uint64_t end_trim_idx;
}; };
//---------------------------------------------------------------------------------------------------------------------- //----------------------------------------------------------------------------------------------------------------------
@ -130,7 +138,7 @@ template<typename C1, typename C2>
class CurveTrees class CurveTrees
{ {
public: public:
CurveTrees(const C1 &c1, const C2 &c2, const std::size_t c1_width, const std::size_t c2_width): CurveTrees(const C1 &c1, const C2 &c2, const uint64_t c1_width, const uint64_t c2_width):
m_c1{c1}, m_c1{c1},
m_c2{c2}, m_c2{c2},
m_c1_width{c1_width}, m_c1_width{c1_width},
@ -147,20 +155,20 @@ public:
struct LeafTuple final struct LeafTuple final
{ {
// Output ed25519 point x-coordinate // Output ed25519 point x-coordinate
const typename C2::Scalar O_x; typename C2::Scalar O_x;
// Key image generator x-coordinate // Key image generator x-coordinate
const typename C2::Scalar I_x; typename C2::Scalar I_x;
// Commitment x-coordinate // Commitment x-coordinate
const typename C2::Scalar C_x; typename C2::Scalar C_x;
}; };
static const std::size_t LEAF_TUPLE_SIZE = 3; static const uint64_t LEAF_TUPLE_SIZE = 3;
static_assert(sizeof(LeafTuple) == (sizeof(typename C2::Scalar) * LEAF_TUPLE_SIZE), "unexpected LeafTuple size"); static_assert(sizeof(LeafTuple) == (sizeof(typename C2::Scalar) * LEAF_TUPLE_SIZE), "unexpected LeafTuple size");
// Contiguous leaves in the tree, starting at a specified start_idx in the leaf layer // Contiguous leaves in the tree, starting at a specified start_idx in the leaf layer
struct Leaves final struct Leaves final
{ {
// Starting leaf tuple index in the leaf layer // Starting leaf tuple index in the leaf layer
std::size_t start_leaf_tuple_idx{0}; uint64_t start_leaf_tuple_idx{0};
// Contiguous leaves in a tree that start at the start_idx // Contiguous leaves in a tree that start at the start_idx
std::vector<LeafTuple> tuples; std::vector<LeafTuple> tuples;
}; };
@ -180,7 +188,7 @@ public:
// - c2_layer_reductions[0] is first layer after leaves, then c1_layer_reductions[0], c2_layer_reductions[1], etc // - c2_layer_reductions[0] is first layer after leaves, then c1_layer_reductions[0], c2_layer_reductions[1], etc
struct TreeReduction final struct TreeReduction final
{ {
std::size_t new_total_leaf_tuples; uint64_t new_total_leaf_tuples;
std::vector<LayerReduction<C1>> c1_layer_reductions; std::vector<LayerReduction<C1>> c1_layer_reductions;
std::vector<LayerReduction<C2>> c2_layer_reductions; std::vector<LayerReduction<C2>> c2_layer_reductions;
}; };
@ -206,21 +214,27 @@ public:
//member functions //member functions
public: public:
// Convert cryptonote output pub key and commitment to a leaf tuple for the curve trees tree // Convert cryptonote output pub key and commitment to a leaf tuple for the curve trees tree
LeafTuple output_to_leaf_tuple(const crypto::public_key &O, const crypto::public_key &C) const; LeafTuple output_to_leaf_tuple(const crypto::public_key &output_pubkey, const crypto::public_key &C) const;
// Flatten leaves [(O.x, I.x, C.x),(O.x, I.x, C.x),...] -> [scalar,scalar,scalar,scalar,scalar,scalar,...] // Flatten leaves [(O.x, I.x, C.x),(O.x, I.x, C.x),...] -> [scalar,scalar,scalar,scalar,scalar,scalar,...]
std::vector<typename C2::Scalar> flatten_leaves(const std::vector<LeafTuple> &leaves) const; std::vector<typename C2::Scalar> flatten_leaves(const std::vector<LeafTuple> &leaves) const;
// Convert cryptonote tx outs to leaf tuples, grouped by each leaf tuple's unlock height
void tx_outs_to_leaf_tuples(const cryptonote::transaction &tx,
const uint64_t tx_height,
const bool miner_tx,
std::multimap<uint64_t, LeafTuple> &leaf_tuples_by_unlock_height_inout) const;
// Take in the existing number of leaf tuples and the existing last hashes of each layer in the tree, as well as new // Take in the existing number of leaf tuples and the existing last hashes of each layer in the tree, as well as new
// leaves to add to the tree, and return a tree extension struct that can be used to extend a tree // leaves to add to the tree, and return a tree extension struct that can be used to extend a tree
TreeExtension get_tree_extension(const std::size_t old_n_leaf_tuples, TreeExtension get_tree_extension(const uint64_t old_n_leaf_tuples,
const LastHashes &existing_last_hashes, const LastHashes &existing_last_hashes,
const std::vector<LeafTuple> &new_leaf_tuples) const; const std::vector<LeafTuple> &new_leaf_tuples) const;
// Get instructions useful for trimming all existing layers in the tree // Get instructions useful for trimming all existing layers in the tree
std::vector<TrimLayerInstructions> get_trim_instructions( std::vector<TrimLayerInstructions> get_trim_instructions(
const std::size_t old_n_leaf_tuples, const uint64_t old_n_leaf_tuples,
const std::size_t trim_n_leaf_tuples) const; const uint64_t trim_n_leaf_tuples) const;
// Take in the instructions useful for trimming all existing layers in the tree, all children to be trimmed from // Take in the instructions useful for trimming all existing layers in the tree, all children to be trimmed from
// each last chunk, and the existing last hashes in what will become the new last parent of each layer, and return // each last chunk, and the existing last hashes in what will become the new last parent of each layer, and return
@ -238,8 +252,8 @@ private:
const GrowLayerInstructions &prev_layer_instructions, const GrowLayerInstructions &prev_layer_instructions,
const bool parent_is_c1, const bool parent_is_c1,
const LastHashes &last_hashes, const LastHashes &last_hashes,
std::size_t &c1_last_idx_inout, uint64_t &c1_last_idx_inout,
std::size_t &c2_last_idx_inout, uint64_t &c2_last_idx_inout,
TreeExtension &tree_extension_inout) const; TreeExtension &tree_extension_inout) const;
//public member variables //public member variables
@ -249,12 +263,11 @@ public:
const C2 &m_c2; const C2 &m_c2;
// The leaf layer has a distinct chunk width than the other layers // The leaf layer has a distinct chunk width than the other layers
// TODO: public function for update_last_parent, and make this private const uint64_t m_leaf_layer_chunk_width;
const std::size_t m_leaf_layer_chunk_width;
// The chunk widths of the layers in the tree tied to each curve // The chunk widths of the layers in the tree tied to each curve
const std::size_t m_c1_width; const uint64_t m_c1_width;
const std::size_t m_c2_width; const uint64_t m_c2_width;
}; };
//---------------------------------------------------------------------------------------------------------------------- //----------------------------------------------------------------------------------------------------------------------
using Helios = tower_cycle::Helios; using Helios = tower_cycle::Helios;
@ -263,8 +276,8 @@ using CurveTreesV1 = CurveTrees<Helios, Selene>;
// https://github.com/kayabaNerve/fcmp-plus-plus/blob // https://github.com/kayabaNerve/fcmp-plus-plus/blob
// /b2742e86f3d18155fd34dd1ed69cb8f79b900fce/crypto/fcmps/src/tests.rs#L81-L82 // /b2742e86f3d18155fd34dd1ed69cb8f79b900fce/crypto/fcmps/src/tests.rs#L81-L82
static const std::size_t HELIOS_CHUNK_WIDTH = 38; static const uint64_t HELIOS_CHUNK_WIDTH = 38;
static const std::size_t SELENE_CHUNK_WIDTH = 18; static const uint64_t SELENE_CHUNK_WIDTH = 18;
static const Helios HELIOS; static const Helios HELIOS;
static const Selene SELENE; static const Selene SELENE;
static const CurveTreesV1 curve_trees_v1(HELIOS, SELENE, HELIOS_CHUNK_WIDTH, SELENE_CHUNK_WIDTH); static const CurveTreesV1 curve_trees_v1(HELIOS, SELENE, HELIOS_CHUNK_WIDTH, SELENE_CHUNK_WIDTH);
View File
@ -84,6 +84,7 @@ namespace rct {
return bytes[i]; return bytes[i];
} }
bool operator==(const key &k) const { return !crypto_verify_32(bytes, k.bytes); } bool operator==(const key &k) const { return !crypto_verify_32(bytes, k.bytes); }
bool operator!=(const key &k) const { return crypto_verify_32(bytes, k.bytes); }
unsigned char bytes[32]; unsigned char bytes[32];
}; };
typedef std::vector<key> keyV; //vector of keys typedef std::vector<key> keyV; //vector of keys
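crypto_verify_32 is the constant-time 32-byte comparator (0 on equality, nonzero otherwise), so the new operator!= just surfaces its nonzero result. A quick sanity sketch, illustrative only:

#include <cassert>
#include "ringct/rctOps.h"

void key_compare_example()
{
    rct::key a = rct::zero();
    rct::key b = rct::zero();
    b.bytes[0] = 1;
    assert(a == a);  // crypto_verify_32 returns 0 => equal
    assert(a != b);  // nonzero => not equal, via the new operator!=
}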
View File
@ -38,6 +38,7 @@ target_link_libraries(block_weight
PRIVATE PRIVATE
cryptonote_core cryptonote_core
blockchain_db blockchain_db
fcmp
${EXTRA_LIBRARIES}) ${EXTRA_LIBRARIES})
add_test( add_test(
View File
@ -32,6 +32,7 @@
#include <math.h> #include <math.h>
#include "cryptonote_core/cryptonote_core.h" #include "cryptonote_core/cryptonote_core.h"
#include "blockchain_db/testdb.h" #include "blockchain_db/testdb.h"
#include "fcmp/curve_trees.h"
#define LONG_TERM_BLOCK_WEIGHT_WINDOW 5000 #define LONG_TERM_BLOCK_WEIGHT_WINDOW 5000
@ -64,6 +65,7 @@ public:
, const uint64_t& coins_generated , const uint64_t& coins_generated
, uint64_t num_rct_outs , uint64_t num_rct_outs
, const crypto::hash& blk_hash , const crypto::hash& blk_hash
, const std::multimap<uint64_t, fcmp::curve_trees::CurveTreesV1::LeafTuple>& leaf_tuples_by_unlock_height
) override { ) override {
blocks.push_back({block_weight, long_term_block_weight}); blocks.push_back({block_weight, long_term_block_weight});
} }
View File
@ -88,6 +88,7 @@ namespace
, const uint64_t& coins_generated , const uint64_t& coins_generated
, uint64_t num_rct_outs , uint64_t num_rct_outs
, const crypto::hash& blk_hash , const crypto::hash& blk_hash
, const std::multimap<uint64_t, fcmp::curve_trees::CurveTreesV1::LeafTuple>& leaf_tuples_by_unlock_height
) override ) override
{ {
blocks.push_back({blk, blk_hash}); blocks.push_back({blk, blk_hash});
@ -171,7 +172,7 @@ static std::unique_ptr<cryptonote::BlockchainAndPool> init_blockchain(const std:
const block *blk = &boost::get<block>(ev); const block *blk = &boost::get<block>(ev);
auto blk_hash = get_block_hash(*blk); auto blk_hash = get_block_hash(*blk);
bdb->add_block(*blk, 1, 1, 1, 0, 0, blk_hash); bdb->add_block(*blk, 1, 1, 1, 0, 0, blk_hash, {});
} }
bool r = bap->blockchain.init(bdb, nettype, true, test_options, 2, nullptr); bool r = bap->blockchain.init(bdb, nettype, true, test_options, 2, nullptr);
View File
@ -82,12 +82,11 @@ static std::vector<typename C_PARENT::Scalar> get_last_chunk_children_to_trim(co
const CurveTreesGlobalTree::Layer<C_CHILD> &child_layer, const CurveTreesGlobalTree::Layer<C_CHILD> &child_layer,
const bool need_last_chunk_children_to_trim, const bool need_last_chunk_children_to_trim,
const bool need_last_chunk_remaining_children, const bool need_last_chunk_remaining_children,
const std::size_t new_offset,
const std::size_t start_trim_idx, const std::size_t start_trim_idx,
const std::size_t end_trim_idx) const std::size_t end_trim_idx)
{ {
std::vector<typename C_PARENT::Scalar> children_to_trim_out; std::vector<typename C_PARENT::Scalar> children_to_trim_out;
if (need_last_chunk_children_to_trim || (need_last_chunk_remaining_children && new_offset > 0)) if (end_trim_idx > start_trim_idx)
{ {
std::size_t idx = start_trim_idx; std::size_t idx = start_trim_idx;
MDEBUG("Start trim from idx: " << idx << " , ending trim at: " << end_trim_idx); MDEBUG("Start trim from idx: " << idx << " , ending trim at: " << end_trim_idx);
@ -344,13 +343,10 @@ CurveTreesV1::LastChunkChildrenToTrim CurveTreesGlobalTree::get_all_last_chunk_c
CHECK_AND_ASSERT_THROW_MES(!trim_instructions.empty(), "no instructions"); CHECK_AND_ASSERT_THROW_MES(!trim_instructions.empty(), "no instructions");
const auto &trim_leaf_layer_instructions = trim_instructions[0]; const auto &trim_leaf_layer_instructions = trim_instructions[0];
const std::size_t new_offset = trim_leaf_layer_instructions.new_offset;
std::vector<Selene::Scalar> leaves_to_trim; std::vector<Selene::Scalar> leaves_to_trim;
// TODO: separate function // TODO: separate function
if (trim_leaf_layer_instructions.need_last_chunk_children_to_trim || if (trim_leaf_layer_instructions.end_trim_idx > trim_leaf_layer_instructions.start_trim_idx)
(trim_leaf_layer_instructions.need_last_chunk_remaining_children && new_offset > 0))
{ {
std::size_t idx = trim_leaf_layer_instructions.start_trim_idx; std::size_t idx = trim_leaf_layer_instructions.start_trim_idx;
MDEBUG("Start trim from idx: " << idx); MDEBUG("Start trim from idx: " << idx);
@ -384,7 +380,6 @@ CurveTreesV1::LastChunkChildrenToTrim CurveTreesGlobalTree::get_all_last_chunk_c
const bool need_last_chunk_children_to_trim = trim_layer_instructions.need_last_chunk_children_to_trim; const bool need_last_chunk_children_to_trim = trim_layer_instructions.need_last_chunk_children_to_trim;
const bool need_last_chunk_remaining_children = trim_layer_instructions.need_last_chunk_remaining_children; const bool need_last_chunk_remaining_children = trim_layer_instructions.need_last_chunk_remaining_children;
const std::size_t new_offset = trim_layer_instructions.new_offset;
const std::size_t start_trim_idx = trim_layer_instructions.start_trim_idx; const std::size_t start_trim_idx = trim_layer_instructions.start_trim_idx;
const std::size_t end_trim_idx = trim_layer_instructions.end_trim_idx; const std::size_t end_trim_idx = trim_layer_instructions.end_trim_idx;
@ -397,7 +392,6 @@ CurveTreesV1::LastChunkChildrenToTrim CurveTreesGlobalTree::get_all_last_chunk_c
m_tree.c1_layers[c1_idx], m_tree.c1_layers[c1_idx],
need_last_chunk_children_to_trim, need_last_chunk_children_to_trim,
need_last_chunk_remaining_children, need_last_chunk_remaining_children,
new_offset,
start_trim_idx, start_trim_idx,
end_trim_idx); end_trim_idx);
@ -413,7 +407,6 @@ CurveTreesV1::LastChunkChildrenToTrim CurveTreesGlobalTree::get_all_last_chunk_c
m_tree.c2_layers[c2_idx], m_tree.c2_layers[c2_idx],
need_last_chunk_children_to_trim, need_last_chunk_children_to_trim,
need_last_chunk_remaining_children, need_last_chunk_remaining_children,
new_offset,
start_trim_idx, start_trim_idx,
end_trim_idx); end_trim_idx);
@ -1102,7 +1095,7 @@ TEST(curve_trees, hash_trim)
const auto selene_scalar_0 = generate_random_selene_scalar(); const auto selene_scalar_0 = generate_random_selene_scalar();
const auto selene_scalar_1 = generate_random_selene_scalar(); const auto selene_scalar_1 = generate_random_selene_scalar();
// Get the initial hash of the 3 selene scalars // Get the initial hash of the 2 selene scalars
std::vector<Selene::Scalar> init_children{selene_scalar_0, selene_scalar_1}; std::vector<Selene::Scalar> init_children{selene_scalar_0, selene_scalar_1};
const auto init_hash = fcmp::curve_trees::curve_trees_v1.m_c2.hash_grow( const auto init_hash = fcmp::curve_trees::curve_trees_v1.m_c2.hash_grow(
/*existing_hash*/ fcmp::curve_trees::curve_trees_v1.m_c2.m_hash_init_point, /*existing_hash*/ fcmp::curve_trees::curve_trees_v1.m_c2.m_hash_init_point,
@ -1172,3 +1165,63 @@ TEST(curve_trees, hash_trim)
ASSERT_EQ(trim_res_bytes, grow_res_bytes); ASSERT_EQ(trim_res_bytes, grow_res_bytes);
} }
} }
//----------------------------------------------------------------------------------------------------------------------
TEST(curve_trees, hash_grow)
{
// Start by hashing: {selene_scalar_0, selene_scalar_1}
// Then grow 1: {selene_scalar_0, selene_scalar_1, selene_scalar_2}
// Then grow 1: {selene_scalar_0, selene_scalar_1, selene_scalar_2, selene_scalar_3}
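// Invariant under test (stated informally): for any prefix P and suffix S,
//   hash_grow(hash_grow(init, 0, zero, P), |P|, zero, S) == hash_grow(init, 0, zero, P || S)
// i.e. extending an existing chunk hash must match rehashing all children at once.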
const auto selene_scalar_0 = generate_random_selene_scalar();
const auto selene_scalar_1 = generate_random_selene_scalar();
// Get the initial hash of the 2 selene scalars
std::vector<Selene::Scalar> all_children{selene_scalar_0, selene_scalar_1};
const auto init_hash = fcmp::curve_trees::curve_trees_v1.m_c2.hash_grow(
/*existing_hash*/ fcmp::curve_trees::curve_trees_v1.m_c2.m_hash_init_point,
/*offset*/ 0,
/*existing_child_at_offset*/ fcmp::curve_trees::curve_trees_v1.m_c2.zero_scalar(),
/*children*/ Selene::Chunk{all_children.data(), all_children.size()});
// Extend with a new child
const auto selene_scalar_2 = generate_random_selene_scalar();
std::vector<Selene::Scalar> new_children{selene_scalar_2};
const auto ext_hash = fcmp::curve_trees::curve_trees_v1.m_c2.hash_grow(
init_hash,
all_children.size(),
fcmp::curve_trees::curve_trees_v1.m_c2.zero_scalar(),
Selene::Chunk{new_children.data(), new_children.size()});
const auto ext_hash_bytes = fcmp::curve_trees::curve_trees_v1.m_c2.to_bytes(ext_hash);
// Now compare to calling hash_grow on all of {selene_scalar_0, selene_scalar_1, selene_scalar_2} at once
all_children.push_back(selene_scalar_2);
const auto grow_res = fcmp::curve_trees::curve_trees_v1.m_c2.hash_grow(
/*existing_hash*/ fcmp::curve_trees::curve_trees_v1.m_c2.m_hash_init_point,
/*offset*/ 0,
/*existing_child_at_offset*/ fcmp::curve_trees::curve_trees_v1.m_c2.zero_scalar(),
/*children*/ Selene::Chunk{all_children.data(), all_children.size()});
const auto grow_res_bytes = fcmp::curve_trees::curve_trees_v1.m_c2.to_bytes(grow_res);
ASSERT_EQ(ext_hash_bytes, grow_res_bytes);
// Extend again with a new child
const auto selene_scalar_3 = generate_random_selene_scalar();
new_children = {selene_scalar_3};
const auto ext_hash2 = fcmp::curve_trees::curve_trees_v1.m_c2.hash_grow(
ext_hash,
all_children.size(),
fcmp::curve_trees::curve_trees_v1.m_c2.zero_scalar(),
Selene::Chunk{new_children.data(), new_children.size()});
const auto ext_hash_bytes2 = fcmp::curve_trees::curve_trees_v1.m_c2.to_bytes(ext_hash2);
// Now compare to calling hash_grow on all of {selene_scalar_0, selene_scalar_1, selene_scalar_2, selene_scalar_3} at once
all_children.push_back(selene_scalar_3);
const auto grow_res2 = fcmp::curve_trees::curve_trees_v1.m_c2.hash_grow(
/*existing_hash*/ fcmp::curve_trees::curve_trees_v1.m_c2.m_hash_init_point,
/*offset*/ 0,
/*existing_child_at_offset*/ fcmp::curve_trees::curve_trees_v1.m_c2.zero_scalar(),
/*children*/ Selene::Chunk{all_children.data(), all_children.size()});
const auto grow_res_bytes2 = fcmp::curve_trees::curve_trees_v1.m_c2.to_bytes(grow_res2);
ASSERT_EQ(ext_hash_bytes2, grow_res_bytes2);
}
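
The property pinned down by this test is what lets tree sync append newly unlocked leaves to a parent chunk without rehashing the children already in it. A hedged generic sketch of that usage, assuming each curve type exposes the Point, Scalar, Chunk, hash_grow, and zero_scalar members used above (the helper name is hypothetical):

    // Append `added` children to a chunk whose current hash is `cur_hash`
    // and which already holds `n_existing` children. Sketch only; assumes
    // no existing child is being replaced at the offset.
    template <typename C>
    static typename C::Point append_to_chunk(const C &curve,
        const typename C::Point &cur_hash,
        const std::size_t n_existing,
        const std::vector<typename C::Scalar> &added)
    {
        return curve.hash_grow(
            cur_hash,
            /*offset*/ n_existing,
            /*existing_child_at_offset*/ curve.zero_scalar(),
            /*children*/ typename C::Chunk{added.data(), added.size()});
    }

Under that sketch, ext_hash above would be append_to_chunk(fcmp::curve_trees::curve_trees_v1.m_c2, init_hash, 2, {selene_scalar_2}).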

View File

@ -54,6 +54,7 @@ public:
, const uint64_t& coins_generated , const uint64_t& coins_generated
, uint64_t num_rct_outs , uint64_t num_rct_outs
, const crypto::hash& blk_hash , const crypto::hash& blk_hash
, const std::multimap<uint64_t, fcmp::curve_trees::CurveTreesV1::LeafTuple>& leaf_tuples_by_unlock_height
) override { ) override {
blocks.push_back(blk); blocks.push_back(blk);
} }
@ -107,20 +108,20 @@ TEST(major, Only)
ASSERT_FALSE(hf.add(mkblock(0, 2), 0)); ASSERT_FALSE(hf.add(mkblock(0, 2), 0));
ASSERT_FALSE(hf.add(mkblock(2, 2), 0)); ASSERT_FALSE(hf.add(mkblock(2, 2), 0));
ASSERT_TRUE(hf.add(mkblock(1, 2), 0)); ASSERT_TRUE(hf.add(mkblock(1, 2), 0));
db.add_block(mkblock(1, 1), 0, 0, 0, 0, 0, crypto::hash()); db.add_block(mkblock(1, 1), 0, 0, 0, 0, 0, crypto::hash(), {});
// block height 1, only version 1 is accepted // block height 1, only version 1 is accepted
ASSERT_FALSE(hf.add(mkblock(0, 2), 1)); ASSERT_FALSE(hf.add(mkblock(0, 2), 1));
ASSERT_FALSE(hf.add(mkblock(2, 2), 1)); ASSERT_FALSE(hf.add(mkblock(2, 2), 1));
ASSERT_TRUE(hf.add(mkblock(1, 2), 1)); ASSERT_TRUE(hf.add(mkblock(1, 2), 1));
db.add_block(mkblock(1, 1), 0, 0, 0, 0, 0, crypto::hash()); db.add_block(mkblock(1, 1), 0, 0, 0, 0, 0, crypto::hash(), {});
// block height 2, only version 2 is accepted // block height 2, only version 2 is accepted
ASSERT_FALSE(hf.add(mkblock(0, 2), 2)); ASSERT_FALSE(hf.add(mkblock(0, 2), 2));
ASSERT_FALSE(hf.add(mkblock(1, 2), 2)); ASSERT_FALSE(hf.add(mkblock(1, 2), 2));
ASSERT_FALSE(hf.add(mkblock(3, 2), 2)); ASSERT_FALSE(hf.add(mkblock(3, 2), 2));
ASSERT_TRUE(hf.add(mkblock(2, 2), 2)); ASSERT_TRUE(hf.add(mkblock(2, 2), 2));
db.add_block(mkblock(2, 1), 0, 0, 0, 0, 0, crypto::hash()); db.add_block(mkblock(2, 1), 0, 0, 0, 0, 0, crypto::hash(), {});
} }
TEST(empty_hardforks, Success) TEST(empty_hardforks, Success)
@ -134,7 +135,7 @@ TEST(empty_hardforks, Success)
ASSERT_TRUE(hf.get_state(time(NULL) + 3600*24*400) == HardFork::Ready); ASSERT_TRUE(hf.get_state(time(NULL) + 3600*24*400) == HardFork::Ready);
for (uint64_t h = 0; h <= 10; ++h) { for (uint64_t h = 0; h <= 10; ++h) {
db.add_block(mkblock(hf, h, 1), 0, 0, 0, 0, 0, crypto::hash()); db.add_block(mkblock(hf, h, 1), 0, 0, 0, 0, 0, crypto::hash(), {});
ASSERT_TRUE(hf.add(db.get_block_from_height(h), h)); ASSERT_TRUE(hf.add(db.get_block_from_height(h), h));
} }
ASSERT_EQ(hf.get(0), 1); ASSERT_EQ(hf.get(0), 1);
@ -168,14 +169,14 @@ TEST(check_for_height, Success)
for (uint64_t h = 0; h <= 4; ++h) { for (uint64_t h = 0; h <= 4; ++h) {
ASSERT_TRUE(hf.check_for_height(mkblock(1, 1), h)); ASSERT_TRUE(hf.check_for_height(mkblock(1, 1), h));
ASSERT_FALSE(hf.check_for_height(mkblock(2, 2), h)); // block version is too high ASSERT_FALSE(hf.check_for_height(mkblock(2, 2), h)); // block version is too high
db.add_block(mkblock(hf, h, 1), 0, 0, 0, 0, 0, crypto::hash()); db.add_block(mkblock(hf, h, 1), 0, 0, 0, 0, 0, crypto::hash(), {});
ASSERT_TRUE(hf.add(db.get_block_from_height(h), h)); ASSERT_TRUE(hf.add(db.get_block_from_height(h), h));
} }
for (uint64_t h = 5; h <= 10; ++h) { for (uint64_t h = 5; h <= 10; ++h) {
ASSERT_FALSE(hf.check_for_height(mkblock(1, 1), h)); // block version is too low ASSERT_FALSE(hf.check_for_height(mkblock(1, 1), h)); // block version is too low
ASSERT_TRUE(hf.check_for_height(mkblock(2, 2), h)); ASSERT_TRUE(hf.check_for_height(mkblock(2, 2), h));
db.add_block(mkblock(hf, h, 2), 0, 0, 0, 0, 0, crypto::hash()); db.add_block(mkblock(hf, h, 2), 0, 0, 0, 0, 0, crypto::hash(), {});
ASSERT_TRUE(hf.add(db.get_block_from_height(h), h)); ASSERT_TRUE(hf.add(db.get_block_from_height(h), h));
} }
} }
@ -192,19 +193,19 @@ TEST(get, next_version)
for (uint64_t h = 0; h <= 4; ++h) { for (uint64_t h = 0; h <= 4; ++h) {
ASSERT_EQ(2, hf.get_next_version()); ASSERT_EQ(2, hf.get_next_version());
db.add_block(mkblock(hf, h, 1), 0, 0, 0, 0, 0, crypto::hash()); db.add_block(mkblock(hf, h, 1), 0, 0, 0, 0, 0, crypto::hash(), {});
ASSERT_TRUE(hf.add(db.get_block_from_height(h), h)); ASSERT_TRUE(hf.add(db.get_block_from_height(h), h));
} }
for (uint64_t h = 5; h <= 9; ++h) { for (uint64_t h = 5; h <= 9; ++h) {
ASSERT_EQ(4, hf.get_next_version()); ASSERT_EQ(4, hf.get_next_version());
db.add_block(mkblock(hf, h, 2), 0, 0, 0, 0, 0, crypto::hash()); db.add_block(mkblock(hf, h, 2), 0, 0, 0, 0, 0, crypto::hash(), {});
ASSERT_TRUE(hf.add(db.get_block_from_height(h), h)); ASSERT_TRUE(hf.add(db.get_block_from_height(h), h));
} }
for (uint64_t h = 10; h <= 15; ++h) { for (uint64_t h = 10; h <= 15; ++h) {
ASSERT_EQ(4, hf.get_next_version()); ASSERT_EQ(4, hf.get_next_version());
db.add_block(mkblock(hf, h, 4), 0, 0, 0, 0, 0, crypto::hash()); db.add_block(mkblock(hf, h, 4), 0, 0, 0, 0, 0, crypto::hash(), {});
ASSERT_TRUE(hf.add(db.get_block_from_height(h), h)); ASSERT_TRUE(hf.add(db.get_block_from_height(h), h));
} }
} }
@ -245,7 +246,7 @@ TEST(steps_asap, Success)
hf.init(); hf.init();
for (uint64_t h = 0; h < 10; ++h) { for (uint64_t h = 0; h < 10; ++h) {
db.add_block(mkblock(hf, h, 9), 0, 0, 0, 0, 0, crypto::hash()); db.add_block(mkblock(hf, h, 9), 0, 0, 0, 0, 0, crypto::hash(), {});
ASSERT_TRUE(hf.add(db.get_block_from_height(h), h)); ASSERT_TRUE(hf.add(db.get_block_from_height(h), h));
} }
@ -272,7 +273,7 @@ TEST(steps_1, Success)
hf.init(); hf.init();
for (uint64_t h = 0 ; h < 10; ++h) { for (uint64_t h = 0 ; h < 10; ++h) {
db.add_block(mkblock(hf, h, h+1), 0, 0, 0, 0, 0, crypto::hash()); db.add_block(mkblock(hf, h, h+1), 0, 0, 0, 0, 0, crypto::hash(), {});
ASSERT_TRUE(hf.add(db.get_block_from_height(h), h)); ASSERT_TRUE(hf.add(db.get_block_from_height(h), h));
} }
@ -297,7 +298,7 @@ TEST(reorganize, Same)
// index 0 1 2 3 4 5 6 7 8 9 // index 0 1 2 3 4 5 6 7 8 9
static const uint8_t block_versions[] = { 1, 1, 4, 4, 7, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 }; static const uint8_t block_versions[] = { 1, 1, 4, 4, 7, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 };
for (uint64_t h = 0; h < 20; ++h) { for (uint64_t h = 0; h < 20; ++h) {
db.add_block(mkblock(hf, h, block_versions[h]), 0, 0, 0, 0, 0, crypto::hash()); db.add_block(mkblock(hf, h, block_versions[h]), 0, 0, 0, 0, 0, crypto::hash(), {});
ASSERT_TRUE(hf.add(db.get_block_from_height(h), h)); ASSERT_TRUE(hf.add(db.get_block_from_height(h), h));
} }
@ -328,7 +329,7 @@ TEST(reorganize, Changed)
static const uint8_t block_versions[] = { 1, 1, 4, 4, 7, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 }; static const uint8_t block_versions[] = { 1, 1, 4, 4, 7, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 };
static const uint8_t expected_versions[] = { 1, 1, 1, 1, 1, 1, 4, 4, 7, 7, 9, 9, 9, 9, 9, 9 }; static const uint8_t expected_versions[] = { 1, 1, 1, 1, 1, 1, 4, 4, 7, 7, 9, 9, 9, 9, 9, 9 };
for (uint64_t h = 0; h < 16; ++h) { for (uint64_t h = 0; h < 16; ++h) {
db.add_block(mkblock(hf, h, block_versions[h]), 0, 0, 0, 0, 0, crypto::hash()); db.add_block(mkblock(hf, h, block_versions[h]), 0, 0, 0, 0, 0, crypto::hash(), {});
ASSERT_TRUE (hf.add(db.get_block_from_height(h), h)); ASSERT_TRUE (hf.add(db.get_block_from_height(h), h));
} }
@ -348,7 +349,7 @@ TEST(reorganize, Changed)
ASSERT_EQ(db.height(), 3); ASSERT_EQ(db.height(), 3);
hf.reorganize_from_block_height(2); hf.reorganize_from_block_height(2);
for (uint64_t h = 3; h < 16; ++h) { for (uint64_t h = 3; h < 16; ++h) {
db.add_block(mkblock(hf, h, block_versions_new[h]), 0, 0, 0, 0, 0, crypto::hash()); db.add_block(mkblock(hf, h, block_versions_new[h]), 0, 0, 0, 0, 0, crypto::hash(), {});
bool ret = hf.add(db.get_block_from_height(h), h); bool ret = hf.add(db.get_block_from_height(h), h);
ASSERT_EQ (ret, h < 15); ASSERT_EQ (ret, h < 15);
} }
@ -372,7 +373,7 @@ TEST(voting, threshold)
for (uint64_t h = 0; h <= 8; ++h) { for (uint64_t h = 0; h <= 8; ++h) {
uint8_t v = 1 + !!(h % 8); uint8_t v = 1 + !!(h % 8);
db.add_block(mkblock(hf, h, v), 0, 0, 0, 0, 0, crypto::hash()); db.add_block(mkblock(hf, h, v), 0, 0, 0, 0, 0, crypto::hash(), {});
bool ret = hf.add(db.get_block_from_height(h), h); bool ret = hf.add(db.get_block_from_height(h), h);
if (h >= 8 && threshold == 87) { if (h >= 8 && threshold == 87) {
// for threshold 87, we reach the threshold at height 7, so from height 8, hard fork to version 2, but 8 tries to add 1 // for threshold 87, we reach the threshold at height 7, so from height 8, hard fork to version 2, but 8 tries to add 1
@ -406,7 +407,7 @@ TEST(voting, different_thresholds)
static const uint8_t expected_versions[] = { 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4 }; static const uint8_t expected_versions[] = { 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4 };
for (uint64_t h = 0; h < sizeof(block_versions) / sizeof(block_versions[0]); ++h) { for (uint64_t h = 0; h < sizeof(block_versions) / sizeof(block_versions[0]); ++h) {
db.add_block(mkblock(hf, h, block_versions[h]), 0, 0, 0, 0, 0, crypto::hash()); db.add_block(mkblock(hf, h, block_versions[h]), 0, 0, 0, 0, 0, crypto::hash(), {});
bool ret = hf.add(db.get_block_from_height(h), h); bool ret = hf.add(db.get_block_from_height(h), h);
ASSERT_EQ(ret, true); ASSERT_EQ(ret, true);
} }
@ -459,7 +460,7 @@ TEST(voting, info)
ASSERT_EQ(expected_thresholds[h], threshold); ASSERT_EQ(expected_thresholds[h], threshold);
ASSERT_EQ(4, voting); ASSERT_EQ(4, voting);
db.add_block(mkblock(hf, h, block_versions[h]), 0, 0, 0, 0, 0, crypto::hash()); db.add_block(mkblock(hf, h, block_versions[h]), 0, 0, 0, 0, 0, crypto::hash(), {});
ASSERT_TRUE(hf.add(db.get_block_from_height(h), h)); ASSERT_TRUE(hf.add(db.get_block_from_height(h), h));
} }
} }
@ -522,7 +523,7 @@ TEST(reorganize, changed)
#define ADD(v, h, a) \ #define ADD(v, h, a) \
do { \ do { \
cryptonote::block b = mkblock(hf, h, v); \ cryptonote::block b = mkblock(hf, h, v); \
db.add_block(b, 0, 0, 0, 0, 0, crypto::hash()); \ db.add_block(b, 0, 0, 0, 0, 0, crypto::hash(), {}); \
ASSERT_##a(hf.add(b, h)); \ ASSERT_##a(hf.add(b, h)); \
} while(0) } while(0)
#define ADD_TRUE(v, h) ADD(v, h, TRUE) #define ADD_TRUE(v, h) ADD(v, h, TRUE)

View File

@ -58,6 +58,7 @@ public:
, const uint64_t& coins_generated , const uint64_t& coins_generated
, uint64_t num_rct_outs , uint64_t num_rct_outs
, const crypto::hash& blk_hash , const crypto::hash& blk_hash
, const std::multimap<uint64_t, fcmp::curve_trees::CurveTreesV1::LeafTuple>& leaf_tuples_by_unlock_height
) override { ) override {
blocks.push_back({block_weight, long_term_block_weight}); blocks.push_back({block_weight, long_term_block_weight});
} }