fcmp++: implement iterative audit_tree function

- Recursion in audit_layer goes too deep on large trees, so audit the tree iteratively instead
j-berman 2024-08-08 19:26:08 -07:00
parent 9f0dd859e6
commit 8b12a335c6
3 changed files with 136 additions and 106 deletions


@@ -1972,12 +1972,16 @@ bool BlockchainLMDB::audit_tree(const uint64_t expected_n_leaf_tuples) const
uint64_t layer_idx = 0;
uint64_t child_chunk_idx = 0;
MDB_cursor_op leaf_op = MDB_FIRST;
MDB_cursor_op parent_op = MDB_FIRST;
while (1)
{
// Get next leaf chunk
std::vector<fcmp_pp::curve_trees::CurveTreesV1::LeafTuple> leaf_tuples_chunk;
leaf_tuples_chunk.reserve(m_curve_trees->m_c2_width);
if (child_chunk_idx && child_chunk_idx % 1000 == 0)
MINFO("Auditing layer " << layer_idx << ", child_chunk_idx " << child_chunk_idx);
// Iterate until chunk is full or we get to the end of all leaves
while (1)
{
@@ -2003,7 +2007,8 @@ bool BlockchainLMDB::audit_tree(const uint64_t expected_n_leaf_tuples) const
MDB_val_set(v_parent, child_chunk_idx);
MDEBUG("Getting leaf chunk hash starting at child_chunk_idx " << child_chunk_idx);
int result = mdb_cursor_get(m_cur_layers, &k_parent, &v_parent, MDB_GET_BOTH);
int result = mdb_cursor_get(m_cur_layers, &k_parent, &v_parent, parent_op);
parent_op = MDB_NEXT_DUP;
// Check end condition: no more leaf tuples in the leaf layer
if (leaf_tuples_chunk.empty())
@@ -2019,6 +2024,8 @@ bool BlockchainLMDB::audit_tree(const uint64_t expected_n_leaf_tuples) const
if (result != MDB_SUCCESS)
throw0(DB_ERROR(lmdb_error("Failed to get parent in first layer: ", result).c_str()));
if (layer_idx != *(uint64_t*)k_parent.mv_data)
throw0(DB_ERROR("unexpected parent encountered"));
// Get the expected leaf chunk hash
const auto leaves = m_curve_trees->flatten_leaves(std::move(leaf_tuples_chunk));
@@ -2039,40 +2046,36 @@ bool BlockchainLMDB::audit_tree(const uint64_t expected_n_leaf_tuples) const
const auto expected_bytes = m_curve_trees->m_c2->to_bytes(chunk_hash);
const auto actual_bytes = lv->child_chunk_hash;
CHECK_AND_ASSERT_MES(expected_bytes == actual_bytes, false, "unexpected leaf chunk hash");
CHECK_AND_ASSERT_MES(lv->child_chunk_idx == child_chunk_idx, false, "unexpected child chunk idx");
++child_chunk_idx;
}
MDEBUG("Successfully audited leaf layer");
// Traverse up the tree auditing each layer until we've audited every layer in the tree
while (1)
bool audit_complete = false;
while (!audit_complete)
{
MDEBUG("Auditing layer " << layer_idx);
// Alternate starting with c1 as parent (we already audited c2 leaf parents), then c2 as parent, then c1, etc.
const bool parent_is_c1 = layer_idx % 2 == 0;
if (parent_is_c1)
{
if (this->audit_layer(
audit_complete = this->audit_layer(
/*c_child*/ m_curve_trees->m_c2,
/*c_parent*/ m_curve_trees->m_c1,
layer_idx,
/*child_start_idx*/ 0,
/*child_chunk_idx*/ 0,
/*chunk_width*/ m_curve_trees->m_c1_width))
{
break;
}
/*chunk_width*/ m_curve_trees->m_c1_width);
}
else
{
if (this->audit_layer(
audit_complete = this->audit_layer(
/*c_child*/ m_curve_trees->m_c1,
/*c_parent*/ m_curve_trees->m_c2,
layer_idx,
/*child_start_idx*/ 0,
/*child_chunk_idx*/ 0,
/*chunk_width*/ m_curve_trees->m_c2_width))
{
break;
}
/*chunk_width*/ m_curve_trees->m_c2_width);
}
++layer_idx;
@@ -2086,112 +2089,141 @@ bool BlockchainLMDB::audit_tree(const uint64_t expected_n_leaf_tuples) const
template<typename C_CHILD, typename C_PARENT>
bool BlockchainLMDB::audit_layer(const std::unique_ptr<C_CHILD> &c_child,
const std::unique_ptr<C_PARENT> &c_parent,
const uint64_t layer_idx,
const uint64_t child_start_idx,
const uint64_t child_chunk_idx,
const uint64_t child_layer_idx,
const uint64_t chunk_width) const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
TXN_PREFIX_RDONLY();
RCURSOR(layers)
MDEBUG("Auditing layer " << layer_idx << " at child_start_idx " << child_start_idx
<< " and child_chunk_idx " << child_chunk_idx);
// Open separate cursors for child and parent layer
MDB_cursor *child_layer_cursor, *parent_layer_cursor;
// Get next child chunk
std::vector<typename C_CHILD::Point> child_chunk;
child_chunk.reserve(chunk_width);
int c_result = mdb_cursor_open(m_txn, m_layers, &child_layer_cursor);
if (c_result)
throw0(DB_ERROR(lmdb_error("Failed to open child cursor: ", c_result).c_str()));
int p_result = mdb_cursor_open(m_txn, m_layers, &parent_layer_cursor);
if (p_result)
throw0(DB_ERROR(lmdb_error("Failed to open parent cursor: ", p_result).c_str()));
MDB_val_copy<uint64_t> k_child(layer_idx);
MDB_val_set(v_child, child_start_idx);
MDB_cursor_op op_child = MDB_GET_BOTH;
// Set the cursors to the start of each layer
const uint64_t parent_layer_idx = child_layer_idx + 1;
MDB_val_set(k_child, child_layer_idx);
MDB_val_set(k_parent, parent_layer_idx);
MDB_val v_child, v_parent;
c_result = mdb_cursor_get(child_layer_cursor, &k_child, &v_child, MDB_SET);
p_result = mdb_cursor_get(parent_layer_cursor, &k_parent, &v_parent, MDB_SET);
if (c_result != MDB_SUCCESS)
throw0(DB_ERROR(lmdb_error("Failed to get child: ", c_result).c_str()));
if (p_result != MDB_SUCCESS && p_result != MDB_NOTFOUND)
throw0(DB_ERROR(lmdb_error("Failed to get parent: ", p_result).c_str()));
// Begin to audit the layer
MDB_cursor_op op_child = MDB_FIRST_DUP;
MDB_cursor_op op_parent = MDB_FIRST_DUP;
bool audit_complete = false;
uint64_t child_chunk_idx = 0;
while (1)
{
int result = mdb_cursor_get(m_cur_layers, &k_child, &v_child, op_child);
op_child = MDB_NEXT_DUP;
if (result == MDB_NOTFOUND)
if (child_chunk_idx && child_chunk_idx % 1000 == 0)
MINFO("Auditing layer " << parent_layer_idx << ", child_chunk_idx " << child_chunk_idx);
// Get next child chunk
std::vector<typename C_CHILD::Point> child_chunk;
child_chunk.reserve(chunk_width);
while (1)
{
int result = mdb_cursor_get(child_layer_cursor, &k_child, &v_child, op_child);
op_child = MDB_NEXT_DUP;
if (result == MDB_NOTFOUND)
break;
if (result != MDB_SUCCESS)
throw0(DB_ERROR(lmdb_error("Failed to get child: ", result).c_str()));
const auto *lv = (layer_val *)v_child.mv_data;
auto child_point = c_child->from_bytes(lv->child_chunk_hash);
child_chunk.emplace_back(std::move(child_point));
if (child_chunk.size() == chunk_width)
break;
}
// Get the actual chunk hash from the db
int result = mdb_cursor_get(parent_layer_cursor, &k_parent, &v_parent, op_parent);
op_parent = MDB_NEXT_DUP;
// Check for end conditions
// End condition A (audit_complete=false): finished auditing layer and ready to move up a layer
// End condition B (audit_complete=true ): finished auditing the tree, no more layers remaining
// End condition A: check if finished auditing this layer
if (child_chunk.empty())
{
// No more children, expect to be done auditing layer and ready to move up a layer
if (result != MDB_NOTFOUND)
throw0(DB_ERROR(lmdb_error("unexpected parent result at parent_layer_idx " + std::to_string(parent_layer_idx)
+ " , child_chunk_idx " + std::to_string(child_chunk_idx) + " : ", result).c_str()));
MDEBUG("Finished auditing layer " << child_layer_idx);
audit_complete = false;
break;
}
// End condition B: check if finished auditing the tree
if (child_chunk_idx == 0 && child_chunk.size() == 1)
{
if (p_result != MDB_NOTFOUND)
throw0(DB_ERROR(lmdb_error("unexpected parent of root at parent_layer_idx " + std::to_string(parent_layer_idx)
+ " , child_chunk_idx " + std::to_string(child_chunk_idx) + " : ", result).c_str()));
MDEBUG("Encountered root at layer_idx " << child_layer_idx);
audit_complete = true;
break;
}
if (result != MDB_SUCCESS)
throw0(DB_ERROR(lmdb_error("Failed to get child: ", result).c_str()));
throw0(DB_ERROR(lmdb_error("Failed to get parent: ", result).c_str()));
const auto *lv = (layer_val *)v_child.mv_data;
auto child_point = c_child->from_bytes(lv->child_chunk_hash);
if (child_layer_idx != *(uint64_t*)k_child.mv_data)
throw0(DB_ERROR("unexpected child encountered"));
if (parent_layer_idx != *(uint64_t*)k_parent.mv_data)
throw0(DB_ERROR("unexpected parent encountered"));
child_chunk.emplace_back(std::move(child_point));
// Get the expected chunk hash
std::vector<typename C_PARENT::Scalar> child_scalars;
child_scalars.reserve(child_chunk.size());
for (const auto &child : child_chunk)
child_scalars.emplace_back(c_child->point_to_cycle_scalar(child));
const typename C_PARENT::Chunk chunk{child_scalars.data(), child_scalars.size()};
if (child_chunk.size() == chunk_width)
break;
for (uint64_t i = 0; i < child_scalars.size(); ++i)
MDEBUG("Hashing " << c_parent->to_string(child_scalars[i]));
const auto chunk_hash = fcmp_pp::curve_trees::get_new_parent(c_parent, chunk);
MDEBUG("Expected chunk_hash " << c_parent->to_string(chunk_hash) << " (" << child_scalars.size() << " children)");
const auto *lv = (layer_val *)v_parent.mv_data;
MDEBUG("Actual chunk hash " << epee::string_tools::pod_to_hex(lv->child_chunk_hash));
const auto actual_bytes = lv->child_chunk_hash;
const auto expected_bytes = c_parent->to_bytes(chunk_hash);
if (actual_bytes != expected_bytes)
throw0(DB_ERROR(("unexpected hash at child_chunk_idx " + std::to_string(child_chunk_idx)).c_str()));
if (lv->child_chunk_idx != child_chunk_idx)
throw0(DB_ERROR(("unexpected child_chunk_idx, epxected " + std::to_string(child_chunk_idx)).c_str()));
++child_chunk_idx;
}
// Get the actual chunk hash from the db
const uint64_t parent_layer_idx = layer_idx + 1;
MDB_val_copy<uint64_t> k_parent(parent_layer_idx);
MDB_val_set(v_parent, child_chunk_idx);
TXN_POSTFIX_RDONLY();
// Check for end conditions
// End condition A (return false): finished auditing layer and ready to move up a layer
// End condition B (return true): finished auditing the tree, no more layers remaining
int result = mdb_cursor_get(m_cur_layers, &k_parent, &v_parent, MDB_GET_BOTH);
// End condition A: check if finished auditing this layer
if (child_chunk.empty())
{
// No more children, expect to be done auditing layer and ready to move up a layer
if (result != MDB_NOTFOUND)
throw0(DB_ERROR(lmdb_error("unexpected parent result at parent_layer_idx " + std::to_string(parent_layer_idx)
+ " , child_chunk_idx " + std::to_string(child_chunk_idx) + " : ", result).c_str()));
MDEBUG("Finished auditing layer " << layer_idx);
TXN_POSTFIX_RDONLY();
return false;
}
// End condition B: check if finished auditing the tree
if (child_chunk_idx == 0 && child_chunk.size() == 1)
{
if (result != MDB_NOTFOUND)
throw0(DB_ERROR(lmdb_error("unexpected parent of root at parent_layer_idx " + std::to_string(parent_layer_idx)
+ " , child_chunk_idx " + std::to_string(child_chunk_idx) + " : ", result).c_str()));
MDEBUG("Encountered root at layer_idx " << layer_idx);
TXN_POSTFIX_RDONLY();
return true;
}
if (result != MDB_SUCCESS)
throw0(DB_ERROR(lmdb_error("Failed to get parent: ", result).c_str()));
// Get the expected chunk hash
std::vector<typename C_PARENT::Scalar> child_scalars;
child_scalars.reserve(child_chunk.size());
for (const auto &child : child_chunk)
child_scalars.emplace_back(c_child->point_to_cycle_scalar(child));
const typename C_PARENT::Chunk chunk{child_scalars.data(), child_scalars.size()};
for (uint64_t i = 0; i < child_scalars.size(); ++i)
MDEBUG("Hashing " << c_parent->to_string(child_scalars[i]));
const auto chunk_hash = fcmp_pp::curve_trees::get_new_parent(c_parent, chunk);
MDEBUG("chunk_hash " << c_parent->to_string(chunk_hash) << " , hash init point: "
<< c_parent->to_string(c_parent->hash_init_point()) << " (" << child_scalars.size() << " children)");
const auto *lv = (layer_val *)v_parent.mv_data;
MDEBUG("Actual chunk hash " << epee::string_tools::pod_to_hex(lv->child_chunk_hash));
const auto actual_bytes = lv->child_chunk_hash;
const auto expected_bytes = c_parent->to_bytes(chunk_hash);
if (actual_bytes != expected_bytes)
throw0(DB_ERROR(("unexpected hash at child_chunk_idx " + std::to_string(child_chunk_idx)).c_str()));
// TODO: use while (1) for iterative pattern, don't use recursion
return this->audit_layer(c_child,
c_parent,
layer_idx,
child_start_idx + child_chunk.size(),
child_chunk_idx + 1,
chunk_width);
return audit_complete;
}
std::vector<fcmp_pp::curve_trees::LeafTupleContext> BlockchainLMDB::get_leaf_tuples_at_unlock_block_id(


@@ -445,8 +445,6 @@ private:
bool audit_layer(const std::unique_ptr<C_CHILD> &c_child,
const std::unique_ptr<C_PARENT> &c_parent,
const uint64_t layer_idx,
const uint64_t child_start_idx,
const uint64_t child_chunk_idx,
const uint64_t chunk_width) const;
std::vector<fcmp_pp::curve_trees::LeafTupleContext> get_leaf_tuples_at_unlock_block_id(uint64_t block_id);


@@ -1397,12 +1397,12 @@ TEST(Serialization, tx_fcmp_pp)
// 2. fcmp++ proof is longer than expected when serializing
{
transaction tx = make_dummy_fcmp_pp_tx();
string blob;
// Extend fcmp++ proof
ASSERT_TRUE(tx.rct_signatures.p.fcmp_pp.size() == fcmp_pp::proof_len(n_inputs));
tx.rct_signatures.p.fcmp_pp.push_back(0x01);
string blob;
ASSERT_FALSE(serialization::dump_binary(tx, blob));
}