fix: Fix races in slab allocator and lookup tables and add prepending for op_queues #4754

Merged: 6 commits, Feb 27, 2024
Changes from 3 commits
barretenberg/cpp/src/barretenberg/common/slab_allocator.cpp (10 changes: 9 additions, 1 deletion)
@@ -22,7 +22,9 @@ bool allocator_destroyed = false;
// Slabs that are being manually managed by the user.
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
std::unordered_map<void*, std::shared_ptr<void>> manual_slabs;

#ifndef NO_MULTITHREADING
std::mutex manual_slabs_mutex;
@maramihali (Contributor), Feb 26, 2024:
can you please add a comment explaining what this is for?

Author:
Added

#endif
template <typename... Args> inline void dbg_info(Args... args)
{
#if LOGGING == 1
@@ -219,6 +221,9 @@ std::shared_ptr<void> get_mem_slab(size_t size)
void* get_mem_slab_raw(size_t size)
{
auto slab = get_mem_slab(size);
#ifndef NO_MULTITHREADING
std::unique_lock<std::mutex> lock(manual_slabs_mutex);
#endif
manual_slabs[slab.get()] = slab;
return slab.get();
}
@@ -229,6 +234,9 @@ void free_mem_slab_raw(void* p)
aligned_free(p);
return;
}
#ifndef NO_MULTITHREADING
std::unique_lock<std::mutex> lock(manual_slabs_mutex);
#endif
manual_slabs.erase(p);
}
} // namespace bb
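For context on the change above: manual_slabs is a global map that outlives individual allocations, and get_mem_slab_raw / free_mem_slab_raw can be called from multiple threads, so insert and erase are now serialized behind manual_slabs_mutex whenever multithreading is enabled. A minimal standalone sketch of the same idiom, using illustrative names that are not the repository's:

#include <memory>
#include <mutex>
#include <unordered_map>

// Illustrative stand-ins for manual_slabs / manual_slabs_mutex.
std::unordered_map<void*, std::shared_ptr<void>> tracked_slabs;
#ifndef NO_MULTITHREADING
std::mutex tracked_slabs_mutex; // serializes concurrent insert/erase on tracked_slabs
#endif

void* track_slab(std::shared_ptr<void> slab)
{
    void* raw = slab.get();
#ifndef NO_MULTITHREADING
    std::unique_lock<std::mutex> lock(tracked_slabs_mutex);
#endif
    tracked_slabs[raw] = std::move(slab); // keeps the slab alive while only the raw pointer is held
    return raw;
}

void untrack_slab(void* p)
{
#ifndef NO_MULTITHREADING
    std::unique_lock<std::mutex> lock(tracked_slabs_mutex);
#endif
    tracked_slabs.erase(p); // dropping the last shared_ptr reference releases the slab
}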
@@ -32,6 +32,11 @@ template <typename CycleGroup> struct VMOperation {
res += static_cast<uint32_t>(reset);
return res;
}
bool operator==(const VMOperation<CycleGroup> other) const
{
return add == other.add && mul == other.mul && eq == other.eq && reset == other.reset && z1 == other.z1 &&
z2 == other.z2 && base_point == other.base_point;
}
};
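A side note on the equality operator above: the parameter is taken by value, where const reference is the more common convention, and under C++20 a member-wise comparison like this can often be defaulted rather than written out. A sketch of the defaulted form, using an illustrative stand-in for the CycleGroup-dependent field and point types (not the repository's code):

#include <cstdint>

// Illustrative stand-in for a field/point coordinate type.
struct FieldSketch {
    std::uint64_t v = 0;
    bool operator==(const FieldSketch&) const = default;
};

struct VMOperationSketch {
    bool add = false;
    bool mul = false;
    bool eq = false;
    bool reset = false;
    FieldSketch z1;
    FieldSketch z2;
    FieldSketch base_point_x;
    FieldSketch base_point_y;
    // Compares every member in declaration order, equivalent to the hand-written version.
    bool operator==(const VMOperationSketch&) const = default;
};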
template <typename CycleGroup> struct ScalarMul {
uint32_t pc;
@@ -38,6 +38,69 @@ class ECCOpQueue {

Point get_accumulator() { return accumulator; }

/**
* @brief Prepend the information from the previous queue (used before accumulation/merge proof to be able to run
* circuit construction separately)
*
* @param previous
*/
void prepend_previous_queue(const ECCOpQueue& previous)
{
// Allocate enough space
std::vector<ECCVMOperation> raw_ops_updated(raw_ops.size() + previous.raw_ops.size());
// Copy raw_ops
Reviewer (Contributor):
delete this line

Author:
done

std::copy(previous.raw_ops.begin(), previous.raw_ops.end(), raw_ops_updated.begin());
std::copy(raw_ops.begin(),
Reviewer (Contributor):
i stared at this line quite a bit, can you be more explicit in comments?

Author:
Added a few

raw_ops.end(),
std::next(raw_ops_updated.begin(), static_cast<long>(previous.raw_ops.size())));

// Swap raw_ops underlying storage
raw_ops.swap(raw_ops_updated);
// Do the same 3 operations for ultra_ops
for (size_t i = 0; i < 4; i++) {
std::vector<Fr> current_ultra_op(ultra_ops[i].size() + previous.ultra_ops[i].size());
std::copy(previous.ultra_ops[i].begin(), previous.ultra_ops[i].end(), current_ultra_op.begin());
std::copy(ultra_ops[i].begin(),
ultra_ops[i].end(),
std::next(current_ultra_op.begin(), static_cast<long>(previous.ultra_ops[i].size())));
ultra_ops[i].swap(current_ultra_op);
}
// Update sizes
current_ultra_ops_size += previous.ultra_ops[0].size();
previous_ultra_ops_size += previous.ultra_ops[0].size();
// Update commitments
ultra_ops_commitments = previous.ultra_ops_commitments;
previous_ultra_ops_commitments = previous.previous_ultra_ops_commitments;
}
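The pair of std::copy calls above is just an offset copy: the previous queue's operations fill the front of the new buffer, and the current operations are written immediately after them, starting at offset previous.raw_ops.size(). A stripped-down sketch of the same pattern on plain integers (purely illustrative, not the repository's code):

#include <algorithm>
#include <iterator>
#include <vector>

std::vector<int> prepend(const std::vector<int>& previous, const std::vector<int>& current)
{
    // Room for both sequences.
    std::vector<int> merged(previous.size() + current.size());
    // Previous elements occupy indices [0, previous.size()).
    std::copy(previous.begin(), previous.end(), merged.begin());
    // Current elements start right after, at offset previous.size().
    std::copy(current.begin(),
              current.end(),
              std::next(merged.begin(), static_cast<long>(previous.size())));
    return merged;
}

// e.g. prepend({1, 2}, {3, 4}) yields {1, 2, 3, 4}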

/**
* @brief Swap all internal values in this queue with the other
*
* @param other
*/
void swap(ECCOpQueue* other)
Reviewer (Contributor):
can't this be achieved with std::swap?

Author:
Made it so you can use std::swap

{
// Swap vectors
raw_ops.swap(other->raw_ops);
for (size_t i = 0; i < 4; i++) {
ultra_ops[i].swap(other->ultra_ops[i]);
}
// Swap sizes
size_t temp = current_ultra_ops_size;
current_ultra_ops_size = other->current_ultra_ops_size;
other->current_ultra_ops_size = temp;
temp = previous_ultra_ops_size;
previous_ultra_ops_size = other->previous_ultra_ops_size;
other->previous_ultra_ops_size = temp;
// Swap commitments
auto commit_temp = previous_ultra_ops_commitments;
previous_ultra_ops_commitments = other->previous_ultra_ops_commitments;
other->previous_ultra_ops_commitments = commit_temp;
commit_temp = ultra_ops_commitments;
ultra_ops_commitments = other->ultra_ops_commitments;
other->ultra_ops_commitments = commit_temp;
}
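Regarding the std::swap question above: the pointer-taking member swap shown in this revision works, but the conventional pattern is a member swap taking a reference plus a free overload found by argument-dependent lookup, so generic code calling swap unqualified gets the member-wise exchange while std::swap still works via moves. A hedged sketch of that shape (the later commits may differ):

#include <cstddef>
#include <utility>
#include <vector>

class OpQueueSketch {
  public:
    void swap(OpQueueSketch& other) noexcept
    {
        raw_ops.swap(other.raw_ops);
        std::swap(current_size, other.current_size);
    }

  private:
    std::vector<int> raw_ops;
    std::size_t current_size = 0;

    // Found by ADL: unqualified swap(a, b) uses the member-wise swap;
    // std::swap(a, b) falls back to moves, which are also cheap here.
    friend void swap(OpQueueSketch& a, OpQueueSketch& b) noexcept { a.swap(b); }
};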

/**
* @brief Set the current and previous size of the ultra_ops transcript
*
@@ -37,3 +37,49 @@ TEST(ECCOpQueueTest, InternalAccumulatorCorrectness)
// Adding an equality op should reset the accumulator to zero (the point at infinity)
EXPECT_TRUE(op_queue.get_accumulator().is_point_at_infinity());
}

TEST(ECCOpQueueTest, PrependAndSwapTests)
{
using point = g1::affine_element;
using scalar = fr;

// Compute a simple point accumulation natively
auto P1 = point::random_element();
auto P2 = point::random_element();
auto z = scalar::random_element();

// Add operations to a
ECCOpQueue op_queue_a;
op_queue_a.add_accumulate(P1 + P1);
op_queue_a.mul_accumulate(P2, z + z);

// Add different operations to b
ECCOpQueue op_queue_b;
op_queue_b.mul_accumulate(P2, z);
op_queue_b.add_accumulate(P1);

// Add same operations as to a
ECCOpQueue op_queue_c;
op_queue_c.add_accumulate(P1 + P1);
op_queue_c.mul_accumulate(P2, z + z);

// Swap b with a
op_queue_b.swap(&op_queue_a);

// Check b==c
for (size_t i = 0; i < op_queue_c.raw_ops.size(); i++) {
EXPECT_EQ(op_queue_b.raw_ops[i], op_queue_c.raw_ops[i]);
}

// Prepend b to a
op_queue_a.prepend_previous_queue(op_queue_b);

// Append same operations as now in a to c
op_queue_c.mul_accumulate(P2, z);
op_queue_c.add_accumulate(P1);

// Check a==c
for (size_t i = 0; i < op_queue_c.raw_ops.size(); i++) {
EXPECT_EQ(op_queue_a.raw_ops[i], op_queue_c.raw_ops[i]);
}
}
@@ -1,6 +1,6 @@
#include "plookup_tables.hpp"
#include "barretenberg/common/constexpr_utils.hpp"

#include <mutex>
namespace bb::plookup {

using namespace bb;
@@ -11,9 +11,17 @@ namespace {
std::array<MultiTable, MultiTableId::NUM_MULTI_TABLES> MULTI_TABLES;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
bool inited = false;

#ifndef NO_MULTITHREADING
Reviewer (Contributor):
same here about adding a comment

Author:
done

std::mutex multi_table_mutex;
#endif
void init_multi_tables()
{
#ifndef NO_MULTITHREADING
std::unique_lock<std::mutex> lock(multi_table_mutex);
#endif
if (inited) {
Reviewer (Contributor):
initialised? :)

Author:
ok)

return;
}
MULTI_TABLES[MultiTableId::SHA256_CH_INPUT] = sha256_tables::get_choose_input_table(MultiTableId::SHA256_CH_INPUT);
MULTI_TABLES[MultiTableId::SHA256_MAJ_INPUT] =
sha256_tables::get_majority_input_table(MultiTableId::SHA256_MAJ_INPUT);
@@ -96,9 +104,9 @@ void init_multi_tables()
keccak_tables::Rho<8, i>::get_rho_output_table(MultiTableId::KECCAK_NORMALIZE_AND_ROTATE);
});
MULTI_TABLES[MultiTableId::HONK_DUMMY_MULTI] = dummy_tables::get_honk_dummy_multitable();
inited = true;
}
} // namespace

const MultiTable& create_table(const MultiTableId id)
{
if (!inited) {
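The plookup change above wraps init_multi_tables in multi_table_mutex while keeping the inited flag, so the MULTI_TABLES array is built exactly once even when several threads race to use it. A compact sketch of the same once-only initialization idiom with illustrative names (std::call_once would be an alternative; the sketch mirrors the flag-plus-mutex approach used here):

#include <mutex>

// Illustrative stand-ins for inited / multi_table_mutex.
bool tables_inited = false;
#ifndef NO_MULTITHREADING
std::mutex tables_init_mutex; // guards tables_inited and the construction below
#endif

void init_tables_once()
{
#ifndef NO_MULTITHREADING
    std::unique_lock<std::mutex> lock(tables_init_mutex);
#endif
    if (tables_inited) {
        return; // another thread already built the tables
    }
    // ... build the lookup tables here ...
    tables_inited = true;
}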