
fix: Fix races in slab allocator and lookup tables and add prepending for op_queues #4754

Merged: 6 commits, Feb 27, 2024
11 changes: 10 additions & 1 deletion barretenberg/cpp/src/barretenberg/common/slab_allocator.cpp
@@ -22,7 +22,10 @@ bool allocator_destroyed = false;
// Slabs that are being manually managed by the user.
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
std::unordered_map<void*, std::shared_ptr<void>> manual_slabs;

#ifndef NO_MULTITHREADING
// The manual slabs unordered map is not thread-safe, so we need to manage access to it when multithreaded.
std::mutex manual_slabs_mutex;
@maramihali (Contributor), Feb 26, 2024:
can you please add a comment explaining what this is for?

Author (Contributor):
Added
#endif
template <typename... Args> inline void dbg_info(Args... args)
{
#if LOGGING == 1
@@ -219,6 +222,9 @@ std::shared_ptr<void> get_mem_slab(size_t size)
void* get_mem_slab_raw(size_t size)
{
auto slab = get_mem_slab(size);
#ifndef NO_MULTITHREADING
std::unique_lock<std::mutex> lock(manual_slabs_mutex);
#endif
manual_slabs[slab.get()] = slab;
return slab.get();
}
@@ -229,6 +235,9 @@ void free_mem_slab_raw(void* p)
aligned_free(p);
return;
}
#ifndef NO_MULTITHREADING
std::unique_lock<std::mutex> lock(manual_slabs_mutex);
#endif
manual_slabs.erase(p);
}
} // namespace bb
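The locking pattern introduced above can be sketched in isolation. This is a minimal, self-contained illustration of the idea, not barretenberg code; the names `registry`, `checkout`, and `release` are hypothetical:

```cpp
#include <memory>
#include <mutex>
#include <unordered_map>

// A shared mutex guards every access to the non-thread-safe map; the lock
// compiles away when NO_MULTITHREADING is defined, mirroring the diff above.
std::unordered_map<void*, std::shared_ptr<void>> registry;
#ifndef NO_MULTITHREADING
std::mutex registry_mutex;
#endif

void* checkout(std::shared_ptr<void> slab)
{
#ifndef NO_MULTITHREADING
    std::unique_lock<std::mutex> lock(registry_mutex);
#endif
    void* raw = slab.get();
    registry[raw] = std::move(slab); // keep the slab alive while it is checked out
    return raw;
}

void release(void* raw)
{
#ifndef NO_MULTITHREADING
    std::unique_lock<std::mutex> lock(registry_mutex);
#endif
    registry.erase(raw); // dropping the last shared_ptr frees the slab
}
```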
@@ -32,6 +32,7 @@ template <typename CycleGroup> struct VMOperation {
res += static_cast<uint32_t>(reset);
return res;
}
bool operator==(const VMOperation<CycleGroup>& other) const = default;
};
template <typename CycleGroup> struct ScalarMul {
uint32_t pc;
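The defaulted `operator==` added above gives memberwise equality (a C++20 feature), which is what the new `EXPECT_EQ` checks on raw ops in the op-queue test below rely on. A minimal sketch of the semantics, using a hypothetical struct:

```cpp
// C++20: a defaulted operator== compares members in declaration order.
struct Example {
    int a;
    int b;
    bool operator==(const Example&) const = default;
};
static_assert(Example{ 1, 2 } == Example{ 1, 2 });
static_assert(!(Example{ 1, 2 } == Example{ 2, 1 }));
```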
@@ -38,6 +38,73 @@ class ECCOpQueue {

Point get_accumulator() { return accumulator; }

/**
* @brief Prepend the operations from the previous queue (used before accumulation/the merge proof so that circuit
* construction can be run separately)
*
* @param previous The queue whose operations are placed before this queue's own
*/
void prepend_previous_queue(const ECCOpQueue& previous)
{
// Allocate enough space
std::vector<ECCVMOperation> raw_ops_updated(raw_ops.size() + previous.raw_ops.size());
// Copy the previous raw ops to the beginning of the new vector
std::copy(previous.raw_ops.begin(), previous.raw_ops.end(), raw_ops_updated.begin());
// Copy the raw ops from current queue after the ones from the previous queue (concatenate them)
std::copy(raw_ops.begin(),
Contributor:
i stared at this line quite a bit, can you be more explicit in comments?

Author (Contributor):
Added a few

raw_ops.end(),
std::next(raw_ops_updated.begin(), static_cast<long>(previous.raw_ops.size())));

// Swap raw_ops underlying storage
raw_ops.swap(raw_ops_updated);
// Do the same 3 operations for ultra_ops
for (size_t i = 0; i < 4; i++) {
// Allocate new vector
std::vector<Fr> current_ultra_op(ultra_ops[i].size() + previous.ultra_ops[i].size());
// Copy the previous ultra ops to the beginning of the new vector
std::copy(previous.ultra_ops[i].begin(), previous.ultra_ops[i].end(), current_ultra_op.begin());
// Copy the ultra ops from current queue after the ones from the previous queue (concatenate them)
std::copy(ultra_ops[i].begin(),
ultra_ops[i].end(),
std::next(current_ultra_op.begin(), static_cast<long>(previous.ultra_ops[i].size())));
// Swap storage
ultra_ops[i].swap(current_ultra_op);
}
// Update sizes
current_ultra_ops_size += previous.ultra_ops[0].size();
previous_ultra_ops_size += previous.ultra_ops[0].size();
// Update commitments
ultra_ops_commitments = previous.ultra_ops_commitments;
previous_ultra_ops_commitments = previous.previous_ultra_ops_commitments;
}
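The allocate/copy/copy/swap idiom above repeats for `raw_ops` and each of the four `ultra_ops` columns; as an aside, it could be factored into a small generic helper. A sketch under that assumption (`prepend_vector` is hypothetical, not part of this PR):

```cpp
#include <algorithm>
#include <cstddef>
#include <iterator>
#include <vector>

// Build a new vector holding `prefix` followed by the current contents of
// `dst`, then swap it into place so the old storage is released.
template <typename T> void prepend_vector(std::vector<T>& dst, const std::vector<T>& prefix)
{
    std::vector<T> updated(prefix.size() + dst.size());
    std::copy(prefix.begin(), prefix.end(), updated.begin());
    std::copy(dst.begin(), dst.end(), std::next(updated.begin(), static_cast<std::ptrdiff_t>(prefix.size())));
    dst.swap(updated);
}
```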

/**
* @brief Enable using std::swap on queues
*
*/
friend void swap(ECCOpQueue& lhs, ECCOpQueue& rhs)
{
// Swap vectors
lhs.raw_ops.swap(rhs.raw_ops);
for (size_t i = 0; i < 4; i++) {
lhs.ultra_ops[i].swap(rhs.ultra_ops[i]);
}
// Swap sizes
size_t temp = lhs.current_ultra_ops_size;
lhs.current_ultra_ops_size = rhs.current_ultra_ops_size;
rhs.current_ultra_ops_size = temp;
temp = lhs.previous_ultra_ops_size;
lhs.previous_ultra_ops_size = rhs.previous_ultra_ops_size;
rhs.previous_ultra_ops_size = temp;
// Swap commitments
auto commit_temp = lhs.previous_ultra_ops_commitments;
lhs.previous_ultra_ops_commitments = rhs.previous_ultra_ops_commitments;
rhs.previous_ultra_ops_commitments = commit_temp;
commit_temp = lhs.ultra_ops_commitments;
lhs.ultra_ops_commitments = rhs.ultra_ops_commitments;
rhs.ultra_ops_commitments = commit_temp;
}
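One note on this idiom: a qualified `std::swap(a, b)` call (as in the test below) resolves to the generic move-based `std::swap`, not to this friend; the friend overload is only found by argument-dependent lookup for unqualified calls, conventionally written as:

```cpp
// Unqualified call with std::swap as fallback: ADL picks the friend overload.
using std::swap;
swap(queue_a, queue_b);
```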

/**
* @brief Set the current and previous size of the ultra_ops transcript
*
@@ -37,3 +37,49 @@ TEST(ECCOpQueueTest, InternalAccumulatorCorrectness)
// Adding an equality op should reset the accumulator to zero (the point at infinity)
EXPECT_TRUE(op_queue.get_accumulator().is_point_at_infinity());
}

TEST(ECCOpQueueTest, PrependAndSwapTests)
{
using point = g1::affine_element;
using scalar = fr;

// Compute a simple point accumulation natively
auto P1 = point::random_element();
auto P2 = point::random_element();
auto z = scalar::random_element();

// Add operations to a
ECCOpQueue op_queue_a;
op_queue_a.add_accumulate(P1 + P1);
op_queue_a.mul_accumulate(P2, z + z);

// Add different operations to b
ECCOpQueue op_queue_b;
op_queue_b.mul_accumulate(P2, z);
op_queue_b.add_accumulate(P1);

// Add the same operations as in a
ECCOpQueue op_queue_c;
op_queue_c.add_accumulate(P1 + P1);
op_queue_c.mul_accumulate(P2, z + z);

// Swap b with a
std::swap(op_queue_b, op_queue_a);

// Check b==c
for (size_t i = 0; i < op_queue_c.raw_ops.size(); i++) {
EXPECT_EQ(op_queue_b.raw_ops[i], op_queue_c.raw_ops[i]);
}

// Prepend b to a
op_queue_a.prepend_previous_queue(op_queue_b);

// Append to c the same operations that now sit at the end of a
op_queue_c.mul_accumulate(P2, z);
op_queue_c.add_accumulate(P1);

// Check a==c
for (size_t i = 0; i < op_queue_c.raw_ops.size(); i++) {
EXPECT_EQ(op_queue_a.raw_ops[i], op_queue_c.raw_ops[i]);
}
}
@@ -1,6 +1,6 @@
#include "plookup_tables.hpp"
#include "barretenberg/common/constexpr_utils.hpp"

#include <mutex>
namespace bb::plookup {

using namespace bb;
Expand All @@ -10,10 +10,21 @@ namespace {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
std::array<MultiTable, MultiTableId::NUM_MULTI_TABLES> MULTI_TABLES;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
-bool inited = false;
+bool initialised = false;
#ifndef NO_MULTITHREADING
Contributor:
same here about adding a comment

Author (Contributor):
done

// The multitables initialisation procedure is not thread-safe, so we need to make sure only one thread gets to
// initialise them.
std::mutex multi_table_mutex;
#endif
void init_multi_tables()
{
#ifndef NO_MULTITHREADING
std::unique_lock<std::mutex> lock(multi_table_mutex);
#endif
if (initialised) {
return;
}
MULTI_TABLES[MultiTableId::SHA256_CH_INPUT] = sha256_tables::get_choose_input_table(MultiTableId::SHA256_CH_INPUT);
MULTI_TABLES[MultiTableId::SHA256_MAJ_INPUT] =
sha256_tables::get_majority_input_table(MultiTableId::SHA256_MAJ_INPUT);
@@ -96,14 +107,14 @@ void init_multi_tables()
keccak_tables::Rho<8, i>::get_rho_output_table(MultiTableId::KECCAK_NORMALIZE_AND_ROTATE);
});
MULTI_TABLES[MultiTableId::HONK_DUMMY_MULTI] = dummy_tables::get_honk_dummy_multitable();
initialised = true;
}
} // namespace

const MultiTable& create_table(const MultiTableId id)
{
-if (!inited) {
+if (!initialised) {
init_multi_tables();
-inited = true;
+initialised = true;
}
return MULTI_TABLES[id];
}