diff --git a/barretenberg/cpp/CMakePresets.json b/barretenberg/cpp/CMakePresets.json
index b87e10709b6..7fb7f286d69 100644
--- a/barretenberg/cpp/CMakePresets.json
+++ b/barretenberg/cpp/CMakePresets.json
@@ -140,6 +140,18 @@
         "LDFLAGS": "-fsanitize=thread"
       }
     },
+    {
+      "name": "ubsan",
+      "displayName": "Debugging build with undefined behavior sanitizer on Clang-16",
+      "description": "Build with undefined behavior sanitizer on clang16 with debugging information",
+      "inherits": "clang16-dbg",
+      "binaryDir": "build-ubsan",
+      "environment": {
+        "CFLAGS": "-fsanitize=undefined -fsanitize-recover=unsigned-integer-overflow",
+        "CXXFLAGS": "-fsanitize=undefined -fsanitize-recover=unsigned-integer-overflow",
+        "LDFLAGS": "-fsanitize=undefined -fsanitize-recover=unsigned-integer-overflow"
+      }
+    },
     {
       "name": "msan",
       "displayName": "Debugging build with memory sanitizer on Clang-16",
@@ -336,6 +348,11 @@
       "inherits": "default",
       "configurePreset": "tsan"
     },
+    {
+      "name": "ubsan",
+      "inherits": "default",
+      "configurePreset": "ubsan"
+    },
     {
       "name": "coverage",
       "inherits": "default",
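Note on the preset above: `-fsanitize=undefined` instruments the build so that operations such as over-wide shifts are reported at runtime. A minimal sketch of the class of bug this preset is meant to surface in the wnaf code touched below (the function and values are illustrative, not from the codebase):

```cpp
#include <cstdint>
#include <cstdio>

// Built under the "ubsan" preset, the shift below is reported at runtime:
// shifting a 64-bit value by 64 bits is undefined behavior.
static uint64_t hi_bits(uint64_t hi, uint64_t bit_position)
{
    // When bit_position is a non-zero multiple of 64, the shift amount is 64.
    return hi << (64UL - (bit_position & 63UL));
}

int main()
{
    std::printf("%llu\n", static_cast<unsigned long long>(hi_bits(1, 128)));
    return 0;
}
```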
diff --git a/barretenberg/cpp/src/barretenberg/common/compiler_hints.hpp b/barretenberg/cpp/src/barretenberg/common/compiler_hints.hpp
index 1815816a3c4..74e1b4aeccc 100644
--- a/barretenberg/cpp/src/barretenberg/common/compiler_hints.hpp
+++ b/barretenberg/cpp/src/barretenberg/common/compiler_hints.hpp
@@ -13,4 +13,10 @@
 #else
 #define BBERG_PROFILE
 #define BBERG_NO_PROFILE
+#endif
+
+#if defined(__clang__)
+#define BBERG_ALLOW_SHIFT_OVERFLOW __attribute__((no_sanitize("shift")))
+#else
+#define BBERG_ALLOW_SHIFT_OVERFLOW
 #endif
\ No newline at end of file
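How the new macro is meant to be applied (a minimal sketch; the function here is illustrative, the real annotation lands on get_wnaf_bits in wnaf.hpp below). On Clang it expands to `__attribute__((no_sanitize("shift")))`, so UBSan's shift checks are skipped for the annotated function only; on other compilers it expands to nothing:

```cpp
#include "barretenberg/common/compiler_hints.hpp"
#include <cstdint>

// Illustrative only: the attribute sits between the return type and the
// function name, matching the placement used on get_wnaf_bits below.
inline uint64_t BBERG_ALLOW_SHIFT_OVERFLOW shift_down(uint64_t limb, uint64_t amount)
{
    // amount may deliberately reach 64 here; the sanitizer stays silent.
    return limb >> amount;
}
```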
diff --git a/barretenberg/cpp/src/barretenberg/ecc/fields/field_declarations.hpp b/barretenberg/cpp/src/barretenberg/ecc/fields/field_declarations.hpp
index a0446f17916..ae96dfd05da 100644
--- a/barretenberg/cpp/src/barretenberg/ecc/fields/field_declarations.hpp
+++ b/barretenberg/cpp/src/barretenberg/ecc/fields/field_declarations.hpp
@@ -368,6 +368,20 @@ template <class Params> struct alignas(32) field {
         k1.data[1] = t2.data[1];
     }
 
+    /**
+     * @brief Uses split_into_endomorphism_scalars(field, field&, field&), but returns the decomposed scalars
+     * directly as uint64_t pairs, each representing a uint128.
+     **/
+    static std::array<std::array<uint64_t, 2>, 2> split_into_endomorphism_scalars(const field& k)
+    {
+        // if the modulus is too large, we use more space than we have
+        static_assert(Params::modulus_3 < 0x4000000000000000ULL);
+        field f1;
+        field f2;
+        split_into_endomorphism_scalars(k, f1, f2);
+        return std::array{ std::array{ f1.data[0], f1.data[1] }, std::array{ f2.data[0], f2.data[1] } };
+    }
+
     static void split_into_endomorphism_scalars_384(const field& input, field& k1_out, field& k2_out)
     {
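Usage sketch for the new overload (mirrors the call sites updated below; assumes a concrete field instantiation such as barretenberg::fr). Each returned half is a `std::array<uint64_t, 2>` holding the low and high 64 bits of a 128-bit endomorphism scalar:

```cpp
#include <array>
#include <cstdint>

// Sketch only: fr stands in for a field instantiation (e.g. barretenberg::fr).
void split_example(const fr& k)
{
    // Structured bindings pull out the two uint64_t pairs directly.
    auto [k1, k2] = fr::split_into_endomorphism_scalars(k);
    // k1 and k2 feed straight into the Uint64Pair-taking wnaf routines below.
    static_cast<void>(k1[0] + k2[1]);
}
```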
diff --git a/barretenberg/cpp/src/barretenberg/ecc/groups/element_impl.hpp b/barretenberg/cpp/src/barretenberg/ecc/groups/element_impl.hpp
index 2deae832391..fecfecb2c50 100644
--- a/barretenberg/cpp/src/barretenberg/ecc/groups/element_impl.hpp
+++ b/barretenberg/cpp/src/barretenberg/ecc/groups/element_impl.hpp
@@ -637,14 +637,13 @@ element<Fq, Fr, T> element<Fq, Fr, T>::mul_with_endomorphism(const Fr& exponent)
     }
 
     uint64_t wnaf_table[num_rounds * 2];
-    Fr endo_scalar;
-    Fr::split_into_endomorphism_scalars(converted_scalar, endo_scalar, *(Fr*)&endo_scalar.data[2]); // NOLINT
+    std::array<std::array<uint64_t, 2>, 2> endo_scalars = Fr::split_into_endomorphism_scalars(converted_scalar);
 
     bool skew = false;
     bool endo_skew = false;
-    wnaf::fixed_wnaf(&endo_scalar.data[0], &wnaf_table[0], skew, 0, 2, num_wnaf_bits);
-    wnaf::fixed_wnaf(&endo_scalar.data[2], &wnaf_table[1], endo_skew, 0, 2, num_wnaf_bits);
+    wnaf::fixed_wnaf(endo_scalars[0], &wnaf_table[0], skew, 0, 2, num_wnaf_bits);
+    wnaf::fixed_wnaf(endo_scalars[1], &wnaf_table[1], endo_skew, 0, 2, num_wnaf_bits);
 
     element work_element{ T::one_x, T::one_y, Fq::one() };
     work_element.self_set_infinity();
@@ -783,14 +782,13 @@ std::vector<affine_element<Fq, Fr, T>> element<Fq, Fr, T>::batch_mul_with_endomo
     }
 
     uint64_t wnaf_table[num_rounds * 2];
-    Fr endo_scalar;
-    Fr::split_into_endomorphism_scalars(converted_scalar, endo_scalar, *(Fr*)&endo_scalar.data[2]); // NOLINT
+    std::array<std::array<uint64_t, 2>, 2> endo_scalars = Fr::split_into_endomorphism_scalars(converted_scalar);
 
     bool skew = false;
     bool endo_skew = false;
-    wnaf::fixed_wnaf(&endo_scalar.data[0], &wnaf_table[0], skew, 0, 2, num_wnaf_bits);
-    wnaf::fixed_wnaf(&endo_scalar.data[2], &wnaf_table[1], endo_skew, 0, 2, num_wnaf_bits);
+    wnaf::fixed_wnaf(endo_scalars[0], &wnaf_table[0], skew, 0, 2, num_wnaf_bits);
+    wnaf::fixed_wnaf(endo_scalars[1], &wnaf_table[1], endo_skew, 0, 2, num_wnaf_bits);
 
     std::vector<element> work_elements(num_points);
diff --git a/barretenberg/cpp/src/barretenberg/ecc/groups/wnaf.hpp b/barretenberg/cpp/src/barretenberg/ecc/groups/wnaf.hpp
index c6dbee10ead..444ec74da92 100644
--- a/barretenberg/cpp/src/barretenberg/ecc/groups/wnaf.hpp
+++ b/barretenberg/cpp/src/barretenberg/ecc/groups/wnaf.hpp
@@ -1,10 +1,13 @@
 #pragma once
+#include "barretenberg/common/compiler_hints.hpp"
 #include "barretenberg/numeric/bitop/get_msb.hpp"
+#include <array>
 #include <cstddef>
 #include <cstdint>
 
 // NOLINTBEGIN(readability-implicit-bool-conversion)
 namespace barretenberg::wnaf {
+using Uint64Pair = std::array<uint64_t, 2>;
 constexpr size_t SCALAR_BITS = 127;
 
 #define WNAF_SIZE(x) ((barretenberg::wnaf::SCALAR_BITS + (x)-1) / (x)) // NOLINT(cppcoreguidelines-macro-usage)
@@ -70,7 +72,7 @@ constexpr size_t get_num_rounds(const size_t num_points)
     return WNAF_SIZE(bits_per_bucket + 1);
 }
 
-template <size_t bits, size_t bit_position> inline uint64_t get_wnaf_bits_const(const uint64_t* scalar) noexcept
+template <size_t bits, size_t bit_position> inline uint64_t get_wnaf_bits_const(const Uint64Pair& scalar) noexcept
 {
     if constexpr (bits == 0) {
         return 0ULL;
     }
@@ -104,7 +106,9 @@
     }
 }
 
-inline uint64_t get_wnaf_bits(const uint64_t* scalar, const uint64_t bits, const uint64_t bit_position) noexcept
+inline uint64_t BBERG_ALLOW_SHIFT_OVERFLOW get_wnaf_bits(const Uint64Pair& scalar,
+                                                         const uint64_t bits,
+                                                         const uint64_t bit_position) noexcept
 {
     /**
      * we want to take a 128 bit scalar and shift it down by (bit_position).
@@ -132,8 +136,11 @@ inline uint64_t get_wnaf_bits(const uint64_t* scalar, const uint64_t bits, const
     return (lo & bit_mask) | (hi & hi_mask);
 }
 
-inline void fixed_wnaf_packed(
-    const uint64_t* scalar, uint64_t* wnaf, bool& skew_map, const uint64_t point_index, const size_t wnaf_bits) noexcept
+inline void fixed_wnaf_packed(const Uint64Pair& scalar,
+                              uint64_t* wnaf,
+                              bool& skew_map,
+                              const uint64_t point_index,
+                              const size_t wnaf_bits) noexcept
 {
     skew_map = ((scalar[0] & 1) == 0);
     uint64_t previous = get_wnaf_bits(scalar, wnaf_bits, 0) + static_cast<uint64_t>(skew_map);
@@ -156,7 +163,7 @@ inline void fixed_wnaf_packed(
     wnaf[0] = ((slice + predicate) >> 1UL) | (point_index);
 }
 
-inline void fixed_wnaf(const uint64_t* scalar,
+inline void fixed_wnaf(const Uint64Pair& scalar,
                        uint64_t* wnaf,
                        bool& skew_map,
                        const uint64_t point_index,
@@ -212,7 +219,7 @@ inline void fixed_wnaf(const uint64_t* scalar,
  * Ok, so we should be a bit more limited with when we don't include window entries.
  * The goal here is to identify short scalars, so we want to identify the most significant non-zero window
  **/
-inline uint64_t get_num_scalar_bits(const uint64_t* scalar)
+inline uint64_t get_num_scalar_bits(const Uint64Pair& scalar)
 {
     const uint64_t msb_1 = numeric::get_msb(scalar[1]);
     const uint64_t msb_0 = numeric::get_msb(scalar[0]);
@@ -252,7 +259,7 @@ inline uint64_t get_num_scalar_bits(const uint64_t* scalar)
 * @param num_points Total points in the MSM (2*num_initial_points)
 *
 */
-inline void fixed_wnaf_with_counts(const uint64_t* scalar,
+inline void fixed_wnaf_with_counts(const Uint64Pair& scalar,
                                    uint64_t* wnaf,
                                    bool& skew_map,
                                    uint64_t* wnaf_round_counts,
@@ -326,7 +333,10 @@ inline void fixed_wnaf_with_counts(const uint64_t* scalar,
 }
 
 template <size_t num_points, size_t wnaf_bits, size_t round_i>
-inline void wnaf_round(uint64_t* scalar, uint64_t* wnaf, const uint64_t point_index, const uint64_t previous) noexcept
+inline void wnaf_round(const Uint64Pair& scalar,
+                       uint64_t* wnaf,
+                       const uint64_t point_index,
+                       const uint64_t previous) noexcept
 {
     constexpr size_t wnaf_entries = (SCALAR_BITS + wnaf_bits - 1) / wnaf_bits;
     constexpr auto log2_num_points = static_cast<uint64_t>(numeric::get_msb(static_cast<uint64_t>(num_points)));
@@ -351,7 +361,10 @@ inline void wnaf_round(uint64_t* scalar, uint64_t* wnaf, const uint64_t point_in
 }
 
 template <size_t scalar_bits, size_t num_points, size_t wnaf_bits, size_t round_i>
-inline void wnaf_round(uint64_t* scalar, uint64_t* wnaf, const uint64_t point_index, const uint64_t previous) noexcept
+inline void wnaf_round(const Uint64Pair& scalar,
+                       uint64_t* wnaf,
+                       const uint64_t point_index,
+                       const uint64_t previous) noexcept
 {
     constexpr size_t wnaf_entries = (scalar_bits + wnaf_bits - 1) / wnaf_bits;
     constexpr auto log2_num_points = static_cast<uint64_t>(numeric::get_msb(static_cast<uint64_t>(num_points)));
@@ -377,7 +390,7 @@ inline void wnaf_round(uint64_t* scalar, uint64_t* wnaf, const uint64_t point_in
 }
 
 template <size_t wnaf_bits, size_t round_i>
-inline void wnaf_round_packed(const uint64_t* scalar,
+inline void wnaf_round_packed(const Uint64Pair& scalar,
                               uint64_t* wnaf,
                               const uint64_t point_index,
                               const uint64_t previous) noexcept
@@ -406,7 +419,7 @@ inline void wnaf_round_packed(const uint64_t* scalar,
 }
 
 template <size_t num_points, size_t wnaf_bits>
-inline void fixed_wnaf(uint64_t* scalar, uint64_t* wnaf, bool& skew_map, const size_t point_index) noexcept
+inline void fixed_wnaf(const Uint64Pair& scalar, uint64_t* wnaf, bool& skew_map, const size_t point_index) noexcept
 {
     skew_map = ((scalar[0] & 1) == 0);
     uint64_t previous = get_wnaf_bits_const<wnaf_bits, 0>(scalar) + static_cast<uint64_t>(skew_map);
@@ -414,7 +427,7 @@ inline void fixed_wnaf(uint64_t* scalar, uint64_t* wnaf, bool& skew_map, const s
 }
 
 template <size_t scalar_bits, size_t num_points, size_t wnaf_bits>
-inline void fixed_wnaf(uint64_t* scalar, uint64_t* wnaf, bool& skew_map, const size_t point_index) noexcept
+inline void fixed_wnaf(const Uint64Pair& scalar, uint64_t* wnaf, bool& skew_map, const size_t point_index) noexcept
 {
     skew_map = ((scalar[0] & 1) == 0);
     uint64_t previous = get_wnaf_bits_const<wnaf_bits, 0>(scalar) + static_cast<uint64_t>(skew_map);
@@ -422,7 +435,7 @@ inline void fixed_wnaf(uint64_t* scalar, uint64_t* wnaf, bool& skew_map, const s
 }
 
 template <size_t scalar_bits, size_t num_points, size_t wnaf_bits, size_t round_i>
-inline void wnaf_round_with_restricted_first_slice(uint64_t* scalar,
+inline void wnaf_round_with_restricted_first_slice(const Uint64Pair& scalar,
                                                    uint64_t* wnaf,
                                                    const uint64_t point_index,
                                                    const uint64_t previous) noexcept
@@ -464,7 +477,7 @@ inline void wnaf_round_with_restricted_first_slice(uint64_t* scalar,
 }
 
 template <size_t scalar_bits, size_t num_points, size_t wnaf_bits>
-inline void fixed_wnaf_with_restricted_first_slice(uint64_t* scalar,
+inline void fixed_wnaf_with_restricted_first_slice(const Uint64Pair& scalar,
                                                    uint64_t* wnaf,
                                                    bool& skew_map,
                                                    const size_t point_index) noexcept
@@ -478,7 +491,7 @@ inline void fixed_wnaf_with_restricted_first_slice(uint64_t* scalar,
 }
 
 // template <size_t wnaf_bits>
-// inline void fixed_wnaf_packed(const uint64_t* scalar,
+// inline void fixed_wnaf_packed(const Uint64Pair& scalar,
 //                               uint64_t* wnaf,
 //                               bool& skew_map,
 //                               const uint64_t point_index) noexcept
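Call-site sketch for the reworked Uint64Pair signatures (values arbitrary; mirrors the test updates below). Callers that previously passed `&scalar.data[0]` now brace-initialize a pair of limbs, giving the callee a statically sized array instead of a raw pointer:

```cpp
#include "barretenberg/ecc/groups/wnaf.hpp"

#include <array>
#include <cstdint>

void wnaf_example()
{
    std::array<uint64_t, 2> scalar{ 123456789ULL, 0ULL }; // { lo limb, hi limb }
    uint64_t wnaf[WNAF_SIZE(5)] = { 0 };
    bool skew = false;
    // num_points = 1, wnaf_bits = 5, as in the existing tests.
    barretenberg::wnaf::fixed_wnaf<1, 5>(scalar, wnaf, skew, 0);
}
```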
diff --git a/barretenberg/cpp/src/barretenberg/ecc/groups/wnaf.test.cpp b/barretenberg/cpp/src/barretenberg/ecc/groups/wnaf.test.cpp
index 916dd3de970..dc3c81f2651 100644
--- a/barretenberg/cpp/src/barretenberg/ecc/groups/wnaf.test.cpp
+++ b/barretenberg/cpp/src/barretenberg/ecc/groups/wnaf.test.cpp
@@ -36,7 +36,7 @@ void recover_fixed_wnaf(const uint64_t* wnaf, bool skew, uint64_t& hi, uint64_t&
 
 TEST(wnaf, WnafZero)
 {
-    uint64_t buffer[2]{ 0, 0 };
+    std::array<uint64_t, 2> buffer{ 0, 0 };
     uint64_t wnaf[WNAF_SIZE(5)] = { 0 };
     bool skew = false;
     wnaf::fixed_wnaf<1, 5>(buffer, wnaf, skew, 0);
@@ -60,7 +60,7 @@ TEST(wnaf, WnafTwoBitWindow)
     constexpr uint32_t num_quads = (num_bits >> 1) + 1;
     uint64_t wnaf[num_quads] = { 0 };
     bool skew = false;
-    barretenberg::wnaf::fixed_wnaf<256, 1, window>(&input.data[0], wnaf, skew, 0);
+    barretenberg::wnaf::fixed_wnaf<256, 1, window>({ input.data[0], input.data[1] }, wnaf, skew, 0);
 
     /**
      * For representing even numbers, we define a skew:
@@ -109,7 +109,7 @@ TEST(wnaf, WnafFixed)
     buffer.data[1] &= 0x7fffffffffffffffUL;
     uint64_t wnaf[WNAF_SIZE(5)] = { 0 };
     bool skew = false;
-    wnaf::fixed_wnaf<1, 5>(&buffer.data[0], wnaf, skew, 0);
+    wnaf::fixed_wnaf<1, 5>({ buffer.data[0], buffer.data[1] }, wnaf, skew, 0);
     uint64_t recovered_hi = 0;
     uint64_t recovered_lo = 0;
     recover_fixed_wnaf(wnaf, skew, recovered_hi, recovered_lo, 5);
@@ -119,7 +119,7 @@
 
 TEST(wnaf, WnafFixedSimpleLo)
 {
-    uint64_t rand_buffer[2]{ 1, 0 };
+    std::array<uint64_t, 2> rand_buffer = { 1, 0 };
     uint64_t wnaf[WNAF_SIZE(5)]{ 0 };
     bool skew = false;
     wnaf::fixed_wnaf<1, 5>(rand_buffer, wnaf, skew, 0);
@@ -132,7 +132,7 @@
 
 TEST(wnaf, WnafFixedSimpleHi)
 {
-    uint64_t rand_buffer[2] = { 0, 1 };
+    std::array<uint64_t, 2> rand_buffer = { 0, 1 };
     uint64_t wnaf[WNAF_SIZE(5)] = { 0 };
     bool skew = false;
     wnaf::fixed_wnaf<1, 5>(rand_buffer, wnaf, skew, 0);
@@ -148,16 +148,13 @@ TEST(wnaf, WnafFixedWithEndoSplit)
     fr k = engine.get_random_uint256();
     k.data[3] &= 0x0fffffffffffffffUL;
 
-    fr k1{ 0, 0, 0, 0 };
-    fr k2{ 0, 0, 0, 0 };
-
-    fr::split_into_endomorphism_scalars(k, k1, k2);
+    auto [k1, k2] = fr::split_into_endomorphism_scalars(k);
     uint64_t wnaf[WNAF_SIZE(5)] = { 0 };
     uint64_t endo_wnaf[WNAF_SIZE(5)] = { 0 };
     bool skew = false;
     bool endo_skew = false;
-    wnaf::fixed_wnaf<1, 5>(&k1.data[0], wnaf, skew, 0);
-    wnaf::fixed_wnaf<1, 5>(&k2.data[0], endo_wnaf, endo_skew, 0);
+    wnaf::fixed_wnaf<1, 5>(k1, wnaf, skew, 0);
+    wnaf::fixed_wnaf<1, 5>(k2, endo_wnaf, endo_skew, 0);
 
     fr k1_recovered{ 0, 0, 0, 0 };
     fr k2_recovered{ 0, 0, 0, 0 };
diff --git a/barretenberg/cpp/src/barretenberg/ecc/scalar_multiplication/scalar_multiplication.cpp b/barretenberg/cpp/src/barretenberg/ecc/scalar_multiplication/scalar_multiplication.cpp
index 84be0133581..92fd3651736 100644
--- a/barretenberg/cpp/src/barretenberg/ecc/scalar_multiplication/scalar_multiplication.cpp
+++ b/barretenberg/cpp/src/barretenberg/ecc/scalar_multiplication/scalar_multiplication.cpp
@@ -219,24 +219,23 @@ void compute_wnaf_states(uint64_t* point_schedule,
     }
 
     parallel_for(num_threads, [&](size_t i) {
-        Fr T0;
        uint64_t* wnaf_table = &point_schedule[(2 * i) * num_initial_points_per_thread];
        const Fr* thread_scalars = &scalars[i * num_initial_points_per_thread];
        bool* skew_table = &input_skew_table[(2 * i) * num_initial_points_per_thread];
        uint64_t offset = i * num_points_per_thread;

        for (uint64_t j = 0; j < num_initial_points_per_thread; ++j) {
-            T0 = thread_scalars[j].from_montgomery_form();
-            Fr::split_into_endomorphism_scalars(T0, T0, *(Fr*)&T0.data[2]);
+            Fr T0 = thread_scalars[j].from_montgomery_form();
+            std::array<std::array<uint64_t, 2>, 2> endo_scalars = Fr::split_into_endomorphism_scalars(T0);

-            wnaf::fixed_wnaf_with_counts(&T0.data[0],
+            wnaf::fixed_wnaf_with_counts(endo_scalars[0],
                                         &wnaf_table[(j << 1UL)],
                                         skew_table[j << 1ULL],
                                         &thread_round_counts[i][0],
                                         ((j << 1ULL) + offset) << 32ULL,
                                         num_points,
                                         wnaf_bits);
-            wnaf::fixed_wnaf_with_counts(&T0.data[2],
+            wnaf::fixed_wnaf_with_counts(endo_scalars[1],
                                         &wnaf_table[(j << 1UL) + 1],
                                         skew_table[(j << 1UL) + 1],
                                         &thread_round_counts[i][0],
diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/biggroup/biggroup_nafs.hpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/biggroup/biggroup_nafs.hpp
index 55bc21b6cbb..6b11078c53d 100644
--- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/biggroup/biggroup_nafs.hpp
+++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/biggroup/biggroup_nafs.hpp
@@ -150,10 +150,10 @@ typename element<C, Fq, Fr, G>::secp256k1_wnaf_pair element<C, Fq, Fr, G>::compu
         k_u256 = k_u256 >> stagger;
         if (is_lo) {
             barretenberg::wnaf::fixed_wnaf(
-                &k_u256.data[0], &wnaf_values[0], skew_without_stagger, 0);
+                { k_u256.data[0], k_u256.data[1] }, &wnaf_values[0], skew_without_stagger, 0);
         } else {
             barretenberg::wnaf::fixed_wnaf(
-                &k_u256.data[0], &wnaf_values[0], skew_without_stagger, 0);
+                { k_u256.data[0], k_u256.data[1] }, &wnaf_values[0], skew_without_stagger, 0);
         }
 
         // Number of rounds that are needed to reconstruct the scalar without staggered bits
@@ -372,7 +372,8 @@ std::vector<field_t<C>> element<C, Fq, Fr, G>::compute_wnaf(const Fr& scalar)
 
     uint64_t wnaf_values[num_rounds] = { 0 };
     bool skew = false;
-    barretenberg::wnaf::fixed_wnaf(&scalar_multiplier.data[0], &wnaf_values[0], skew, 0);
+    barretenberg::wnaf::fixed_wnaf(
+        { scalar_multiplier.data[0], scalar_multiplier.data[1] }, &wnaf_values[0], skew, 0);
 
     std::vector<field_t<C>> wnaf_entries;
     for (size_t i = 0; i < num_rounds; ++i) {
diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/plookup/plookup.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/plookup/plookup.test.cpp
index e199c2b817b..05b9e159f07 100644
--- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/plookup/plookup.test.cpp
+++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/plookup/plookup.test.cpp
@@ -527,7 +527,7 @@ TEST(stdlib_plookup, secp256k1_generator)
 
     uint64_t wnaf_entries[18] = { 0 };
     bool skew = false;
-    barretenberg::wnaf::fixed_wnaf<129, 1, 8>(&input_value.data[0], &wnaf_entries[0], skew, 0);
+    barretenberg::wnaf::fixed_wnaf<129, 1, 8>({ input_value.data[0], input_value.data[1] }, &wnaf_entries[0], skew, 0);
 
     std::vector<uint64_t> naf_values;
     for (size_t i = 0; i < 17; ++i) {