diff --git a/Dockerfile.aztec b/Dockerfile.aztec index 0e709558e063..df65e0628283 100644 --- a/Dockerfile.aztec +++ b/Dockerfile.aztec @@ -3,7 +3,7 @@ ENV BB_WORKING_DIRECTORY=/usr/src/bb ENV BB_BINARY_PATH=/usr/src/barretenberg/cpp/build/bin/bb ENV ACVM_WORKING_DIRECTORY=/usr/src/acvm ENV ACVM_BINARY_PATH=/usr/src/noir/noir-repo/target/release/acvm -RUN mkdir -p $BB_WORKING_DIRECTORY $ACVM_WORKING_DIRECTORY /usr/src/yarn-project/world-state/build +RUN mkdir -p $BB_WORKING_DIRECTORY $ACVM_WORKING_DIRECTORY /usr/src/yarn-project/native/build COPY /usr/src /usr/src diff --git a/Dockerfile.end-to-end b/Dockerfile.end-to-end index fa3684649f08..211db0d7e8f6 100644 --- a/Dockerfile.end-to-end +++ b/Dockerfile.end-to-end @@ -9,7 +9,7 @@ ENV BB_BINARY_PATH=/usr/src/barretenberg/cpp/build/bin/bb ENV ACVM_WORKING_DIRECTORY=/usr/src/acvm ENV ACVM_BINARY_PATH=/usr/src/noir/noir-repo/target/release/acvm ENV PROVER_AGENT_CONCURRENCY=8 -RUN mkdir -p $BB_WORKING_DIRECTORY $ACVM_WORKING_DIRECTORY /usr/src/yarn-project/world-state/build +RUN mkdir -p $BB_WORKING_DIRECTORY $ACVM_WORKING_DIRECTORY /usr/src/yarn-project/native/build COPY /usr/src /usr/src COPY /anvil /opt/foundry/bin/anvil diff --git a/aztec-nargo/Dockerfile b/aztec-nargo/Dockerfile index 303edbec5c35..f3321bcb827b 100644 --- a/aztec-nargo/Dockerfile +++ b/aztec-nargo/Dockerfile @@ -11,7 +11,7 @@ FROM --platform=linux/amd64 aztecprotocol/barretenberg-x86_64-linux-clang as bar FROM ubuntu:noble # Install Tini as nargo doesn't handle signals properly. # Install git as nargo needs it to clone. -RUN apt-get update && apt-get install -y git tini jq curl && rm -rf /var/lib/apt/lists/* && apt-get clean +RUN apt-get update && apt-get install -y git tini jq curl nodejs npm && rm -rf /var/lib/apt/lists/* && apt-get clean # Copy binaries to /usr/bin COPY --from=built-noir /usr/src/noir/noir-repo/target/release/nargo /usr/bin/nargo @@ -21,4 +21,9 @@ COPY --from=barretenberg /usr/src/barretenberg/cpp/build/bin/bb /usr/bin/bb # Copy in script that calls both binaries COPY ./aztec-nargo/compile_then_postprocess.sh /usr/src/aztec-nargo/compile_then_postprocess.sh +# Profiler +COPY --from=built-noir /usr/src/noir/noir-repo/target/release/noir-profiler /usr/bin/noir-profiler +COPY ./noir-projects/noir-contracts/scripts/flamegraph.sh /usr/bin/flamegraph.sh +COPY ./noir-projects/noir-contracts/scripts/extractFunctionAsNoirArtifact.js /usr/bin/extractFunctionAsNoirArtifact.js + ENTRYPOINT ["/usr/bin/tini", "--", "/usr/src/aztec-nargo/compile_then_postprocess.sh"] diff --git a/aztec-nargo/Earthfile b/aztec-nargo/Earthfile index cdf45af8f4a6..5f235b5b8333 100644 --- a/aztec-nargo/Earthfile +++ b/aztec-nargo/Earthfile @@ -4,7 +4,7 @@ run: FROM ubuntu:noble # Install Tini as nargo doesn't handle signals properly. # Install git as nargo needs it to clone. 
- RUN apt-get update && apt-get install -y git tini jq curl && rm -rf /var/lib/apt/lists/* && apt-get clean + RUN apt-get update && apt-get install -y git tini jq curl nodejs npm && rm -rf /var/lib/apt/lists/* && apt-get clean # Copy binaries to /usr/bin COPY ../+bootstrap/usr/src/noir/noir-repo/target/release/nargo /usr/bin/nargo @@ -14,6 +14,11 @@ run: # Copy in script that calls both binaries COPY ./compile_then_postprocess.sh /usr/bin/compile_then_postprocess.sh + # Profiler + COPY ../+bootstrap/usr/src/noir/noir-repo/target/release/noir-profiler /usr/bin/noir-profiler + COPY ../+bootstrap/usr/src/noir-projects/noir-contracts/scripts/flamegraph.sh /usr/bin/flamegraph.sh + COPY ../+bootstrap/usr/src/noir-projects/noir-contracts/scripts/extractFunctionAsNoirArtifact.js /usr/bin/extractFunctionAsNoirArtifact.js + ENV PATH "/usr/bin:${PATH}" ENTRYPOINT ["/usr/bin/tini", "--", "/usr/bin/compile_then_postprocess.sh"] SAVE IMAGE aztecprotocol/aztec-nargo diff --git a/aztec-up/bin/aztec b/aztec-up/bin/aztec index 998d05478dec..a53fc09c6058 100755 --- a/aztec-up/bin/aztec +++ b/aztec-up/bin/aztec @@ -72,6 +72,15 @@ elif [ "${1:-}" == "start" ]; then readarray -t ENV_VARS_TO_INJECT <"$LOCAL_ENV_VAR_FILE" export ENV_VARS_TO_INJECT="${ENV_VARS_TO_INJECT[*]}" ENV_VARS_TO_INJECT="${ENV_VARS_TO_INJECT[*]}" INHERIT_USER=0 $(dirname $0)/.aztec-run aztecprotocol/aztec "$@" +elif [ "${1:-}" == "flamegraph" ]; then + docker run -it \ + --entrypoint /usr/bin/flamegraph.sh \ + --env PROFILER_PATH=/usr/bin/noir-profiler \ + --env BACKEND_PATH=/usr/bin/bb \ + --env SERVE=${SERVE:-0} \ + $([ "${SERVE:-0}" == "1" ] && echo "-p 8000:8000" || echo "") \ + -v $(realpath $(dirname $2))/:/tmp \ + aztecprotocol/aztec-nargo:$VERSION /tmp/$(basename $2) $3 else ENV_VARS_TO_INJECT="SECRET_KEY" SKIP_PORT_ASSIGNMENT=1 $(dirname $0)/.aztec-run aztecprotocol/aztec "$@" fi diff --git a/barretenberg/.gitrepo b/barretenberg/.gitrepo index 858c05f22ddc..dade6609f6ee 100644 --- a/barretenberg/.gitrepo +++ b/barretenberg/.gitrepo @@ -6,7 +6,7 @@ [subrepo] remote = https://github.com/AztecProtocol/barretenberg branch = master - commit = 2b718dec0624621c81bdc406bed8db0c30ed2a77 - parent = 6ce23892c8762ffde6fc3198e1e392cd1a72969b + commit = 37c100901c570b61ca79b46f0fdb0bbc5ce22708 + parent = 756b94101345d231b86233921398555512855273 method = merge cmdver = 0.4.6 diff --git a/barretenberg/acir_tests/browser-test-app/yarn.lock b/barretenberg/acir_tests/browser-test-app/yarn.lock index 4c833f03d747..580266135bb6 100644 --- a/barretenberg/acir_tests/browser-test-app/yarn.lock +++ b/barretenberg/acir_tests/browser-test-app/yarn.lock @@ -7,7 +7,7 @@ __metadata: "@aztec/bb.js@file:../../ts::locator=browser-test-app%40workspace%3A.": version: 0.72.1 - resolution: "@aztec/bb.js@file:../../ts#../../ts::hash=7ab3ef&locator=browser-test-app%40workspace%3A." + resolution: "@aztec/bb.js@file:../../ts#../../ts::hash=90d55d&locator=browser-test-app%40workspace%3A." 
dependencies: comlink: "npm:^4.4.1" commander: "npm:^12.1.0" @@ -17,7 +17,7 @@ __metadata: tslib: "npm:^2.4.0" bin: bb.js: ./dest/node/main.js - checksum: 10c0/463d29bb01102874431d5a83af71a18a92b6b7fd817311474744bfe4af2b82d71efa1796d0a00093bcc87b163922ff748cb6a8480d7c295c726a855d1bdf321d + checksum: 10c0/6ec39503ec545df13d297134ae28d8be01d02105a8518fa9cc15c54d1f93924956ce5c41913d1f2db31a8e05a5bad1fe7f5a0c1d5f1a6346f4703e307cc3f05a languageName: node linkType: hard diff --git a/barretenberg/acir_tests/headless-test/package.json b/barretenberg/acir_tests/headless-test/package.json index 22d84048345c..85c0efb33ce0 100644 --- a/barretenberg/acir_tests/headless-test/package.json +++ b/barretenberg/acir_tests/headless-test/package.json @@ -10,7 +10,7 @@ "dependencies": { "chalk": "^5.3.0", "commander": "^12.1.0", - "playwright": "^1.50.0", + "playwright": "1.49.0", "puppeteer": "^22.4.1" }, "devDependencies": { diff --git a/barretenberg/acir_tests/headless-test/yarn.lock b/barretenberg/acir_tests/headless-test/yarn.lock index 66b179faea20..9689b1ab4525 100644 --- a/barretenberg/acir_tests/headless-test/yarn.lock +++ b/barretenberg/acir_tests/headless-test/yarn.lock @@ -869,7 +869,7 @@ __metadata: dependencies: chalk: "npm:^5.3.0" commander: "npm:^12.1.0" - playwright: "npm:^1.50.0" + playwright: "npm:1.49.0" puppeteer: "npm:^22.4.1" ts-node: "npm:^10.9.2" typescript: "npm:^5.4.2" @@ -1408,27 +1408,27 @@ __metadata: languageName: node linkType: hard -"playwright-core@npm:1.50.0": - version: 1.50.0 - resolution: "playwright-core@npm:1.50.0" +"playwright-core@npm:1.49.0": + version: 1.49.0 + resolution: "playwright-core@npm:1.49.0" bin: playwright-core: cli.js - checksum: 10c0/b0cc7fadcb2db68a7b8d730b26c7a7d17baad454a0697c781e08074a619e57779a90be9b57c4c741ff4895390bdfd093d8393a746e8bf68ae57ac452f4c1cdb2 + checksum: 10c0/22c1a72fabdcc87bd1cd4d40a032d2c5b94cf94ba7484dc182048c3fa1c8ec26180b559d8cac4ca9870e8fd6bdf5ef9d9f54e7a31fd60d67d098fcffc5e4253b languageName: node linkType: hard -"playwright@npm:^1.50.0": - version: 1.50.0 - resolution: "playwright@npm:1.50.0" +"playwright@npm:1.49.0": + version: 1.49.0 + resolution: "playwright@npm:1.49.0" dependencies: fsevents: "npm:2.3.2" - playwright-core: "npm:1.50.0" + playwright-core: "npm:1.49.0" dependenciesMeta: fsevents: optional: true bin: playwright: cli.js - checksum: 10c0/0076a536433819b7122066a07c5fcfa56d40d09cbbec0a39061bbfa832c8a1f626df5e4fe206fbeba56b3a61f0e2b26d4ad3c2b402852d6f147a266fd18e4ddf + checksum: 10c0/e94d662747cd147d0573570fec90dadc013c1097595714036fc8934a075c5a82ab04a49111b03b1f762ea86429bdb7c94460901896901e20970b30ce817cc93f languageName: node linkType: hard diff --git a/barretenberg/cpp/Earthfile b/barretenberg/cpp/Earthfile index 907243e05b0e..a54cd615b09a 100644 --- a/barretenberg/cpp/Earthfile +++ b/barretenberg/cpp/Earthfile @@ -59,10 +59,10 @@ test-cache-read: --command="exit 1" SAVE ARTIFACT build/bin -preset-release-world-state: +preset-release-nodejs-module: FROM +source - DO +CACHE_BUILD_BIN --prefix=preset-release-world-state \ - --command="cmake --preset clang16-pic -Bbuild && cmake --build build --target world_state_napi && mv ./build/lib/world_state_napi.node ./build/bin" + DO +CACHE_BUILD_BIN --prefix=preset-release-nodejs-module \ + --command="cmake --preset clang16-pic -Bbuild && cmake --build build --target nodejs_module && mv ./build/lib/nodejs_module.node ./build/bin" SAVE ARTIFACT build/bin preset-release-assert: @@ -317,4 +317,4 @@ build: BUILD +preset-wasm BUILD +preset-wasm-threads BUILD +preset-release - 
BUILD +preset-release-world-state + BUILD +preset-release-nodejs-module diff --git a/barretenberg/cpp/bootstrap.sh b/barretenberg/cpp/bootstrap.sh index 931abd31945b..a567cee7f6c5 100755 --- a/barretenberg/cpp/bootstrap.sh +++ b/barretenberg/cpp/bootstrap.sh @@ -17,12 +17,12 @@ function build_native { cache_upload barretenberg-release-$hash.tar.gz build/bin fi - (cd src/barretenberg/world_state_napi && yarn --frozen-lockfile --prefer-offline) - if ! cache_download barretenberg-release-world-state-$hash.tar.gz; then + (cd src/barretenberg/nodejs_module && yarn --frozen-lockfile --prefer-offline) + if ! cache_download barretenberg-release-nodejs-module-$hash.tar.gz; then rm -f build-pic/CMakeCache.txt cmake --preset $pic_preset -DCMAKE_BUILD_TYPE=RelWithAssert - cmake --build --preset $pic_preset --target world_state_napi - cache_upload barretenberg-release-world-state-$hash.tar.gz build-pic/lib/world_state_napi.node + cmake --build --preset $pic_preset --target nodejs_module + cache_upload barretenberg-release-nodejs-module-$hash.tar.gz build-pic/lib/nodejs_module.node fi } @@ -118,4 +118,4 @@ case "$cmd" in *) echo "Unknown command: $cmd" exit 1 -esac \ No newline at end of file +esac diff --git a/barretenberg/cpp/cmake/lmdb.cmake b/barretenberg/cpp/cmake/lmdb.cmake index ca24a99c8025..18009c6684ea 100644 --- a/barretenberg/cpp/cmake/lmdb.cmake +++ b/barretenberg/cpp/cmake/lmdb.cmake @@ -3,6 +3,7 @@ include(ExternalProject) set(LMDB_PREFIX "${CMAKE_BINARY_DIR}/_deps/lmdb") set(LMDB_INCLUDE "${LMDB_PREFIX}/src/lmdb_repo/libraries/liblmdb") set(LMDB_LIB "${LMDB_INCLUDE}/liblmdb.a") +set(LMDB_HEADER "${LMDB_INCLUDE}/lmdb.h") set(LMDB_OBJECT "${LMDB_INCLUDE}/*.o") ExternalProject_Add( @@ -15,7 +16,7 @@ ExternalProject_Add( BUILD_COMMAND make -C libraries/liblmdb -e XCFLAGS=-fPIC liblmdb.a INSTALL_COMMAND "" UPDATE_COMMAND "" # No update step - BUILD_BYPRODUCTS ${LMDB_LIB} ${LMDB_INCLUDE} + BUILD_BYPRODUCTS ${LMDB_LIB} ${LMDB_HEADER} ) add_library(lmdb STATIC IMPORTED GLOBAL) diff --git a/barretenberg/cpp/dockerfiles/Dockerfile.x86_64-linux-clang b/barretenberg/cpp/dockerfiles/Dockerfile.x86_64-linux-clang index cf1563c7d592..01c68c61466f 100644 --- a/barretenberg/cpp/dockerfiles/Dockerfile.x86_64-linux-clang +++ b/barretenberg/cpp/dockerfiles/Dockerfile.x86_64-linux-clang @@ -28,7 +28,7 @@ RUN cmake --build --preset clang16 --target ultra_honk_rounds_bench --target bb RUN npm install --global yarn RUN cmake --preset clang16-pic -RUN cmake --build --preset clang16-pic --target world_state_napi +RUN cmake --build --preset clang16-pic --target nodejs_module FROM ubuntu:lunar WORKDIR /usr/src/barretenberg/cpp @@ -40,4 +40,4 @@ COPY --from=builder /usr/src/barretenberg/cpp/build/bin/grumpkin_srs_gen /usr/sr # Copy libs for consuming projects. 
COPY --from=builder /usr/src/barretenberg/cpp/build/lib/libbarretenberg.a /usr/src/barretenberg/cpp/build/lib/libbarretenberg.a COPY --from=builder /usr/src/barretenberg/cpp/build/lib/libenv.a /usr/src/barretenberg/cpp/build/lib/libenv.a -COPY --from=builder /usr/src/barretenberg/cpp/build-pic/lib/world_state_napi.node /usr/src/barretenberg/cpp/build-pic/lib/world_state_napi.node +COPY --from=builder /usr/src/barretenberg/cpp/build-pic/lib/nodejs_module.node /usr/src/barretenberg/cpp/build-pic/lib/nodejs_module.node diff --git a/barretenberg/cpp/scripts/lmdblib_tests.sh b/barretenberg/cpp/scripts/lmdblib_tests.sh new file mode 100755 index 000000000000..73b99d5bb13f --- /dev/null +++ b/barretenberg/cpp/scripts/lmdblib_tests.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +set -e + +# run commands relative to parent directory +cd $(dirname $0)/.. + +DEFAULT_TESTS=LMDBStoreTest.*:LMDBEnvironmentTest.* +TEST=${1:-$DEFAULT_TESTS} +PRESET=${PRESET:-clang16} + +cmake --build --preset $PRESET --target lmdblib_tests +./build/bin/lmdblib_tests --gtest_filter=$TEST diff --git a/barretenberg/cpp/src/CMakeLists.txt b/barretenberg/cpp/src/CMakeLists.txt index cd0965babb97..0091837f26ee 100644 --- a/barretenberg/cpp/src/CMakeLists.txt +++ b/barretenberg/cpp/src/CMakeLists.txt @@ -58,7 +58,7 @@ if (ENABLE_PIC AND CMAKE_CXX_COMPILER_ID MATCHES "Clang") message("Building with Position Independent Code") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fPIC") - add_subdirectory(barretenberg/world_state_napi) + add_subdirectory(barretenberg/nodejs_module) endif() add_subdirectory(barretenberg/bb) @@ -78,6 +78,7 @@ add_subdirectory(barretenberg/examples) add_subdirectory(barretenberg/flavor) add_subdirectory(barretenberg/goblin) add_subdirectory(barretenberg/grumpkin_srs_gen) +add_subdirectory(barretenberg/lmdblib) add_subdirectory(barretenberg/numeric) add_subdirectory(barretenberg/plonk) add_subdirectory(barretenberg/plonk_honk_shared) @@ -176,8 +177,9 @@ if(NOT DISABLE_AZTEC_VM) endif() if(NOT WASM) - # enable merkle trees + # enable merkle trees and lmdb list(APPEND BARRETENBERG_TARGET_OBJECTS $) + list(APPEND BARRETENBERG_TARGET_OBJECTS $) endif() add_library( diff --git a/barretenberg/cpp/src/barretenberg/benchmark/indexed_tree_bench/indexed_tree.bench.cpp b/barretenberg/cpp/src/barretenberg/benchmark/indexed_tree_bench/indexed_tree.bench.cpp index 65e3d30f7400..a388c30f1eae 100644 --- a/barretenberg/cpp/src/barretenberg/benchmark/indexed_tree_bench/indexed_tree.bench.cpp +++ b/barretenberg/cpp/src/barretenberg/benchmark/indexed_tree_bench/indexed_tree.bench.cpp @@ -3,7 +3,6 @@ #include "barretenberg/crypto/merkle_tree/hash.hpp" #include "barretenberg/crypto/merkle_tree/indexed_tree/content_addressed_indexed_tree.hpp" #include "barretenberg/crypto/merkle_tree/indexed_tree/indexed_leaf.hpp" -#include "barretenberg/crypto/merkle_tree/lmdb_store/callbacks.hpp" #include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.hpp" #include "barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp" #include "barretenberg/crypto/merkle_tree/response.hpp" diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/CMakeLists.txt b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/CMakeLists.txt index 4749a1a20216..093c0f704a48 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/CMakeLists.txt +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/CMakeLists.txt @@ -1,16 +1,11 @@ # merkle tree is agnostic to hash function 
barretenberg_module( crypto_merkle_tree - lmdb + lmdblib ) if (NOT FUZZING) # but the tests use pedersen and poseidon target_link_libraries(crypto_merkle_tree_tests PRIVATE stdlib_pedersen_hash stdlib_poseidon2) - add_dependencies(crypto_merkle_tree_tests lmdb_repo) - add_dependencies(crypto_merkle_tree_test_objects lmdb_repo) endif() -add_dependencies(crypto_merkle_tree lmdb_repo) -add_dependencies(crypto_merkle_tree_objects lmdb_repo) - diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.test.cpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.test.cpp index fb16c9d90537..0922ca4aab84 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.test.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/append_only_tree/content_addressed_append_only_tree.test.cpp @@ -7,7 +7,6 @@ #include "barretenberg/common/thread_pool.hpp" #include "barretenberg/crypto/merkle_tree/hash.hpp" #include "barretenberg/crypto/merkle_tree/hash_path.hpp" -#include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_environment.hpp" #include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.hpp" #include "barretenberg/crypto/merkle_tree/node_store/array_store.hpp" #include "barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp" @@ -15,6 +14,7 @@ #include "barretenberg/crypto/merkle_tree/signal.hpp" #include "barretenberg/crypto/merkle_tree/types.hpp" #include "barretenberg/ecc/curves/bn254/fr.hpp" +#include "barretenberg/lmdblib/lmdb_environment.hpp" #include "barretenberg/relations/relation_parameters.hpp" #include #include @@ -29,6 +29,7 @@ using namespace bb; using namespace bb::crypto::merkle_tree; +using namespace bb::lmdblib; using Store = ContentAddressedCachedTreeStore<bb::fr>; using TreeType = ContentAddressedAppendOnlyTree<Store, Poseidon2HashPolicy>; diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/fixtures.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/fixtures.hpp index d7774730aac2..aabb36d6ba7d 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/fixtures.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/fixtures.hpp @@ -29,7 +29,7 @@ static std::vector<fr> VALUES = create_values(); inline std::string random_string() { std::stringstream ss; - ss << random_engine.get_random_uint256(); + ss << random_engine.get_random_uint32(); return ss.str(); } diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_database.cpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_database.cpp deleted file mode 100644 index c761ec99bd97..000000000000 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_database.cpp +++ /dev/null @@ -1,38 +0,0 @@ -#include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_database.hpp" -#include "barretenberg/crypto/merkle_tree/lmdb_store/callbacks.hpp" -#include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_db_transaction.hpp" -#include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_environment.hpp" -#include - -namespace bb::crypto::merkle_tree { -LMDBDatabase::LMDBDatabase(LMDBEnvironment::SharedPtr env, - const LMDBDatabaseCreationTransaction& transaction, - const std::string& name, - bool integerKeys, - bool reverseKeys, - MDB_cmp_func* cmp) - : _environment(std::move(env)) -{ - unsigned int flags = MDB_CREATE; - if (integerKeys) { - flags |= MDB_INTEGERKEY; - } - if (reverseKeys) { - flags |= MDB_REVERSEKEY; - } - call_lmdb_func("mdb_dbi_open", mdb_dbi_open, transaction.underlying(), name.c_str(), flags, &_dbi); - if (cmp != nullptr) { - call_lmdb_func("mdb_set_compare", mdb_set_compare, transaction.underlying(), _dbi, cmp); - } -} - -LMDBDatabase::~LMDBDatabase() -{ - call_lmdb_func(mdb_dbi_close, _environment->underlying(), _dbi); -} - -const MDB_dbi& LMDBDatabase::underlying() const -{ - return _dbi; -} -} // namespace bb::crypto::merkle_tree \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_db_transaction.cpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_db_transaction.cpp deleted file mode 100644 index 67c28f253c1b..000000000000 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_db_transaction.cpp +++ /dev/null @@ -1,14 +0,0 @@ -#include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_db_transaction.hpp" - -#include "barretenberg/crypto/merkle_tree/lmdb_store/callbacks.hpp" -#include - -namespace bb::crypto::merkle_tree { -LMDBDatabaseCreationTransaction::LMDBDatabaseCreationTransaction(LMDBEnvironment::SharedPtr env) - : LMDBTransaction(std::move(env)) -{} -void LMDBDatabaseCreationTransaction::commit() const -{ - call_lmdb_func("mdb_txn_commit", mdb_txn_commit, _transaction); -} -} // namespace bb::crypto::merkle_tree \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_environment.test.cpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_environment.test.cpp deleted file mode 100644 index c8f13c5bdf7d..000000000000 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_environment.test.cpp +++ /dev/null @@ -1,203 +0,0 @@ -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -#include "barretenberg/common/serialize.hpp" -#include "barretenberg/common/streams.hpp" -#include "barretenberg/common/test.hpp" -#include "barretenberg/crypto/merkle_tree/fixtures.hpp" -#include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_database.hpp" -#include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_db_transaction.hpp" -#include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_environment.hpp" -#include "barretenberg/crypto/merkle_tree/lmdb_store/queries.hpp" -#include "barretenberg/crypto/merkle_tree/signal.hpp" -#include "barretenberg/crypto/merkle_tree/types.hpp" -#include "barretenberg/numeric/random/engine.hpp" -#include "barretenberg/numeric/uint128/uint128.hpp" -#include "barretenberg/numeric/uint256/uint256.hpp" -#include "barretenberg/polynomials/serialize.hpp" -#include "barretenberg/stdlib/primitives/field/field.hpp" -#include "lmdb_tree_store.hpp" - -using namespace bb::stdlib; -using namespace bb::crypto::merkle_tree; - -class LMDBEnvironmentTest : public testing::Test { - protected: - void SetUp() override - { - _directory = random_temp_directory(); - _mapSize = 1024 * 1024; - _maxReaders = 16; - std::filesystem::create_directories(_directory); - } - - void TearDown() override { std::filesystem::remove_all(_directory); } - - static std::string _directory; - static uint32_t _maxReaders; - static uint64_t _mapSize; -}; - -std::string LMDBEnvironmentTest::_directory; -uint32_t LMDBEnvironmentTest::_maxReaders; -uint64_t LMDBEnvironmentTest::_mapSize; - -std::vector<uint8_t> serialise(std::string key) -{ - std::vector<uint8_t> data(key.begin(), key.end()); - return data; -} - -TEST_F(LMDBEnvironmentTest, can_create_environment) -{ - EXPECT_NO_THROW(LMDBEnvironment environment( - LMDBEnvironmentTest::_directory, LMDBEnvironmentTest::_mapSize, 1, LMDBEnvironmentTest::_maxReaders)); -} - -TEST_F(LMDBEnvironmentTest, can_create_database) -{ - LMDBEnvironment::SharedPtr environment = std::make_shared<LMDBEnvironment>( - LMDBEnvironmentTest::_directory, LMDBEnvironmentTest::_mapSize, 1, LMDBEnvironmentTest::_maxReaders); - - { - LMDBDatabaseCreationTransaction tx(environment); - LMDBDatabase::SharedPtr db = std::make_unique<LMDBDatabase>(environment, tx, "DB", false, false); - EXPECT_NO_THROW(tx.commit()); - } -} - -TEST_F(LMDBEnvironmentTest, can_write_to_database) -{ - LMDBEnvironment::SharedPtr environment = std::make_shared<LMDBEnvironment>( - LMDBEnvironmentTest::_directory, LMDBEnvironmentTest::_mapSize, 1, LMDBEnvironmentTest::_maxReaders); - - LMDBDatabaseCreationTransaction tx(environment); - LMDBDatabase::SharedPtr db = std::make_unique<LMDBDatabase>(environment, tx, "DB", false, false); - EXPECT_NO_THROW(tx.commit()); - - { - LMDBTreeWriteTransaction::SharedPtr tx = std::make_shared<LMDBTreeWriteTransaction>(environment); - auto key = serialise(std::string("Key")); - auto data = serialise(std::string("TestData")); - EXPECT_NO_THROW(tx->put_value(key, data, *db)); - EXPECT_NO_THROW(tx->commit()); - } -} - -TEST_F(LMDBEnvironmentTest, can_read_from_database) -{ - LMDBEnvironment::SharedPtr environment = std::make_shared<LMDBEnvironment>( - LMDBEnvironmentTest::_directory, LMDBEnvironmentTest::_mapSize, 1, LMDBEnvironmentTest::_maxReaders); - - LMDBDatabaseCreationTransaction tx(environment); - LMDBDatabase::SharedPtr db = std::make_unique<LMDBDatabase>(environment, tx, "DB", false, false); - EXPECT_NO_THROW(tx.commit()); - - { - LMDBTreeWriteTransaction::SharedPtr tx = std::make_shared<LMDBTreeWriteTransaction>(environment); - auto key = serialise(std::string("Key")); - auto data = serialise(std::string("TestData")); - EXPECT_NO_THROW(tx->put_value(key, data, *db)); - EXPECT_NO_THROW(tx->commit()); - } - - { - environment->wait_for_reader(); - LMDBTreeReadTransaction::SharedPtr tx = std::make_shared<LMDBTreeReadTransaction>(environment); - auto key = serialise(std::string("Key")); - auto expected = serialise(std::string("TestData")); - std::vector<uint8_t> data; - tx->get_value(key, data, *db); - EXPECT_EQ(data, expected); - } -} - -TEST_F(LMDBEnvironmentTest, can_write_and_read_multiple) -{ - LMDBEnvironment::SharedPtr environment = std::make_shared<LMDBEnvironment>( - LMDBEnvironmentTest::_directory, LMDBEnvironmentTest::_mapSize, 1, LMDBEnvironmentTest::_maxReaders); - - LMDBDatabaseCreationTransaction tx(environment); - LMDBDatabase::SharedPtr db = std::make_unique<LMDBDatabase>(environment, tx, "DB", false, false); - EXPECT_NO_THROW(tx.commit()); - - uint64_t numValues = 10; - - { - for (uint64_t count = 0; count < numValues; count++) { - LMDBTreeWriteTransaction::SharedPtr tx = std::make_shared<LMDBTreeWriteTransaction>(environment); - auto key = serialise((std::stringstream() << "Key" << count).str()); - auto data = serialise((std::stringstream() << "TestData" << count).str()); - EXPECT_NO_THROW(tx->put_value(key, data, *db)); - EXPECT_NO_THROW(tx->commit()); - } - } - - { - for (uint64_t count = 0; count < numValues; count++) { - environment->wait_for_reader(); - LMDBTreeReadTransaction::SharedPtr tx = std::make_shared<LMDBTreeReadTransaction>(environment); - auto key = serialise((std::stringstream() << "Key" << count).str()); - auto expected = serialise((std::stringstream() << "TestData" << count).str()); - std::vector<uint8_t> data; - tx->get_value(key, data, *db); - EXPECT_EQ(data, expected); - } - } -} - -TEST_F(LMDBEnvironmentTest, can_read_multiple_threads) -{ - LMDBEnvironment::SharedPtr environment = - std::make_shared<LMDBEnvironment>(LMDBEnvironmentTest::_directory, LMDBEnvironmentTest::_mapSize, 1, 2); - - LMDBDatabaseCreationTransaction tx(environment); - LMDBDatabase::SharedPtr db = std::make_unique<LMDBDatabase>(environment, tx, "DB", false, false); - EXPECT_NO_THROW(tx.commit()); - - uint64_t numValues = 10; - uint64_t numIterationsPerThread = 1000; - uint32_t numThreads = 16; - - { - for (uint64_t count = 0; count < numValues; count++) { - LMDBTreeWriteTransaction::SharedPtr tx = std::make_shared<LMDBTreeWriteTransaction>(environment); - auto key = serialise((std::stringstream() << "Key" << count).str()); - auto data = serialise((std::stringstream() << "TestData" << count).str()); - EXPECT_NO_THROW(tx->put_value(key, data, *db)); - EXPECT_NO_THROW(tx->commit()); - } - } - - { - auto func = [&]() -> void { - for (uint64_t iteration = 0; iteration < numIterationsPerThread; iteration++) { - for (uint64_t count = 0; count < numValues; count++) { - environment->wait_for_reader(); - LMDBTreeReadTransaction::SharedPtr tx = std::make_shared<LMDBTreeReadTransaction>(environment); - auto key = serialise((std::stringstream() << "Key" << count).str()); - auto expected = serialise((std::stringstream() << "TestData" << count).str()); - std::vector<uint8_t> data; - tx->get_value(key, data, *db); - EXPECT_EQ(data, expected); - } - } - }; - std::vector<std::unique_ptr<std::thread>> threads; - for (uint64_t count = 0; count < numThreads; count++) { - threads.emplace_back(std::make_unique<std::thread>(func)); - } - for (uint64_t count = 0; count < numThreads; count++) { - threads[count]->join(); - } - } -} diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_read_transaction.cpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_read_transaction.cpp deleted file mode 100644 index 303e8f654ff8..000000000000 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_read_transaction.cpp +++ /dev/null @@ -1,21 +0,0 @@ -#include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_read_transaction.hpp" -#include "barretenberg/crypto/merkle_tree/lmdb_store/callbacks.hpp" -#include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_environment.hpp" -#include - -namespace bb::crypto::merkle_tree { -LMDBTreeReadTransaction::LMDBTreeReadTransaction(LMDBEnvironment::SharedPtr env) - : LMDBTransaction(env, true) -{} - -LMDBTreeReadTransaction::~LMDBTreeReadTransaction() -{ - abort(); -} - -void LMDBTreeReadTransaction::abort() -{ - LMDBTransaction::abort(); - _environment->release_reader(); -} -} // namespace bb::crypto::merkle_tree diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_read_transaction.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_read_transaction.hpp deleted file mode 100644 index dd94b88b441e..000000000000 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_read_transaction.hpp +++ /dev/null @@ -1,38 +0,0 @@ -#pragma once -#include "barretenberg/common/serialize.hpp" -#include "barretenberg/crypto/merkle_tree/lmdb_store/callbacks.hpp" -#include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_database.hpp" -#include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_environment.hpp" -#include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_transaction.hpp" -#include "barretenberg/crypto/merkle_tree/lmdb_store/queries.hpp" -#include "barretenberg/crypto/merkle_tree/types.hpp" -#include -#include -#include -#include -#include -#include - -namespace bb::crypto::merkle_tree { - -/** - * RAII wrapper around a read transaction. - * Contains various methods for retrieving values by their keys. - * Aborts the transaction upon object destruction. - */ -class LMDBTreeReadTransaction : public LMDBTransaction { - public: - using Ptr = std::unique_ptr<LMDBTreeReadTransaction>; - using SharedPtr = std::shared_ptr<LMDBTreeReadTransaction>; - - LMDBTreeReadTransaction(LMDBEnvironment::SharedPtr env); - LMDBTreeReadTransaction(const LMDBTreeReadTransaction& other) = delete; - LMDBTreeReadTransaction(LMDBTreeReadTransaction&& other) = delete; - LMDBTreeReadTransaction& operator=(const LMDBTreeReadTransaction& other) = delete; - LMDBTreeReadTransaction& operator=(LMDBTreeReadTransaction&& other) = delete; - - ~LMDBTreeReadTransaction() override; - - void abort() override; -}; -} // namespace bb::crypto::merkle_tree \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.cpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.cpp index 45802caf3587..98549b6552c5 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.cpp @@ -1,10 +1,11 @@ #include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.hpp" #include "barretenberg/common/serialize.hpp" #include "barretenberg/crypto/merkle_tree/indexed_tree/indexed_leaf.hpp" -#include "barretenberg/crypto/merkle_tree/lmdb_store/callbacks.hpp" -#include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_db_transaction.hpp" #include "barretenberg/crypto/merkle_tree/types.hpp" #include "barretenberg/ecc/curves/bn254/fr.hpp" +#include "barretenberg/lmdblib/lmdb_db_transaction.hpp" +#include "barretenberg/lmdblib/lmdb_helpers.hpp" +#include "barretenberg/lmdblib/lmdb_store_base.hpp" #include "barretenberg/numeric/uint128/uint128.hpp" #include "barretenberg/numeric/uint256/uint256.hpp" #include "barretenberg/serialize/msgpack.hpp" @@ -48,73 +49,54 @@ int index_key_cmp(const MDB_val* a, const MDB_val* b) } LMDBTreeStore::LMDBTreeStore(std::string directory, std::string name, uint64_t mapSizeKb, uint64_t maxNumReaders) - : _name(std::move(name)) - , _directory(std::move(directory)) - , _environment(std::make_shared<LMDBEnvironment>(_directory, mapSizeKb, 5, maxNumReaders)) + : LMDBStoreBase(directory, mapSizeKb, maxNumReaders, 5) + , _name(std::move(name)) { { - LMDBDatabaseCreationTransaction tx(_environment); + LMDBDatabaseCreationTransaction::Ptr tx = create_db_transaction(); _blockDatabase = - std::make_unique<LMDBDatabase>(_environment, tx, _name + BLOCKS_DB, false, false, block_key_cmp); - tx.commit(); + std::make_unique<LMDBDatabase>(_environment, *tx, _name + BLOCKS_DB, false, false, false, block_key_cmp); + tx->commit(); } { - LMDBDatabaseCreationTransaction tx(_environment); - _nodeDatabase = std::make_unique<LMDBDatabase>(_environment, tx, _name + NODES_DB, false, false, fr_key_cmp); - tx.commit(); + LMDBDatabaseCreationTransaction::Ptr tx = create_db_transaction(); + _nodeDatabase = + std::make_unique<LMDBDatabase>(_environment, *tx, _name + NODES_DB, false, false, false, fr_key_cmp); + tx->commit(); } { - LMDBDatabaseCreationTransaction tx(_environment); + LMDBDatabaseCreationTransaction::Ptr tx = create_db_transaction(); _leafKeyToIndexDatabase = - std::make_unique<LMDBDatabase>(_environment, tx, _name + LEAF_INDICES_DB, false, false, fr_key_cmp); - tx.commit(); + std::make_unique<LMDBDatabase>(_environment, *tx, _name + LEAF_INDICES_DB, false, false, false, fr_key_cmp); + tx->commit(); } { - LMDBDatabaseCreationTransaction tx(_environment); - _leafHashToPreImageDatabase = - std::make_unique<LMDBDatabase>(_environment, tx, _name + LEAF_PREIMAGES_DB, false, false, fr_key_cmp); - tx.commit(); + LMDBDatabaseCreationTransaction::Ptr tx = create_db_transaction(); + _leafHashToPreImageDatabase = std::make_unique<LMDBDatabase>( + _environment, *tx, _name + LEAF_PREIMAGES_DB, false, false, false, fr_key_cmp); + tx->commit(); } { - LMDBDatabaseCreationTransaction tx(_environment); - _indexToBlockDatabase = - std::make_unique<LMDBDatabase>(_environment, tx, _name + BLOCK_INDICES_DB, false, false, index_key_cmp); - tx.commit(); + LMDBDatabaseCreationTransaction::Ptr tx = create_db_transaction(); + _indexToBlockDatabase = std::make_unique<LMDBDatabase>( + _environment, *tx, _name + BLOCK_INDICES_DB, false, false, false, index_key_cmp); + tx->commit(); } } -LMDBTreeStore::WriteTransaction::Ptr LMDBTreeStore::create_write_transaction() const -{ - return std::make_unique<LMDBTreeWriteTransaction>(_environment); -} -LMDBTreeStore::ReadTransaction::Ptr LMDBTreeStore::create_read_transaction() -{ - _environment->wait_for_reader(); - return std::make_unique<LMDBTreeReadTransaction>(_environment); -} - void LMDBTreeStore::get_stats(TreeDBStats& stats, ReadTransaction& tx) { - - MDB_stat stat; - MDB_envinfo info; - call_lmdb_func(mdb_env_info, _environment->underlying(), &info); - stats.mapSize = info.me_mapsize; - call_lmdb_func(mdb_stat, tx.underlying(), _blockDatabase->underlying(), &stat); - stats.blocksDBStats = DBStats(BLOCKS_DB, stat); - call_lmdb_func(mdb_stat, tx.underlying(), _leafHashToPreImageDatabase->underlying(), &stat); - stats.leafPreimagesDBStats = DBStats(LEAF_PREIMAGES_DB, stat); - call_lmdb_func(mdb_stat, tx.underlying(), _leafKeyToIndexDatabase->underlying(), &stat); - stats.leafIndicesDBStats = DBStats(LEAF_INDICES_DB, stat); - call_lmdb_func(mdb_stat, tx.underlying(), _nodeDatabase->underlying(), &stat); - stats.nodesDBStats = DBStats(NODES_DB, stat); - call_lmdb_func(mdb_stat, tx.underlying(), _indexToBlockDatabase->underlying(), &stat); - stats.blockIndicesDBStats = DBStats(BLOCK_INDICES_DB, stat); + stats.mapSize = _environment->get_map_size(); + stats.blocksDBStats = _blockDatabase->get_stats(tx); + stats.leafPreimagesDBStats = _leafHashToPreImageDatabase->get_stats(tx); + stats.leafIndicesDBStats = _leafKeyToIndexDatabase->get_stats(tx); + stats.nodesDBStats = _nodeDatabase->get_stats(tx); + stats.blockIndicesDBStats = _indexToBlockDatabase->get_stats(tx); } void LMDBTreeStore::write_block_data(const block_number_t& blockNumber, diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.hpp index c67e13f51301..4fde37126b45 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.hpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.hpp @@ -2,14 +2,14 @@ #include "barretenberg/common/log.hpp" #include "barretenberg/common/serialize.hpp" #include "barretenberg/crypto/merkle_tree/indexed_tree/indexed_leaf.hpp" -#include "barretenberg/crypto/merkle_tree/lmdb_store/callbacks.hpp" -#include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_database.hpp" -#include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_environment.hpp" -#include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_read_transaction.hpp" -#include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_write_transaction.hpp" #include "barretenberg/crypto/merkle_tree/node_store/tree_meta.hpp" #include "barretenberg/crypto/merkle_tree/types.hpp" #include "barretenberg/ecc/curves/bn254/fr.hpp" +#include "barretenberg/lmdblib/lmdb_database.hpp" +#include "barretenberg/lmdblib/lmdb_environment.hpp" +#include "barretenberg/lmdblib/lmdb_read_transaction.hpp" +#include "barretenberg/lmdblib/lmdb_store_base.hpp" +#include "barretenberg/lmdblib/lmdb_write_transaction.hpp" #include "barretenberg/serialize/msgpack.hpp" #include "barretenberg/world_state/types.hpp" #include "lmdb.h" @@ -24,6 +24,8 @@ namespace bb::crypto::merkle_tree { +using namespace bb::lmdblib; + struct BlockPayload { index_t size; @@ -156,21 +158,18 @@ struct BlockIndexPayload { * data */ -class LMDBTreeStore { +class LMDBTreeStore : public LMDBStoreBase { public: using Ptr = std::unique_ptr<LMDBTreeStore>; using SharedPtr = std::shared_ptr<LMDBTreeStore>; - using ReadTransaction = LMDBTreeReadTransaction; - using WriteTransaction = LMDBTreeWriteTransaction; + using ReadTransaction = LMDBReadTransaction; + using WriteTransaction = LMDBWriteTransaction; LMDBTreeStore(std::string directory, std::string name, uint64_t mapSizeKb, uint64_t maxNumReaders); LMDBTreeStore(const LMDBTreeStore& other) = delete; LMDBTreeStore(LMDBTreeStore&& other) = delete; LMDBTreeStore& operator=(const LMDBTreeStore& other) = delete; LMDBTreeStore& operator=(LMDBTreeStore&& other) = delete; - ~LMDBTreeStore() = default; - - WriteTransaction::Ptr create_write_transaction() const; - ReadTransaction::Ptr create_read_transaction(); + ~LMDBTreeStore() override = default; void get_stats(TreeDBStats& stats, ReadTransaction& tx); @@ -233,8 +232,6 @@ class LMDBTreeStore { private: std::string _name; - std::string _directory; - LMDBEnvironment::SharedPtr _environment; LMDBDatabase::Ptr _blockDatabase; LMDBDatabase::Ptr _nodeDatabase; LMDBDatabase::Ptr _leafKeyToIndexDatabase; diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.test.cpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.test.cpp index f7bcbf009f54..7a06f2f64a2a 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.test.cpp +++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.test.cpp @@ -13,9 +13,9 @@ #include "barretenberg/common/test.hpp" #include "barretenberg/crypto/merkle_tree/fixtures.hpp" #include "barretenberg/crypto/merkle_tree/indexed_tree/indexed_leaf.hpp" -#include "barretenberg/crypto/merkle_tree/lmdb_store/callbacks.hpp" #include "barretenberg/crypto/merkle_tree/node_store/tree_meta.hpp" #include "barretenberg/crypto/merkle_tree/types.hpp" +#include "barretenberg/lmdblib/lmdb_helpers.hpp" #include "barretenberg/numeric/random/engine.hpp" #include "barretenberg/numeric/uint128/uint128.hpp" #include "barretenberg/numeric/uint256/uint256.hpp" @@ -59,13 +59,13 @@ TEST_F(LMDBTreeStoreTest, can_write_and_read_block_data) blockData.size = 45; LMDBTreeStore store(_directory, "DB1", _mapSize, _maxReaders); { - LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); + LMDBWriteTransaction::Ptr transaction = store.create_write_transaction(); store.write_block_data(3, blockData, *transaction); transaction->commit(); } { - LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); + LMDBReadTransaction::Ptr transaction = store.create_read_transaction(); BlockPayload readBack; bool success = store.read_block_data(3, readBack, *transaction); EXPECT_TRUE(success); @@ -90,13 +90,13 @@ TEST_F(LMDBTreeStoreTest, can_write_and_read_meta_data) metaData.size = 60; LMDBTreeStore store(_directory, "DB1", _mapSize, _maxReaders); { -
LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); + LMDBWriteTransaction::Ptr transaction = store.create_write_transaction(); store.write_meta_data(metaData, *transaction); transaction->commit(); } { - LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); + LMDBReadTransaction::Ptr transaction = store.create_read_transaction(); TreeMeta readBack; bool success = store.read_meta_data(readBack, *transaction); EXPECT_TRUE(success); @@ -118,7 +118,7 @@ TEST_F(LMDBTreeStoreTest, can_read_data_from_multiple_threads) metaData.size = 60; LMDBTreeStore store(_directory, "DB1", _mapSize, 2); { - LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); + LMDBWriteTransaction::Ptr transaction = store.create_write_transaction(); store.write_meta_data(metaData, *transaction); transaction->commit(); } @@ -129,7 +129,7 @@ TEST_F(LMDBTreeStoreTest, can_read_data_from_multiple_threads) { auto func = [&]() -> void { for (uint64_t iteration = 0; iteration < numIterationsPerThread; iteration++) { - LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); + LMDBReadTransaction::Ptr transaction = store.create_read_transaction(); TreeMeta readBack; bool success = store.read_meta_data(readBack, *transaction); EXPECT_TRUE(success); @@ -156,7 +156,7 @@ TEST_F(LMDBTreeStoreTest, can_write_and_read_multiple_blocks_with_meta) blockData.blockNumber = i + start_block; blockData.root = VALUES[i]; blockData.size = 45 + (i * 97); - LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); + LMDBWriteTransaction::Ptr transaction = store.create_write_transaction(); store.write_block_data(i + start_block, blockData, *transaction); TreeMeta meta; @@ -172,7 +172,7 @@ TEST_F(LMDBTreeStoreTest, can_write_and_read_multiple_blocks_with_meta) BlockPayload blockData; for (size_t i = 0; i < num_blocks; i++) { - LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); + LMDBReadTransaction::Ptr transaction = store.create_read_transaction(); BlockPayload readBack; bool success = store.read_block_data(i + start_block, readBack, *transaction); EXPECT_TRUE(success); @@ -185,7 +185,7 @@ TEST_F(LMDBTreeStoreTest, can_write_and_read_multiple_blocks_with_meta) { TreeMeta meta; - LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); + LMDBReadTransaction::Ptr transaction = store.create_read_transaction(); store.read_meta_data(meta, *transaction); EXPECT_EQ(meta.committedSize, blockData.size); @@ -232,13 +232,13 @@ TEST_F(LMDBTreeStoreTest, can_write_and_read_leaf_indices) bb::fr key = VALUES[5]; LMDBTreeStore store(_directory, "DB1", _mapSize, _maxReaders); { - LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); + LMDBWriteTransaction::Ptr transaction = store.create_write_transaction(); store.write_leaf_index(key, index, *transaction); transaction->commit(); } { - LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); + LMDBReadTransaction::Ptr transaction = store.create_read_transaction(); index_t readBack = 0; bool success = store.read_leaf_index(key, readBack, *transaction); EXPECT_TRUE(success); @@ -258,13 +258,13 @@ TEST_F(LMDBTreeStoreTest, can_write_and_read_nodes) bb::fr key = VALUES[6]; LMDBTreeStore store(_directory, "DB1", _mapSize, _maxReaders); { - LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); + LMDBWriteTransaction::Ptr transaction = store.create_write_transaction(); store.write_node(key, nodePayload, 
*transaction); transaction->commit(); } { - LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); + LMDBReadTransaction::Ptr transaction = store.create_read_transaction(); NodePayload readBack; bool success = store.read_node(key, readBack, *transaction); EXPECT_TRUE(success); @@ -283,13 +283,13 @@ TEST_F(LMDBTreeStoreTest, can_write_and_read_leaves_by_hash) bb::fr key = VALUES[2]; LMDBTreeStore store(_directory, "DB1", _mapSize, _maxReaders); { - LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); + LMDBWriteTransaction::Ptr transaction = store.create_write_transaction(); store.write_leaf_by_hash(key, leafData, *transaction); transaction->commit(); } { - LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); + LMDBReadTransaction::Ptr transaction = store.create_read_transaction(); PublicDataLeafValue readBack; bool success = store.read_leaf_by_hash(key, readBack, *transaction); EXPECT_TRUE(success); @@ -316,7 +316,7 @@ TEST_F(LMDBTreeStoreTest, can_write_and_retrieve_block_numbers_by_index) LMDBTreeStore store(_directory, "DB1", _mapSize, _maxReaders); { // write all of the blocks. - LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); + LMDBWriteTransaction::Ptr transaction = store.create_write_transaction(); for (auto block : blocks) { // the arg is block size so add 1 store.write_block_index_data(block.blockNumber, block.index + 1, *transaction); @@ -326,7 +326,7 @@ TEST_F(LMDBTreeStoreTest, can_write_and_retrieve_block_numbers_by_index) { // read back some blocks and check them - LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); + LMDBReadTransaction::Ptr transaction = store.create_read_transaction(); block_number_t readBack = 0; EXPECT_TRUE(store.find_block_for_index(5, readBack, *transaction)); EXPECT_EQ(readBack, 1); @@ -348,7 +348,7 @@ TEST_F(LMDBTreeStoreTest, can_write_and_retrieve_block_numbers_by_index) { // delete the last block - LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); + LMDBWriteTransaction::Ptr transaction = store.create_write_transaction(); // the arg is block size so add 1 store.delete_block_index(blocks[4].index + 1, blocks[4].blockNumber, *transaction); transaction->commit(); @@ -356,7 +356,7 @@ TEST_F(LMDBTreeStoreTest, can_write_and_retrieve_block_numbers_by_index) { // check the blocks again - LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); + LMDBReadTransaction::Ptr transaction = store.create_read_transaction(); block_number_t readBack = 0; EXPECT_TRUE(store.find_block_for_index(5, readBack, *transaction)); EXPECT_EQ(readBack, 1); @@ -377,7 +377,7 @@ TEST_F(LMDBTreeStoreTest, can_write_and_retrieve_block_numbers_by_index) { // delete 2 more blocks - LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); + LMDBWriteTransaction::Ptr transaction = store.create_write_transaction(); // the arg is block size so add 1 store.delete_block_index(blocks[3].index + 1, blocks[3].blockNumber, *transaction); store.delete_block_index(blocks[2].index + 1, blocks[2].blockNumber, *transaction); @@ -386,7 +386,7 @@ TEST_F(LMDBTreeStoreTest, can_write_and_retrieve_block_numbers_by_index) { // check the blocks again - LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); + LMDBReadTransaction::Ptr transaction = store.create_read_transaction(); block_number_t readBack = 0; EXPECT_TRUE(store.find_block_for_index(5, readBack, *transaction)); EXPECT_EQ(readBack, 
1); @@ -405,7 +405,7 @@ TEST_F(LMDBTreeStoreTest, can_write_and_retrieve_block_numbers_by_index) { // delete non-existent indices to check it does nothing - LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); + LMDBWriteTransaction::Ptr transaction = store.create_write_transaction(); // the arg is block size so add 1 store.delete_block_index(blocks[3].index + 1, blocks[3].blockNumber, *transaction); store.delete_block_index(blocks[2].index + 1, blocks[2].blockNumber, *transaction); @@ -416,7 +416,7 @@ { // check the blocks again - LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); + LMDBReadTransaction::Ptr transaction = store.create_read_transaction(); block_number_t readBack = 0; EXPECT_TRUE(store.find_block_for_index(5, readBack, *transaction)); EXPECT_EQ(readBack, 1); @@ -449,7 +449,7 @@ TEST_F(LMDBTreeStoreTest, can_write_and_retrieve_block_numbers_with_duplicate_in LMDBTreeStore store(_directory, "DB1", _mapSize, _maxReaders); { // write all of the blocks. we will write them in reverse order - LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); + LMDBWriteTransaction::Ptr transaction = store.create_write_transaction(); for (auto block : blocks) { // the arg is block size so add 1 store.write_block_index_data(block.blockNumber, block.index + 1, *transaction); @@ -459,7 +459,7 @@ { // we can't add a duplicate block at an index if it is not the next block number - LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); + LMDBWriteTransaction::Ptr transaction = store.create_write_transaction(); // the arg is block size so add 1 EXPECT_THROW(store.write_block_index_data(3, 60 + 1, *transaction), std::runtime_error); EXPECT_THROW(store.write_block_index_data(6, 60 + 1, *transaction), std::runtime_error); @@ -470,7 +470,7 @@ { // read back some blocks and check them - LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); + LMDBReadTransaction::Ptr transaction = store.create_read_transaction(); block_number_t readBack = 0; EXPECT_TRUE(store.find_block_for_index(5, readBack, *transaction)); EXPECT_EQ(readBack, 1); @@ -487,7 +487,7 @@ { // attempting to delete block 2 at index 60 should fail as it is not the last block in the series at index 60 - LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); + LMDBWriteTransaction::Ptr transaction = store.create_write_transaction(); // the arg is block size so add 1 EXPECT_THROW(store.delete_block_index(blocks[1].index + 1, blocks[1].blockNumber, *transaction), std::runtime_error); @@ -496,7 +496,7 @@ { // read back some blocks and check them - LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); + LMDBReadTransaction::Ptr transaction = store.create_read_transaction(); block_number_t readBack = 0; EXPECT_TRUE(store.find_block_for_index(5, readBack, *transaction)); EXPECT_EQ(readBack, 1); @@ -513,7 +513,7 @@ { // try and delete blocks that don't exist at index 60 - LMDBTreeWriteTransaction::Ptr transaction =
store.create_write_transaction(); + LMDBWriteTransaction::Ptr transaction = store.create_write_transaction(); // the arg is block size so add 1 EXPECT_THROW(store.delete_block_index(blocks[1].index + 1, 2, *transaction), std::runtime_error); EXPECT_THROW(store.delete_block_index(blocks[1].index + 1, 5, *transaction), std::runtime_error); @@ -522,7 +522,7 @@ TEST_F(LMDBTreeStoreTest, can_write_and_retrieve_block_numbers_with_duplicate_in { // read back some blocks and check them - LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); + LMDBReadTransaction::Ptr transaction = store.create_read_transaction(); block_number_t readBack = 0; EXPECT_TRUE(store.find_block_for_index(5, readBack, *transaction)); EXPECT_EQ(readBack, 1); @@ -538,7 +538,7 @@ TEST_F(LMDBTreeStoreTest, can_write_and_retrieve_block_numbers_with_duplicate_in { // delete the last 2 blocks at index 60 - LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); + LMDBWriteTransaction::Ptr transaction = store.create_write_transaction(); // the arg is block size so add 1 store.delete_block_index(blocks[3].index + 1, blocks[3].blockNumber, *transaction); store.delete_block_index(blocks[2].index + 1, blocks[2].blockNumber, *transaction); @@ -547,7 +547,7 @@ TEST_F(LMDBTreeStoreTest, can_write_and_retrieve_block_numbers_with_duplicate_in { // check the blocks again - LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); + LMDBReadTransaction::Ptr transaction = store.create_read_transaction(); block_number_t readBack = 0; EXPECT_TRUE(store.find_block_for_index(5, readBack, *transaction)); EXPECT_EQ(readBack, 1); @@ -561,7 +561,7 @@ TEST_F(LMDBTreeStoreTest, can_write_and_retrieve_block_numbers_with_duplicate_in { // delete the last final block at index 60 - LMDBTreeWriteTransaction::Ptr transaction = store.create_write_transaction(); + LMDBWriteTransaction::Ptr transaction = store.create_write_transaction(); // the arg is block size so add 1 // Only one block remains at index 60, try and delete one that doesn't exist, it should do nothing store.delete_block_index(blocks[3].index + 1, blocks[3].blockNumber, *transaction); @@ -573,7 +573,7 @@ TEST_F(LMDBTreeStoreTest, can_write_and_retrieve_block_numbers_with_duplicate_in { // check the blocks again - LMDBTreeReadTransaction::Ptr transaction = store.create_read_transaction(); + LMDBReadTransaction::Ptr transaction = store.create_read_transaction(); block_number_t readBack = 0; EXPECT_TRUE(store.find_block_for_index(5, readBack, *transaction)); EXPECT_EQ(readBack, 1); diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_write_transaction.cpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_write_transaction.cpp deleted file mode 100644 index 5e524ca2fff0..000000000000 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_write_transaction.cpp +++ /dev/null @@ -1,54 +0,0 @@ - - -#include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_write_transaction.hpp" - -#include "barretenberg/crypto/merkle_tree/lmdb_store/callbacks.hpp" -#include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_database.hpp" -#include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_environment.hpp" -#include "barretenberg/crypto/merkle_tree/lmdb_store/queries.hpp" -#include "lmdb.h" -#include - -namespace bb::crypto::merkle_tree { - -LMDBTreeWriteTransaction::LMDBTreeWriteTransaction(LMDBEnvironment::SharedPtr env) - : LMDBTransaction(std::move(env)) -{} - 
-LMDBTreeWriteTransaction::~LMDBTreeWriteTransaction()
-{
-    try_abort();
-}
-
-void LMDBTreeWriteTransaction::commit()
-{
-    if (state == TransactionState::ABORTED) {
-        throw std::runtime_error("Tried to commit reverted transaction");
-    }
-    call_lmdb_func("mdb_txn_commit", mdb_txn_commit, _transaction);
-    state = TransactionState::COMMITTED;
-}
-
-void LMDBTreeWriteTransaction::try_abort()
-{
-    if (state != TransactionState::OPEN) {
-        return;
-    }
-    LMDBTransaction::abort();
-}
-
-void LMDBTreeWriteTransaction::put_value(std::vector<uint8_t>& key, std::vector<uint8_t>& data, const LMDBDatabase& db)
-{
-    lmdb_queries::put_value(key, data, db, *this);
-}
-
-void LMDBTreeWriteTransaction::put_value(std::vector<uint8_t>& key, const index_t& data, const LMDBDatabase& db)
-{
-    lmdb_queries::put_value(key, data, db, *this);
-}
-
-void LMDBTreeWriteTransaction::delete_value(std::vector<uint8_t>& key, const LMDBDatabase& db)
-{
-    lmdb_queries::delete_value(key, db, *this);
-}
-} // namespace bb::crypto::merkle_tree
diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_write_transaction.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_write_transaction.hpp
deleted file mode 100644
index 0ad9cdd5a9f3..000000000000
--- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_write_transaction.hpp
+++ /dev/null
@@ -1,85 +0,0 @@
-#pragma once
-#include "barretenberg/common/serialize.hpp"
-#include "barretenberg/crypto/merkle_tree/lmdb_store/callbacks.hpp"
-#include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_database.hpp"
-#include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_environment.hpp"
-#include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_transaction.hpp"
-#include "barretenberg/crypto/merkle_tree/lmdb_store/queries.hpp"
-#include "barretenberg/crypto/merkle_tree/types.hpp"
-#include "lmdb.h"
-#include <memory>
-#include <vector>
-
-namespace bb::crypto::merkle_tree {
-
-/**
- * RAII wrapper for an LMDB write transaction.
- * Provides methods for writing values by their key.
- * Must be either committed to persist the changes or aborted to roll them back.
- * Will automatically abort the transaction during destruction if changes have not been committed.
- */
-
-class LMDBTreeWriteTransaction : public LMDBTransaction {
-  public:
-    using Ptr = std::unique_ptr<LMDBTreeWriteTransaction>;
-    using SharedPtr = std::shared_ptr<LMDBTreeWriteTransaction>;
-
-    LMDBTreeWriteTransaction(LMDBEnvironment::SharedPtr env);
-    LMDBTreeWriteTransaction(const LMDBTreeWriteTransaction& other) = delete;
-    LMDBTreeWriteTransaction(LMDBTreeWriteTransaction&& other) = delete;
-    LMDBTreeWriteTransaction& operator=(const LMDBTreeWriteTransaction& other) = delete;
-    LMDBTreeWriteTransaction& operator=(LMDBTreeWriteTransaction&& other) = delete;
-    ~LMDBTreeWriteTransaction() override;
-
-    template <typename T> void put_value(T& key, std::vector<uint8_t>& data, const LMDBDatabase& db);
-
-    template <typename T> void put_value(T& key, const index_t& data, const LMDBDatabase& db);
-
-    void put_value(std::vector<uint8_t>& key, std::vector<uint8_t>& data, const LMDBDatabase& db);
-
-    void put_value(std::vector<uint8_t>& key, const index_t& data, const LMDBDatabase& db);
-
-    template <typename T> void delete_value(T& key, const LMDBDatabase& db);
-
-    void delete_value(std::vector<uint8_t>& key, const LMDBDatabase& db);
-
-    template <typename T> void delete_all_values_greater_or_equal_key(const T& key, const LMDBDatabase& db) const;
-
-    template <typename T> void delete_all_values_lesser_or_equal_key(const T& key, const LMDBDatabase& db) const;
-
-    void commit();
-
-    void try_abort();
-};
-
-template <typename T>
-void LMDBTreeWriteTransaction::put_value(T& key, std::vector<uint8_t>& data, const LMDBDatabase& db)
-{
-    std::vector<uint8_t> keyBuffer = serialise_key(key);
-    put_value(keyBuffer, data, db);
-}
-
-template <typename T> void LMDBTreeWriteTransaction::put_value(T& key, const index_t& data, const LMDBDatabase& db)
-{
-    std::vector<uint8_t> keyBuffer = serialise_key(key);
-    put_value(keyBuffer, data, db);
-}
-
-template <typename T> void LMDBTreeWriteTransaction::delete_value(T& key, const LMDBDatabase& db)
-{
-    std::vector<uint8_t> keyBuffer = serialise_key(key);
-    lmdb_queries::delete_value(keyBuffer, db, *this);
-}
-
-template <typename T>
-void LMDBTreeWriteTransaction::delete_all_values_greater_or_equal_key(const T& key, const LMDBDatabase& db) const
-{
-    lmdb_queries::delete_all_values_greater_or_equal_key(key, db, *this);
-}
-
-template <typename T>
-void LMDBTreeWriteTransaction::delete_all_values_lesser_or_equal_key(const T& key, const LMDBDatabase& db) const
-{
-    lmdb_queries::delete_all_values_lesser_or_equal_key(key, db, *this);
-}
-} // namespace bb::crypto::merkle_tree
\ No newline at end of file
diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/queries.cpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/queries.cpp
deleted file mode 100644
index 939cd58dde14..000000000000
--- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/queries.cpp
+++ /dev/null
@@ -1,90 +0,0 @@
-#include "barretenberg/crypto/merkle_tree/lmdb_store/queries.hpp"
-#include "barretenberg/crypto/merkle_tree/lmdb_store/callbacks.hpp"
-#include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_write_transaction.hpp"
-#include <vector>
-
-namespace bb::crypto::merkle_tree::lmdb_queries {
-
-void put_value(std::vector<uint8_t>& key,
-               std::vector<uint8_t>& data,
-               const LMDBDatabase& db,
-               bb::crypto::merkle_tree::LMDBTreeWriteTransaction& tx)
-{
-    MDB_val dbKey;
-    dbKey.mv_size = key.size();
-    dbKey.mv_data = (void*)key.data();
-
-    MDB_val dbVal;
-    dbVal.mv_size = data.size();
-    dbVal.mv_data = (void*)data.data();
-    call_lmdb_func("mdb_put", mdb_put, tx.underlying(), db.underlying(), &dbKey, &dbVal, 0U);
-}
-
-void put_value(std::vector<uint8_t>& key,
-               const index_t& data,
-               const LMDBDatabase& db,
-               bb::crypto::merkle_tree::LMDBTreeWriteTransaction& tx)
-{
-    MDB_val dbKey;
-    dbKey.mv_size = key.size();
-    dbKey.mv_data = (void*)key.data();
-
-    // use the serialise key method for serialising the index
-    std::vector<uint8_t> serialised = serialise_key(data);
-
-    MDB_val dbVal;
-    dbVal.mv_size = serialised.size();
-    dbVal.mv_data = (void*)serialised.data();
-    call_lmdb_func("mdb_put", mdb_put, tx.underlying(), db.underlying(), &dbKey, &dbVal, 0U);
-}
-
-void delete_value(std::vector<uint8_t>& key,
-                  const LMDBDatabase& db,
-                  bb::crypto::merkle_tree::LMDBTreeWriteTransaction& tx)
-{
-    MDB_val dbKey;
-    dbKey.mv_size = key.size();
-    dbKey.mv_data = (void*)key.data();
-
-    MDB_val* dbVal = nullptr;
-    int code = call_lmdb_func_with_return(mdb_del, tx.underlying(), db.underlying(), &dbKey, dbVal);
-    if (code != 0 && code != MDB_NOTFOUND) {
-        throw_error("mdb_del", code);
-    }
-}
-
-bool get_value(std::vector<uint8_t>& key,
-               std::vector<uint8_t>& data,
-               const LMDBDatabase& db,
-               const bb::crypto::merkle_tree::LMDBTransaction& tx)
-{
-    MDB_val dbKey;
-    dbKey.mv_size = key.size();
-    dbKey.mv_data = (void*)key.data();
-
-    MDB_val dbVal;
-    if (!call_lmdb_func(mdb_get, tx.underlying(), db.underlying(), &dbKey, &dbVal)) {
-        return false;
-    }
-    copy_to_vector(dbVal, data);
-    return true;
-}
-
-bool get_value(std::vector<uint8_t>& key,
-               index_t& data,
-               const LMDBDatabase& db,
-               const bb::crypto::merkle_tree::LMDBTransaction& tx)
-{
-    MDB_val dbKey;
-    dbKey.mv_size = key.size();
-    dbKey.mv_data = (void*)key.data();
-
-    MDB_val dbVal;
-    if (!call_lmdb_func(mdb_get, tx.underlying(), db.underlying(), &dbKey, &dbVal)) {
-        return false;
-    }
-    // use the deserialise key method for deserialising the index
-    deserialise_key(dbVal.mv_data, data);
-    return true;
-}
-} // namespace bb::crypto::merkle_tree::lmdb_queries
\ No newline at end of file
diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp
index 5fce392b619f..76034ab40bd3 100644
--- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp
+++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp
@@ -1,11 +1,11 @@
 #pragma once
 #include "./tree_meta.hpp"
 #include "barretenberg/crypto/merkle_tree/indexed_tree/indexed_leaf.hpp"
-#include "barretenberg/crypto/merkle_tree/lmdb_store/callbacks.hpp"
-#include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_transaction.hpp"
 #include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.hpp"
 #include "barretenberg/crypto/merkle_tree/types.hpp"
 #include "barretenberg/ecc/curves/bn254/fr.hpp"
+#include "barretenberg/lmdblib/lmdb_helpers.hpp"
+#include "barretenberg/lmdblib/lmdb_transaction.hpp"
 #include "barretenberg/numeric/uint256/uint256.hpp"
 #include "barretenberg/serialize/msgpack.hpp"
 #include "barretenberg/stdlib/primitives/field/field.hpp"
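The deleted queries.cpp above shows the pattern the new lmdblib keeps: keys and values travel as byte vectors and are wrapped in MDB_val structs only at the LMDB boundary. A minimal sketch of that round trip through the raw C API, assuming only an open MDB_txn and MDB_dbi (raw_put is an illustrative name, not part of this PR):

    #include <lmdb.h>
    #include <cstdint>
    #include <stdexcept>
    #include <vector>

    // Wrap the buffers in MDB_val and hand them to mdb_put, as the deleted
    // put_value helpers did; the real code reports failures via throw_error().
    void raw_put(MDB_txn* txn, MDB_dbi dbi, std::vector<uint8_t>& key, std::vector<uint8_t>& value)
    {
        MDB_val dbKey{ key.size(), (void*)key.data() };
        MDB_val dbVal{ value.size(), (void*)value.data() };
        int code = mdb_put(txn, dbi, &dbKey, &dbVal, 0U);
        if (code != 0) {
            throw std::runtime_error("mdb_put failed");
        }
    }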
diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/types.hpp b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/types.hpp
index 54a1fa3e9be7..ae5feb555fa4 100644
--- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/types.hpp
+++ b/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/types.hpp
@@ -1,12 +1,20 @@
 #pragma once
 #include "barretenberg/ecc/curves/bn254/fr.hpp"
+#include "barretenberg/lmdblib/types.hpp"
 #include "lmdb.h"
 #include <cstdint>
 #include <string>
 
 namespace bb::crypto::merkle_tree {
+
+using namespace bb::lmdblib;
+
 using index_t = uint64_t;
 using block_number_t = uint64_t;
+using LeafIndexKeyType = uint64_t;
+using BlockMetaKeyType = uint64_t;
+using FrKeyType = uint256_t;
+using MetaKeyType = uint8_t;
 
 struct RequestContext {
     bool includeUncommitted;
@@ -51,53 +59,6 @@
 const std::string LEAF_PREIMAGES_DB = "leaf preimages";
 const std::string LEAF_INDICES_DB = "leaf indices";
 const std::string BLOCK_INDICES_DB = "block indices";
 
-struct DBStats {
-    std::string name;
-    uint64_t numDataItems;
-    uint64_t totalUsedSize;
-
-    DBStats() = default;
-    DBStats(const DBStats& other) = default;
-    DBStats(DBStats&& other) noexcept { *this = std::move(other); }
-    ~DBStats() = default;
-    DBStats(std::string name, MDB_stat& stat)
-        : name(std::move(name))
-        , numDataItems(stat.ms_entries)
-        , totalUsedSize(stat.ms_psize * (stat.ms_branch_pages + stat.ms_leaf_pages + stat.ms_overflow_pages))
-    {}
-    DBStats(const std::string& name, uint64_t numDataItems, uint64_t totalUsedSize)
-        : name(name)
-        , numDataItems(numDataItems)
-        , totalUsedSize(totalUsedSize)
-    {}
-
-    MSGPACK_FIELDS(name, numDataItems, totalUsedSize)
-
-    bool operator==(const DBStats& other) const
-    {
-        return name == other.name && numDataItems == other.numDataItems && totalUsedSize == other.totalUsedSize;
-    }
-
-    DBStats& operator=(const DBStats& other) = default;
-
-    DBStats& operator=(DBStats&& other) noexcept
-    {
-        if (this != &other) {
-            name = std::move(other.name);
-            numDataItems = other.numDataItems;
-            totalUsedSize = other.totalUsedSize;
-        }
-        return *this;
-    }
-
-    friend std::ostream& operator<<(std::ostream& os, const DBStats& stats)
-    {
-        os << "DB " << stats.name << ", num items: " << stats.numDataItems
-           << ", total used size: " << stats.totalUsedSize;
-        return os;
-    }
-};
-
 struct TreeDBStats {
     uint64_t mapSize;
     DBStats blocksDBStats;
diff --git a/barretenberg/cpp/src/barretenberg/lmdblib/CMakeLists.txt b/barretenberg/cpp/src/barretenberg/lmdblib/CMakeLists.txt
new file mode 100644
index 000000000000..8b11beedbdf6
--- /dev/null
+++ b/barretenberg/cpp/src/barretenberg/lmdblib/CMakeLists.txt
@@ -0,0 +1,12 @@
+barretenberg_module(lmdblib lmdb numeric)
+
+# add explicit dependencies to external C lib
+add_dependencies(lmdblib lmdb)
+add_dependencies(lmdblib lmdb_repo)
+add_dependencies(lmdblib_objects lmdb)
+add_dependencies(lmdblib_objects lmdb_repo)
+add_dependencies(lmdblib_tests lmdb)
+add_dependencies(lmdblib_tests lmdb_repo)
+add_dependencies(lmdblib_test_objects lmdb)
+add_dependencies(lmdblib_test_objects lmdb_repo)
+
diff --git a/barretenberg/cpp/src/barretenberg/lmdblib/fixtures.hpp b/barretenberg/cpp/src/barretenberg/lmdblib/fixtures.hpp
new file mode 100644
index 000000000000..2e6cc670dba4
--- /dev/null
+++ b/barretenberg/cpp/src/barretenberg/lmdblib/fixtures.hpp
@@ -0,0 +1,40 @@
+#include "barretenberg/lmdblib/types.hpp"
+#include "barretenberg/numeric/random/engine.hpp"
+#include <sstream>
+
+namespace bb::lmdblib {
+const uint32_t NUM_VALUES = 1024;
+inline auto& engine = numeric::get_debug_randomness();
+inline auto& random_engine = numeric::get_randomness();
+
+inline std::string random_string()
+{
+    std::stringstream ss;
+    ss << random_engine.get_random_uint32();
+    return ss.str();
+}
+
+inline std::string random_temp_directory()
+{
+    std::stringstream ss;
+    ss << "/tmp/lmdb/" << random_string();
+    return ss.str();
+}
+
+inline std::vector<uint8_t> serialise(std::string key)
+{
+    std::vector<uint8_t> data(key.begin(), key.end());
+    return data;
+}
+
+inline Key get_key(int64_t keyCount)
+{
+    return serialise((std::stringstream() << "Key" << keyCount).str());
+}
+
+inline Value get_value(int64_t keyCount, int64_t valueCount)
+{
+    return serialise((std::stringstream() << "Key" << keyCount << "Data" << valueCount).str());
+}
+
+} // namespace bb::lmdblib
\ No newline at end of file
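The fixtures above derive deterministic keys and values from counters, so the tests that follow can predict exactly what a read should return: get_key(7) yields the bytes of "Key7" and get_value(7, 0) the bytes of "Key7Data0". A standalone sketch of the same construction (Key and Value are assumed here to be byte-vector aliases from lmdblib/types.hpp, which this diff does not show):

    #include <cstdint>
    #include <string>
    #include <vector>

    using Key = std::vector<uint8_t>;   // assumed alias from barretenberg/lmdblib/types.hpp
    using Value = std::vector<uint8_t>; // assumed alias

    // Equivalent of serialise()/get_key()/get_value() in the fixtures above.
    Key make_key(int64_t keyCount)
    {
        std::string s = "Key" + std::to_string(keyCount);
        return Key(s.begin(), s.end());
    }

    Value make_value(int64_t keyCount, int64_t valueCount)
    {
        std::string s = "Key" + std::to_string(keyCount) + "Data" + std::to_string(valueCount);
        return Value(s.begin(), s.end());
    }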
diff --git a/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_cursor.cpp b/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_cursor.cpp
new file mode 100644
index 000000000000..62489c0c1cbf
--- /dev/null
+++ b/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_cursor.cpp
@@ -0,0 +1,74 @@
+#include "barretenberg/lmdblib/lmdb_cursor.hpp"
+#include "barretenberg/lmdblib/lmdb_helpers.hpp"
+#include "barretenberg/lmdblib/lmdb_read_transaction.hpp"
+#include "barretenberg/lmdblib/queries.hpp"
+#include "lmdb.h"
+#include <mutex>
+
+namespace bb::lmdblib {
+LMDBCursor::LMDBCursor(LMDBReadTransaction::SharedPtr tx, LMDBDatabase::SharedPtr db, uint64_t id)
+    : _tx(tx)
+    , _db(db)
+    , _id(id)
+{
+    call_lmdb_func("mdb_cursor_open", mdb_cursor_open, tx->underlying(), db->underlying(), &_cursor);
+}
+
+LMDBCursor::~LMDBCursor()
+{
+    call_lmdb_func(mdb_cursor_close, _cursor);
+}
+
+MDB_cursor* LMDBCursor::underlying() const
+{
+    return _cursor;
+}
+
+uint64_t LMDBCursor::id() const
+{
+    return _id;
+}
+
+bool LMDBCursor::set_at_key(Key& key) const
+{
+    std::lock_guard<std::mutex> lock(_mtx);
+    return lmdb_queries::set_at_key(*this, key);
+}
+
+bool LMDBCursor::set_at_key_gte(Key& key) const
+{
+    std::lock_guard<std::mutex> lock(_mtx);
+    return lmdb_queries::set_at_key_gte(*this, key);
+}
+
+bool LMDBCursor::set_at_start() const
+{
+    std::lock_guard<std::mutex> lock(_mtx);
+    return lmdb_queries::set_at_start(*this);
+}
+
+bool LMDBCursor::set_at_end() const
+{
+    std::lock_guard<std::mutex> lock(_mtx);
+    return lmdb_queries::set_at_end(*this);
+}
+
+bool LMDBCursor::read_next(uint64_t numKeysToRead, KeyDupValuesVector& keyValuePairs) const
+{
+    std::lock_guard<std::mutex> lock(_mtx);
+    if (_db->duplicate_keys_permitted()) {
+        return lmdb_queries::read_next_dup(*this, keyValuePairs, numKeysToRead);
+    }
+    return lmdb_queries::read_next(*this, keyValuePairs, numKeysToRead);
+}
+
+bool LMDBCursor::read_prev(uint64_t numKeysToRead, KeyDupValuesVector& keyValuePairs) const
+{
+    std::lock_guard<std::mutex> lock(_mtx);
+    if (_db->duplicate_keys_permitted()) {
+        return lmdb_queries::read_prev_dup(*this, keyValuePairs, numKeysToRead);
+    }
+    return lmdb_queries::read_prev(*this, keyValuePairs, numKeysToRead);
+}
+
+} // namespace bb::lmdblib
diff --git a/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_cursor.hpp b/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_cursor.hpp
new file mode 100644
index 000000000000..482deff8c62d
--- /dev/null
+++ b/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_cursor.hpp
@@ -0,0 +1,40 @@
+#pragma once
+#include "barretenberg/lmdblib/types.hpp"
+#include "lmdb.h"
+#include <memory>
+#include <mutex>
+
+namespace bb::lmdblib {
+class LMDBReadTransaction;
+class LMDBDatabase;
+class LMDBCursor {
+  public:
+    using Ptr = std::unique_ptr<LMDBCursor>;
+    using SharedPtr = std::shared_ptr<LMDBCursor>;
+
+    LMDBCursor(std::shared_ptr<LMDBReadTransaction> tx, std::shared_ptr<LMDBDatabase> db, uint64_t id);
+    LMDBCursor(const LMDBCursor& other) = delete;
+    LMDBCursor(LMDBCursor&& other) = delete;
+    LMDBCursor& operator=(const LMDBCursor& other) = delete;
+    LMDBCursor& operator=(LMDBCursor&& other) = delete;
+    ~LMDBCursor();
+
+    MDB_cursor* underlying() const;
+
+    uint64_t id() const;
+
+    bool set_at_key(Key& key) const;
+    bool set_at_key_gte(Key& key) const;
+    bool set_at_start() const;
+    bool set_at_end() const;
+    bool read_next(uint64_t numKeysToRead, KeyDupValuesVector& keyValuePairs) const;
+    bool read_prev(uint64_t numKeysToRead, KeyDupValuesVector& keyValuePairs) const;
+
+  private:
+    mutable std::mutex _mtx;
+    std::shared_ptr<LMDBReadTransaction> _tx;
+    std::shared_ptr<LMDBDatabase> _db;
+    uint64_t _id;
+    MDB_cursor* _cursor;
+};
+} // namespace bb::lmdblib
diff --git a/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_database.cpp b/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_database.cpp
new file mode 100644
index 000000000000..779848002129
--- /dev/null
+++ b/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_database.cpp
@@ -0,0 +1,65 @@
+#include "barretenberg/lmdblib/lmdb_database.hpp"
+#include "barretenberg/lmdblib/lmdb_db_transaction.hpp"
+#include "barretenberg/lmdblib/lmdb_environment.hpp"
+#include "barretenberg/lmdblib/lmdb_helpers.hpp"
+#include "barretenberg/lmdblib/lmdb_read_transaction.hpp"
+#include "barretenberg/lmdblib/types.hpp"
+#include "lmdb.h"
+#include <utility>
+
+namespace bb::lmdblib {
+LMDBDatabase::LMDBDatabase(LMDBEnvironment::SharedPtr env,
+                           const LMDBDatabaseCreationTransaction& transaction,
+                           const std::string& name,
+                           bool integerKeys,
+                           bool reverseKeys,
+                           bool duplicateKeysPermitted,
+                           MDB_cmp_func* cmp)
+    : dbName(name)
+    , duplicateKeysPermitted(duplicateKeysPermitted)
+    , environment(std::move(env))
+{
+    unsigned int flags = MDB_CREATE;
+    if (integerKeys) {
+        flags |= MDB_INTEGERKEY;
+    }
+    if (reverseKeys) {
+        flags |= MDB_REVERSEKEY;
+    }
+    if (duplicateKeysPermitted) {
+        flags |= MDB_DUPSORT;
+    }
+    call_lmdb_func("mdb_dbi_open", mdb_dbi_open, transaction.underlying(), name.c_str(), flags, &_dbi);
+    if (cmp != nullptr) {
+        call_lmdb_func("mdb_set_compare", mdb_set_compare, transaction.underlying(), _dbi, cmp);
+    }
+}
+
+LMDBDatabase::~LMDBDatabase()
+{
+    call_lmdb_func(mdb_dbi_close, environment->underlying(), _dbi);
+}
+
+const MDB_dbi& LMDBDatabase::underlying() const
+{
+    return _dbi;
+}
+
+const std::string& LMDBDatabase::name() const
+{
+    return dbName;
+}
+
+bool LMDBDatabase::duplicate_keys_permitted() const
+{
+    return duplicateKeysPermitted;
+}
+
+DBStats LMDBDatabase::get_stats(LMDBReadTransaction& tx)
+{
+    MDB_stat stat;
+    call_lmdb_func(mdb_stat, tx.underlying(), underlying(), &stat);
+    return DBStats(name(), stat);
+}
+
+} // namespace bb::lmdblib
\ No newline at end of file
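LMDBCursor serialises every operation behind its own mutex and picks the dup-aware query path when the database was opened with MDB_DUPSORT. The positioning calls presumably reduce to mdb_cursor_get with the standard cursor ops; a hedged sketch of what set_at_key_gte would amount to in the raw C API (first_at_or_after is an illustrative name, not part of this PR):

    #include <lmdb.h>
    #include <cstdint>
    #include <vector>

    // MDB_SET_RANGE positions the cursor at the first key >= the supplied key;
    // a non-zero return (MDB_NOTFOUND) means we ran off the end of the database.
    bool first_at_or_after(MDB_cursor* cursor, std::vector<uint8_t>& key, MDB_val& foundKey, MDB_val& foundVal)
    {
        MDB_val k{ key.size(), (void*)key.data() };
        if (mdb_cursor_get(cursor, &k, &foundVal, MDB_SET_RANGE) != 0) {
            return false;
        }
        foundKey = k;
        return true;
    }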
diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_database.hpp b/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_database.hpp
similarity index 67%
rename from barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_database.hpp
rename to barretenberg/cpp/src/barretenberg/lmdblib/lmdb_database.hpp
index 8f071901414d..2f8960647584 100644
--- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_database.hpp
+++ b/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_database.hpp
@@ -1,10 +1,11 @@
 #pragma once
-#include "barretenberg/crypto/merkle_tree/lmdb_store/callbacks.hpp"
-#include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_environment.hpp"
+#include "barretenberg/lmdblib/lmdb_environment.hpp"
+#include "barretenberg/lmdblib/types.hpp"
 
-namespace bb::crypto::merkle_tree {
+namespace bb::lmdblib {
 
 class LMDBDatabaseCreationTransaction;
+class LMDBReadTransaction;
 
 /**
  * RAII wrapper around the opening and closing of an LMDB database
  * Contains a reference to its LMDB environment
@@ -19,6 +20,7 @@
                  const std::string& name,
                  bool integerKeys = false,
                  bool reverseKeys = false,
+                 bool duplicateKeysPermitted = false,
                  MDB_cmp_func* cmp = nullptr);
 
     LMDBDatabase(const LMDBDatabase& other) = delete;
@@ -29,9 +31,14 @@
     ~LMDBDatabase();
 
     const MDB_dbi& underlying() const;
+    const std::string& name() const;
+    bool duplicate_keys_permitted() const;
+    DBStats get_stats(LMDBReadTransaction& tx);
 
   private:
+    std::string dbName;
+    bool duplicateKeysPermitted;
     MDB_dbi _dbi;
-    LMDBEnvironment::SharedPtr _environment;
+    LMDBEnvironment::SharedPtr environment;
 };
-} // namespace bb::crypto::merkle_tree
+} // namespace bb::lmdblib
diff --git a/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_db_transaction.cpp b/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_db_transaction.cpp
new file mode 100644
index 000000000000..549e3c78baad
--- /dev/null
+++ b/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_db_transaction.cpp
@@ -0,0 +1,28 @@
+#include "barretenberg/lmdblib/lmdb_db_transaction.hpp"
+
+#include "barretenberg/lmdblib/lmdb_helpers.hpp"
+#include <utility>
+
+namespace bb::lmdblib {
+LMDBDatabaseCreationTransaction::LMDBDatabaseCreationTransaction(LMDBEnvironment::SharedPtr env)
+    : LMDBTransaction(std::move(env))
+{}
+LMDBDatabaseCreationTransaction::~LMDBDatabaseCreationTransaction()
+{
+    try_abort();
+    _environment->release_writer();
+}
+void LMDBDatabaseCreationTransaction::commit()
+{
+    if (state == TransactionState::ABORTED) {
+        throw std::runtime_error("Tried to commit reverted transaction");
+    }
+    call_lmdb_func("mdb_txn_commit", mdb_txn_commit, _transaction);
+    state = TransactionState::COMMITTED;
+}
+
+void LMDBDatabaseCreationTransaction::try_abort()
+{
+    LMDBTransaction::abort();
+}
+} // namespace bb::lmdblib
\ No newline at end of file
diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_db_transaction.hpp b/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_db_transaction.hpp
similarity index 75%
rename from barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_db_transaction.hpp
rename to barretenberg/cpp/src/barretenberg/lmdblib/lmdb_db_transaction.hpp
index b98306eb61bd..e0fd2a3ad25c 100644
--- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_db_transaction.hpp
+++ b/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_db_transaction.hpp
@@ -1,7 +1,7 @@
 #pragma once
-#include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_transaction.hpp"
+#include "barretenberg/lmdblib/lmdb_transaction.hpp"
 
-namespace bb::crypto::merkle_tree {
+namespace bb::lmdblib {
 
 /*
  * RAII wrapper to construct a transaction for the purpose of creating/opening a database
@@ -16,8 +16,9 @@
     LMDBDatabaseCreationTransaction& operator=(const LMDBDatabaseCreationTransaction& other) = delete;
     LMDBDatabaseCreationTransaction& operator=(LMDBDatabaseCreationTransaction&& other) = delete;
 
-    ~LMDBDatabaseCreationTransaction() override = default;
-    void commit() const;
+    ~LMDBDatabaseCreationTransaction() override;
+    void commit();
+    void try_abort();
 };
-} // namespace bb::crypto::merkle_tree
\ No newline at end of file
+} // namespace bb::lmdblib
\ No newline at end of file
diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_environment.cpp b/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_environment.cpp
similarity index 68%
rename from barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_environment.cpp
rename to barretenberg/cpp/src/barretenberg/lmdblib/lmdb_environment.cpp
index ab4a2b188fbc..921dfdfdcc39 100644
--- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_environment.cpp
+++ b/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_environment.cpp
@@ -1,16 +1,19 @@
-#include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_environment.hpp"
-#include "barretenberg/crypto/merkle_tree/lmdb_store/callbacks.hpp"
+#include "barretenberg/lmdblib/lmdb_environment.hpp"
+#include "barretenberg/lmdblib/lmdb_helpers.hpp"
 #include "lmdb.h"
+#include
 #include
 #include
 
-namespace bb::crypto::merkle_tree {
+namespace bb::lmdblib {
+
 LMDBEnvironment::LMDBEnvironment(const std::string& directory,
                                  uint64_t mapSizeKB,
                                  uint32_t maxNumDBs,
                                  uint32_t maxNumReaders)
-    : _maxReaders(maxNumReaders)
-    , _numReaders(0)
+    : _id(0)
+    , _readGuard(maxNumReaders)
+    , _writeGuard(1) // LMDB only permits one write transaction at a time
 {
     call_lmdb_func("mdb_env_create", mdb_env_create, &_mdbEnv);
     uint64_t kb = 1024;
@@ -34,18 +37,22 @@
 
 void LMDBEnvironment::wait_for_reader()
 {
-    std::unique_lock<std::mutex> lock(_readersLock);
-    if (_numReaders >= _maxReaders) {
-        _readersCondition.wait(lock, [&] { return _numReaders < _maxReaders; });
-    }
-    ++_numReaders;
+    _readGuard.wait();
 }
 
 void LMDBEnvironment::release_reader()
 {
-    std::unique_lock<std::mutex> lock(_readersLock);
-    --_numReaders;
-    _readersCondition.notify_one();
+    _readGuard.release();
+}
+
+void LMDBEnvironment::wait_for_writer()
+{
+    _writeGuard.wait();
+}
+
+void LMDBEnvironment::release_writer()
+{
+    _writeGuard.release();
 }
 
 LMDBEnvironment::~LMDBEnvironment()
@@ -58,4 +65,10 @@
 MDB_env* LMDBEnvironment::underlying() const
 {
     return _mdbEnv;
 }
 
-} // namespace bb::crypto::merkle_tree
+uint64_t LMDBEnvironment::get_map_size() const
+{
+    MDB_envinfo info;
+    call_lmdb_func(mdb_env_info, _mdbEnv, &info);
+    return info.me_mapsize;
+}
+} // namespace bb::lmdblib
diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_environment.hpp b/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_environment.hpp
similarity index 60%
rename from barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_environment.hpp
rename to barretenberg/cpp/src/barretenberg/lmdblib/lmdb_environment.hpp
index f6c10dc88fda..9cea0e5edbb4 100644
--- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_environment.hpp
+++ b/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_environment.hpp
@@ -1,11 +1,14 @@
 #pragma once
+#include
 #include
+#include
 #include
 #include
 #include
 #include
-namespace bb::crypto::merkle_tree {
+namespace bb::lmdblib {
+
 /*
  * RAII wrapper around an LMDB environment.
  * Opens/creates the environment and manages read access to the environment.
@@ -37,11 +40,46 @@
 
     void release_reader();
 
+    void wait_for_writer();
+
+    void release_writer();
+
+    uint64_t getNextId() { return _id++; }
+
+    uint64_t get_map_size() const;
+
   private:
+    std::atomic_uint64_t _id;
     MDB_env* _mdbEnv;
-    uint32_t _maxReaders;
-    uint32_t _numReaders;
-    std::mutex _readersLock;
-    std::condition_variable _readersCondition;
+
+    struct ResourceGuard {
+        uint32_t _maxAllowed;
+        uint32_t _current;
+        std::mutex _lock;
+        std::condition_variable _condition;
+
+        ResourceGuard(uint32_t maxAllowed)
+            : _maxAllowed(maxAllowed)
+            , _current(0)
+        {}
+
+        void wait()
+        {
+            std::unique_lock<std::mutex> lock(_lock);
+            if (_current >= _maxAllowed) {
+                _condition.wait(lock, [&] { return _current < _maxAllowed; });
+            }
+            ++_current;
+        }
+
+        void release()
+        {
+            std::unique_lock<std::mutex> lock(_lock);
+            --_current;
+            _condition.notify_one();
+        }
+    };
+    ResourceGuard _readGuard;
+    ResourceGuard _writeGuard;
 };
-} // namespace bb::crypto::merkle_tree
\ No newline at end of file
+} // namespace bb::lmdblib
\ No newline at end of file
diff --git a/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_environment.test.cpp b/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_environment.test.cpp
new file mode 100644
index 000000000000..1eb1602cf2cc
--- /dev/null
+++ b/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_environment.test.cpp
@@ -0,0 +1,212 @@
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "barretenberg/common/serialize.hpp"
+#include "barretenberg/common/streams.hpp"
+#include "barretenberg/common/test.hpp"
+#include "barretenberg/lmdblib/fixtures.hpp"
+#include "barretenberg/lmdblib/lmdb_database.hpp"
+#include "barretenberg/lmdblib/lmdb_db_transaction.hpp"
+#include "barretenberg/lmdblib/lmdb_environment.hpp"
+#include "barretenberg/lmdblib/lmdb_read_transaction.hpp"
+#include "barretenberg/lmdblib/lmdb_write_transaction.hpp"
+#include "barretenberg/lmdblib/queries.hpp"
+
+using namespace bb::lmdblib;
+
+class LMDBEnvironmentTest : public testing::Test {
+  protected:
+    void SetUp() override
+    {
+        _directory = random_temp_directory();
+        _mapSize = 1024 * 1024;
+        _maxReaders = 16;
+        std::filesystem::create_directories(_directory);
+    }
+
+    void TearDown() override { std::filesystem::remove_all(_directory); }
+
+    static std::string _directory;
+    static uint32_t _maxReaders;
+    static uint64_t _mapSize;
+};
+
+std::string LMDBEnvironmentTest::_directory;
+uint32_t LMDBEnvironmentTest::_maxReaders;
+uint64_t LMDBEnvironmentTest::_mapSize;
+
+TEST_F(LMDBEnvironmentTest, can_create_environment)
+{
+    EXPECT_NO_THROW(LMDBEnvironment environment(
+        LMDBEnvironmentTest::_directory, LMDBEnvironmentTest::_mapSize, 1, LMDBEnvironmentTest::_maxReaders));
+}
+
+TEST_F(LMDBEnvironmentTest, can_create_database)
+{
+    LMDBEnvironment::SharedPtr environment = std::make_shared<LMDBEnvironment>(
+        LMDBEnvironmentTest::_directory, LMDBEnvironmentTest::_mapSize, 1, LMDBEnvironmentTest::_maxReaders);
+
+    {
+        environment->wait_for_writer();
+        LMDBDatabaseCreationTransaction tx(environment);
+        LMDBDatabase::SharedPtr db = std::make_unique<LMDBDatabase>(environment, tx, "DB", false, false);
+        EXPECT_NO_THROW(tx.commit());
+    }
+}
+
+TEST_F(LMDBEnvironmentTest, can_write_to_database)
+{
+    LMDBEnvironment::SharedPtr environment = std::make_shared<LMDBEnvironment>(
+        LMDBEnvironmentTest::_directory, LMDBEnvironmentTest::_mapSize, 1, LMDBEnvironmentTest::_maxReaders);
+
+    LMDBDatabase::SharedPtr db;
+    {
+        environment->wait_for_writer();
+        LMDBDatabaseCreationTransaction tx(environment);
+        db = 
std::make_unique(environment, tx, "DB", false, false); + EXPECT_NO_THROW(tx.commit()); + } + + { + environment->wait_for_writer(); + LMDBWriteTransaction::Ptr tx = std::make_unique(environment); + auto key = get_key(0); + auto data = get_value(0, 0); + EXPECT_NO_THROW(tx->put_value(key, data, *db)); + EXPECT_NO_THROW(tx->commit()); + } +} + +TEST_F(LMDBEnvironmentTest, can_read_from_database) +{ + LMDBEnvironment::SharedPtr environment = std::make_shared( + LMDBEnvironmentTest::_directory, LMDBEnvironmentTest::_mapSize, 1, LMDBEnvironmentTest::_maxReaders); + LMDBDatabase::SharedPtr db; + + { + environment->wait_for_writer(); + LMDBDatabaseCreationTransaction tx(environment); + db = std::make_unique(environment, tx, "DB", false, false); + EXPECT_NO_THROW(tx.commit()); + } + + { + environment->wait_for_writer(); + LMDBWriteTransaction::Ptr tx = std::make_unique(environment); + auto key = get_key(0); + auto data = get_value(0, 0); + EXPECT_NO_THROW(tx->put_value(key, data, *db)); + EXPECT_NO_THROW(tx->commit()); + } + + { + environment->wait_for_reader(); + LMDBReadTransaction::Ptr tx = std::make_unique(environment); + auto key = get_key(0); + auto expected = get_value(0, 0); + std::vector data; + tx->get_value(key, data, *db); + EXPECT_EQ(data, expected); + } +} + +TEST_F(LMDBEnvironmentTest, can_write_and_read_multiple) +{ + LMDBEnvironment::SharedPtr environment = std::make_shared( + LMDBEnvironmentTest::_directory, LMDBEnvironmentTest::_mapSize, 1, LMDBEnvironmentTest::_maxReaders); + + LMDBDatabase::SharedPtr db; + + { + environment->wait_for_writer(); + LMDBDatabaseCreationTransaction tx(environment); + db = std::make_unique(environment, tx, "DB", false, false); + EXPECT_NO_THROW(tx.commit()); + } + + int64_t numValues = 10; + + { + for (int64_t count = 0; count < numValues; count++) { + environment->wait_for_writer(); + LMDBWriteTransaction::Ptr tx = std::make_unique(environment); + auto key = get_key(count); + auto data = get_value(count, 0); + EXPECT_NO_THROW(tx->put_value(key, data, *db)); + EXPECT_NO_THROW(tx->commit()); + } + } + + { + for (int64_t count = 0; count < numValues; count++) { + environment->wait_for_reader(); + LMDBReadTransaction::Ptr tx = std::make_unique(environment); + auto key = get_key(count); + auto expected = get_value(count, 0); + std::vector data; + tx->get_value(key, data, *db); + EXPECT_EQ(data, expected); + } + } +} + +TEST_F(LMDBEnvironmentTest, can_read_multiple_threads) +{ + LMDBEnvironment::SharedPtr environment = + std::make_shared(LMDBEnvironmentTest::_directory, LMDBEnvironmentTest::_mapSize, 1, 2); + + LMDBDatabase::SharedPtr db; + { + environment->wait_for_writer(); + LMDBDatabaseCreationTransaction tx(environment); + db = std::make_unique(environment, tx, "DB", false, false); + EXPECT_NO_THROW(tx.commit()); + } + + int64_t numValues = 10; + int64_t numIterationsPerThread = 1000; + uint32_t numThreads = 16; + + { + for (int64_t count = 0; count < numValues; count++) { + environment->wait_for_writer(); + LMDBWriteTransaction::Ptr tx = std::make_unique(environment); + auto key = get_key(count); + auto expected = get_value(count, 0); + EXPECT_NO_THROW(tx->put_value(key, expected, *db)); + EXPECT_NO_THROW(tx->commit()); + } + } + + { + auto func = [&]() -> void { + for (int64_t iteration = 0; iteration < numIterationsPerThread; iteration++) { + for (int64_t count = 0; count < numValues; count++) { + environment->wait_for_reader(); + LMDBReadTransaction::Ptr tx = std::make_unique(environment); + auto key = get_key(count); + auto expected = 
get_value(count, 0);
+                std::vector<uint8_t> data;
+                tx->get_value(key, data, *db);
+                EXPECT_EQ(data, expected);
+            }
+        }
+    };
+    std::vector<std::unique_ptr<std::thread>> threads;
+    for (uint64_t count = 0; count < numThreads; count++) {
+        threads.emplace_back(std::make_unique<std::thread>(func));
+    }
+    for (uint64_t count = 0; count < numThreads; count++) {
+        threads[count]->join();
+    }
+}
diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/callbacks.cpp b/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_helpers.cpp
similarity index 92%
rename from barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/callbacks.cpp
rename to barretenberg/cpp/src/barretenberg/lmdblib/lmdb_helpers.cpp
index 1f073a76371f..fa4932a7d649 100644
--- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/callbacks.cpp
+++ b/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_helpers.cpp
@@ -1,4 +1,4 @@
-#include "barretenberg/crypto/merkle_tree/lmdb_store/callbacks.hpp"
+#include "barretenberg/lmdblib/lmdb_helpers.hpp"
 #include "barretenberg/numeric/uint256/uint256.hpp"
 #include "lmdb.h"
 #include
@@ -15,7 +15,7 @@
 #include
 #endif
 
-namespace bb::crypto::merkle_tree {
+namespace bb::lmdblib {
 void throw_error(const std::string& errorString, int error)
 {
     std::stringstream ss;
@@ -80,4 +80,4 @@
 void copy_to_vector(const MDB_val& dbVal, std::vector<uint8_t>& target)
 {
     std::vector<uint8_t> temp = mdb_val_to_vector(dbVal);
     target.swap(temp);
 }
-} // namespace bb::crypto::merkle_tree
\ No newline at end of file
+} // namespace bb::lmdblib
\ No newline at end of file
diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/callbacks.hpp b/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_helpers.hpp
similarity index 81%
rename from barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/callbacks.hpp
rename to barretenberg/cpp/src/barretenberg/lmdblib/lmdb_helpers.hpp
index cae491afa0b2..6117fc94a6f0 100644
--- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/callbacks.hpp
+++ b/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_helpers.hpp
@@ -1,18 +1,10 @@
-#pragma once
-#include "barretenberg/crypto/merkle_tree/types.hpp"
-#include "barretenberg/numeric/uint128/uint128.hpp"
+#pragma once
 #include "barretenberg/numeric/uint256/uint256.hpp"
-#include
-#include
+#include "lmdb.h"
+#include
 #include
-
-namespace bb::crypto::merkle_tree {
-using LeafIndexKeyType = uint64_t;
-using BlockMetaKeyType = uint64_t;
-using FrKeyType = uint256_t;
-using MetaKeyType = uint8_t;
-
+namespace bb::lmdblib {
 void throw_error(const std::string& errorString, int error);
 
 int size_cmp(const MDB_val* a, const MDB_val* b);
@@ -66,4 +58,4 @@
 template <typename... TArgs> void call_lmdb_func(void (*f)(TArgs...), TArgs... args)
 {
     f(args...);
 }
-} // namespace bb::crypto::merkle_tree
\ No newline at end of file
+} // namespace bb::lmdblib
\ No newline at end of file
diff --git a/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_read_transaction.cpp b/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_read_transaction.cpp
new file mode 100644
index 000000000000..51a8c7f754cd
--- /dev/null
+++ b/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_read_transaction.cpp
@@ -0,0 +1,16 @@
+#include "barretenberg/lmdblib/lmdb_read_transaction.hpp"
+#include "barretenberg/lmdblib/lmdb_environment.hpp"
+#include "barretenberg/lmdblib/lmdb_helpers.hpp"
+#include <utility>
+
+namespace bb::lmdblib {
+LMDBReadTransaction::LMDBReadTransaction(LMDBEnvironment::SharedPtr env)
+    : LMDBTransaction(env, true)
+{}
+
+LMDBReadTransaction::~LMDBReadTransaction()
+{
+    LMDBTransaction::abort();
+    _environment->release_reader();
+}
+} // namespace bb::lmdblib
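LMDBReadTransaction's destructor both aborts the LMDB transaction and returns the reader slot, so the read path needs no explicit cleanup. A hedged usage sketch against the API shown in this diff (read_one is an illustrative helper; get_value is the lookup used throughout these files):

    #include "barretenberg/lmdblib/lmdb_database.hpp"
    #include "barretenberg/lmdblib/lmdb_environment.hpp"
    #include "barretenberg/lmdblib/lmdb_read_transaction.hpp"
    #include <cstdint>
    #include <vector>

    using namespace bb::lmdblib;

    bool read_one(LMDBEnvironment::SharedPtr env, const LMDBDatabase& db, std::vector<uint8_t>& key, std::vector<uint8_t>& out)
    {
        env->wait_for_reader();            // take a reader slot first
        LMDBReadTransaction tx(env);       // snapshot view of the data
        return tx.get_value(key, out, db); // false if the key is absent
        // ~LMDBReadTransaction aborts the txn and calls release_reader()
    }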
diff --git a/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_read_transaction.hpp b/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_read_transaction.hpp
new file mode 100644
index 000000000000..298651d10792
--- /dev/null
+++ b/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_read_transaction.hpp
@@ -0,0 +1,35 @@
+#pragma once
+#include "barretenberg/common/serialize.hpp"
+#include "barretenberg/crypto/merkle_tree/types.hpp"
+#include "barretenberg/lmdblib/lmdb_environment.hpp"
+#include "barretenberg/lmdblib/lmdb_helpers.hpp"
+#include "barretenberg/lmdblib/lmdb_transaction.hpp"
+#include "barretenberg/lmdblib/queries.hpp"
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace bb::lmdblib {
+
+/**
+ * RAII wrapper around a read transaction.
+ * Contains various methods for retrieving values by their keys.
+ * Aborts the transaction upon object destruction.
+ */
+class LMDBReadTransaction : public LMDBTransaction {
+  public:
+    using Ptr = std::unique_ptr<LMDBReadTransaction>;
+    using SharedPtr = std::shared_ptr<LMDBReadTransaction>;
+
+    LMDBReadTransaction(LMDBEnvironment::SharedPtr env);
+    LMDBReadTransaction(const LMDBReadTransaction& other) = delete;
+    LMDBReadTransaction(LMDBReadTransaction&& other) = delete;
+    LMDBReadTransaction& operator=(const LMDBReadTransaction& other) = delete;
+    LMDBReadTransaction& operator=(LMDBReadTransaction&& other) = delete;
+
+    ~LMDBReadTransaction() override;
+};
+} // namespace bb::lmdblib
\ No newline at end of file
diff --git a/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_store.cpp b/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_store.cpp
new file mode 100644
index 000000000000..4b99b79120ed
--- /dev/null
+++ b/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_store.cpp
@@ -0,0 +1,184 @@
+#include "barretenberg/lmdblib/lmdb_store.hpp"
+#include "barretenberg/lmdblib/lmdb_database.hpp"
+#include "barretenberg/lmdblib/lmdb_db_transaction.hpp"
+#include "barretenberg/lmdblib/lmdb_store_base.hpp"
+#include "barretenberg/lmdblib/lmdb_write_transaction.hpp"
+#include "barretenberg/lmdblib/types.hpp"
+#include "lmdb.h"
+#include
+#include
+#include
+#include
+#include
+
+namespace bb::lmdblib {
+LMDBStore::LMDBStore(std::string directory, uint64_t mapSizeKb, uint64_t maxNumReaders, uint64_t maxDbs)
+    : LMDBStoreBase(std::move(directory), mapSizeKb, maxNumReaders, maxDbs)
+{}
+
+void LMDBStore::open_database(const std::string& name, bool duplicateKeysPermitted)
+{
+    LMDBDatabase::SharedPtr db;
+    {
+        LMDBDatabaseCreationTransaction::Ptr tx = create_db_transaction();
+        try {
+            db = std::make_shared<LMDBDatabase>(_environment, *tx, name, false, false, duplicateKeysPermitted);
+            tx->commit();
+        } catch (std::exception& e) {
+            tx->try_abort();
+            throw std::runtime_error(format("Unable to create database: ", name, " Error: ", e.what()));
+        }
+    }
+    // if we are here then we successfully created the database
+    std::unique_lock lock(databasesMutex);
+    databases[name] = db;
+}
+
+void LMDBStore::close_database(const std::string& name)
+{
+    LMDBDatabase::SharedPtr db;
+    {
+        std::unique_lock lock(databasesMutex);
+        const auto it = databases.find(name);
+        if (it == databases.end()) {
+            throw std::runtime_error(format("Database ", name, " not found"));
+        }
+        db = it->second;
+        databases.erase(it);
+    }
+}
+
+LMDBStore::Database::SharedPtr LMDBStore::get_database(const std::string& name)
+{
+    std::unique_lock lock(databasesMutex);
+    const auto it = databases.find(name);
+    if (it == databases.end()) {
+        throw std::runtime_error(format("Database ", name, " not found"));
+    }
+    return it->second;
+}
+
+std::vector<LMDBStore::Database::SharedPtr> LMDBStore::get_databases() const
+{
+    std::unique_lock lock(databasesMutex);
+    std::vector<Database::SharedPtr> dbs;
+    dbs.reserve(databases.size());
+    for (const auto& db : databases) {
+        dbs.push_back(db.second);
+    }
+    return dbs;
+}
+
+std::vector<LMDBStore::Database::SharedPtr> LMDBStore::get_databases(const std::vector<PutData>& puts) const
+{
+    std::unique_lock lock(databasesMutex);
+    std::vector<Database::SharedPtr> dbs;
+    dbs.reserve(puts.size());
+    for (const auto& p : puts) {
+        const auto it = databases.find(p.name);
+        if (it == databases.end()) {
+            throw std::runtime_error(format("Database ", p.name, " not found"));
+        }
+        dbs.push_back(it->second);
+    }
+    return dbs;
+}
+
+uint64_t LMDBStore::get_stats(std::vector<DBStats>& stats) const
+{
+    std::vector<Database::SharedPtr> dbs = get_databases();
+    ReadTransaction::SharedPtr tx = create_read_transaction();
+    for (const auto& db : dbs) {
+        stats.push_back(db->get_stats(*tx));
+    }
+    return _environment->get_map_size();
+}
+
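get_stats above takes one read transaction across all open databases; put below is its write-side counterpart, folding every database's writes and deletes into a single write transaction that either commits completely or is aborted. A hedged usage sketch (byte-vector Key/Value aliases assumed, as in the fixtures):

    #include "barretenberg/lmdblib/lmdb_store.hpp"
    #include <cstdint>
    #include <vector>

    using namespace bb::lmdblib;

    void write_batch(LMDBStore& store)
    {
        std::vector<uint8_t> key = { 'k', '1' };
        std::vector<uint8_t> value = { 0x01, 0x02 };
        LMDBStore::PutData batch;
        batch.name = "Test Database";           // must already have been open_database()'d
        batch.toWrite = { { key, { value } } }; // one key mapped to one (or more) values
        batch.toDelete = {};                    // deletions ride in the same transaction
        std::vector<LMDBStore::PutData> puts = { batch };
        store.put(puts);                        // single write txn; rolls back on error
    }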
+void LMDBStore::put(std::vector<PutData>& data)
+{
+    std::vector<Database::SharedPtr> dbs = get_databases(data);
+    WriteTransaction::Ptr tx = create_write_transaction();
+    try {
+        for (size_t i = 0; i < data.size(); i++) {
+            put(data[i].toWrite, data[i].toDelete, *dbs[i], *tx);
+        }
+        tx->commit();
+    } catch (std::exception& e) {
+        tx->try_abort();
+        throw std::runtime_error(format("Failed to commit data", " Error: ", e.what()));
+    }
+}
+
+void LMDBStore::get(KeysVector& keys, OptionalValuesVector& values, const std::string& name)
+{
+    get(keys, values, get_database(name));
+}
+
+void LMDBStore::put(KeyDupValuesVector& toWrite,
+                    KeyOptionalValuesVector& toDelete,
+                    const LMDBDatabase& db,
+                    LMDBWriteTransaction& tx)
+{
+    for (auto& kd : toWrite) {
+        for (auto& p : kd.second) {
+            tx.put_value(kd.first, p, db);
+        }
+    }
+    for (auto& kd : toDelete) {
+        if (!kd.second.has_value()) {
+            tx.delete_value(kd.first, db);
+            continue;
+        }
+        for (auto& p : kd.second.value()) {
+            tx.delete_value(kd.first, p, db);
+        }
+    }
+}
+void LMDBStore::get(KeysVector& keys, OptionalValuesVector& values, LMDBDatabase::SharedPtr db)
+{
+    values.reserve(keys.size());
+    ReadTransaction::SharedPtr tx = create_read_transaction();
+    if (!db->duplicate_keys_permitted()) {
+        const LMDBDatabase& dbRef = *db;
+        for (auto& k : keys) {
+            OptionalValues optional;
+            Value value;
+            bool result = tx->get_value(k, value, dbRef);
+            optional = result ? OptionalValues(ValuesVector{ value }) : std::nullopt;
+            values.emplace_back(optional);
+        }
+        return;
+    }
+    {
+        Cursor::Ptr cursor = std::make_unique<Cursor>(tx, db, _environment->getNextId());
+        for (auto& k : keys) {
+            if (!cursor->set_at_key(k)) {
+                values.emplace_back(std::nullopt);
+                continue;
+            }
+            KeyDupValuesVector keyValuePairs;
+            cursor->read_next(1, keyValuePairs);
+            if (keyValuePairs.empty()) {
+                // this shouldn't happen but return the null optional anyway
+                values.emplace_back(std::nullopt);
+                continue;
+            }
+            ValuesVector retrievedValues;
+            values.reserve(keyValuePairs.size());
+            for (auto& kv : keyValuePairs) {
+                for (auto& vals : kv.second) {
+                    retrievedValues.push_back(std::move(vals));
+                }
+            }
+            OptionalValues optionalValues = retrievedValues;
+            values.emplace_back(optionalValues);
+        }
+    }
+}
+
+LMDBStore::Cursor::Ptr LMDBStore::create_cursor(ReadTransaction::SharedPtr tx, const std::string& dbName)
+{
+    Database::SharedPtr db = get_database(dbName);
+    return std::make_unique<Cursor>(tx, db, _environment->getNextId());
+}
+} // namespace bb::lmdblib
\ No newline at end of file
diff --git a/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_store.hpp b/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_store.hpp
new file mode 100644
index 000000000000..67277d6308ae
--- /dev/null
+++ b/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_store.hpp
@@ -0,0 +1,74 @@
+#pragma once
+
+#include "barretenberg/lmdblib/lmdb_cursor.hpp"
+#include "barretenberg/lmdblib/lmdb_database.hpp"
+#include "barretenberg/lmdblib/lmdb_environment.hpp"
+#include "barretenberg/lmdblib/lmdb_read_transaction.hpp"
+#include "barretenberg/lmdblib/lmdb_store_base.hpp"
+#include "barretenberg/lmdblib/lmdb_write_transaction.hpp"
+#include "barretenberg/lmdblib/queries.hpp"
+#include "barretenberg/lmdblib/types.hpp"
+#include
+#include
+#include
+#include
+#include
+#include
+namespace bb::lmdblib {
+
+/**
+ * Implements a basic LMDB store, consisting of an environment, any number of databases, transactions and cursors.
+ */
+
+class LMDBStore : public LMDBStoreBase {
+  public:
+    using Ptr = std::unique_ptr<LMDBStore>;
+    using SharedPtr = std::shared_ptr<LMDBStore>;
+    using WriteTransaction = LMDBWriteTransaction;
+    using ReadTransaction = LMDBReadTransaction;
+    using Database = LMDBDatabase;
+    using Cursor = LMDBCursor;
+
+    struct PutData {
+        KeyDupValuesVector toWrite;
+        KeyOptionalValuesVector toDelete;
+        std::string name;
+    };
+
+    LMDBStore(std::string directory, uint64_t mapSizeKb, uint64_t maxNumReaders, uint64_t maxDbs);
+    LMDBStore(const LMDBStore& other) = delete;
+    LMDBStore(LMDBStore&& other) = delete;
+    LMDBStore& operator=(const LMDBStore& other) = delete;
+    LMDBStore& operator=(LMDBStore&& other) = delete;
+    ~LMDBStore() override = default;
+
+    void open_database(const std::string& name, bool duplicateKeysPermitted = false);
+    void close_database(const std::string& name);
+
+    void put(std::vector<PutData>& data);
+    void get(KeysVector& keys, OptionalValuesVector& values, const std::string& name);
+
+    Cursor::Ptr create_cursor(ReadTransaction::SharedPtr tx, const std::string& dbName);
+
+    uint64_t get_stats(std::vector<DBStats>& stats) const;
+
+  private:
+    // mutex to protect the databases map
+    mutable std::mutex databasesMutex;
+    std::unordered_map<std::string, Database::SharedPtr> databases;
+
+    void put(KeyDupValuesVector& toWrite,
+             KeyOptionalValuesVector& toDelete,
+             const LMDBDatabase& db,
+             LMDBWriteTransaction& tx);
+    void get(KeysVector& keys, OptionalValuesVector& values, LMDBDatabase::SharedPtr db);
+    // Returns the database of the given name
+    Database::SharedPtr get_database(const std::string& name);
+    // Returns all databases
+    std::vector<Database::SharedPtr> get_databases() const;
+    // Returns the databases corresponding to the requested put operations
+    // Databases are returned in the order of the puts
+    // Throws if any of the databases are not found
+    std::vector<Database::SharedPtr> get_databases(const std::vector<PutData>& puts) const;
+};
+} // namespace bb::lmdblib
\ No newline at end of file
diff --git a/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_store.test.cpp b/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_store.test.cpp
new file mode 100644
index 000000000000..5548dfcb07a5
--- /dev/null
+++ b/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_store.test.cpp
@@ -0,0 +1,1038 @@
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "barretenberg/common/serialize.hpp"
+#include "barretenberg/common/streams.hpp"
+#include "barretenberg/common/test.hpp"
+#include "barretenberg/lmdblib/fixtures.hpp"
+#include "barretenberg/lmdblib/lmdb_cursor.hpp"
+#include "barretenberg/lmdblib/lmdb_database.hpp"
+#include "barretenberg/lmdblib/lmdb_db_transaction.hpp"
+#include "barretenberg/lmdblib/lmdb_environment.hpp"
+#include "barretenberg/lmdblib/lmdb_read_transaction.hpp"
+#include "barretenberg/lmdblib/lmdb_store.hpp"
+#include "barretenberg/lmdblib/lmdb_write_transaction.hpp"
+#include "barretenberg/lmdblib/queries.hpp"
+#include "barretenberg/lmdblib/types.hpp"
+
+using namespace bb::lmdblib;
+
+class LMDBStoreTest : public testing::Test {
+  protected:
+    void SetUp() override
+    {
+        _directory = random_temp_directory();
+        _mapSize = 1024 * 1024;
+        _maxReaders = 16;
+        std::filesystem::create_directories(_directory);
+    }
+
+    void TearDown() override { std::filesystem::remove_all(_directory); }
+
+  public:
+    static std::string _directory;
+    static uint32_t _maxReaders;
+    static uint64_t _mapSize;
+};
+
+std::string LMDBStoreTest::_directory;
+uint32_t 
LMDBStoreTest::_maxReaders; +uint64_t LMDBStoreTest::_mapSize; + +LMDBStore::Ptr create_store(uint32_t maxNumDbs = 1) +{ + return std::make_unique( + LMDBStoreTest::_directory, LMDBStoreTest::_mapSize, LMDBStoreTest::_maxReaders, maxNumDbs); +} + +void prepare_test_data(int64_t numKeys, int64_t numValues, KeyDupValuesVector& testData, int64_t keyOffset = 0) +{ + for (int64_t count = 0; count < numKeys; count++) { + int64_t keyValue = keyOffset + count; + auto key = get_key(keyValue); + ValuesVector dup; + for (int64_t dupCount = 0; dupCount < numValues; dupCount++) { + auto data = get_value(keyValue, dupCount); + dup.emplace_back(data); + } + KeyValuesPair pair = { key, dup }; + testData.emplace_back(pair); + } +} + +void write_test_data(std::vector dbNames, int64_t numKeys, int64_t numValues, LMDBStore& store) +{ + KeyDupValuesVector toWrite; + KeyOptionalValuesVector toDelete; + prepare_test_data(numKeys, numValues, toWrite); + for (auto& name : dbNames) { + LMDBStore::PutData putData = { toWrite, toDelete, name }; + std::vector putDatas = { putData }; + store.put(putDatas); + } +} + +TEST_F(LMDBStoreTest, can_create_store) +{ + EXPECT_NO_THROW(LMDBStore store(LMDBStoreTest::_directory, LMDBStoreTest::_mapSize, LMDBStoreTest::_maxReaders, 1)); +} + +TEST_F(LMDBStoreTest, can_create_database) +{ + LMDBStore::Ptr store = create_store(); + const std::string name = "Test Database"; + EXPECT_NO_THROW(store->open_database(name)); +} + +TEST_F(LMDBStoreTest, can_not_create_more_databases_then_specified) +{ + LMDBStore::Ptr store = create_store(2); + const std::string name1 = "Test Database 1"; + EXPECT_NO_THROW(store->open_database(name1)); + const std::string name2 = "Test Database 2"; + EXPECT_NO_THROW(store->open_database(name2)); + const std::string name3 = "Test Database 3"; + EXPECT_THROW(store->open_database(name3), std::runtime_error); +} + +TEST_F(LMDBStoreTest, can_write_to_database) +{ + LMDBStore::Ptr store = create_store(); + const std::string name = "Test Database"; + store->open_database(name); + + auto key = get_key(0); + auto data = get_value(0, 1); + KeyDupValuesVector toWrite = { { { key, { data } } } }; + KeyOptionalValuesVector toDelete; + LMDBStore::PutData putData = { toWrite, toDelete, name }; + std::vector putDatas = { putData }; + EXPECT_NO_THROW(store->put(putDatas)); +} + +TEST_F(LMDBStoreTest, can_not_write_to_database_that_does_not_exist) +{ + LMDBStore::Ptr store = create_store(); + const std::string name = "Test Database"; + store->open_database(name); + + auto key = get_key(0); + auto data = get_value(0, 1); + KeyDupValuesVector toWrite = { { { key, { data } } } }; + KeyOptionalValuesVector toDelete; + LMDBStore::PutData putData = { toWrite, toDelete, "Non Existent Database" }; + std::vector putDatas = { putData }; + EXPECT_THROW(store->put(putDatas), std::runtime_error); +} + +TEST_F(LMDBStoreTest, can_close_database) +{ + LMDBStore::Ptr store = create_store(); + const std::string name = "Test Database"; + store->open_database(name); + + auto key = get_key(0); + auto data = get_value(0, 1); + KeyDupValuesVector toWrite = { { { key, { data } } } }; + KeyOptionalValuesVector toDelete; + LMDBStore::PutData putData = { toWrite, toDelete, name }; + std::vector putDatas = { putData }; + EXPECT_NO_THROW(store->put(putDatas)); + + EXPECT_NO_THROW(store->close_database(name)); + + // try another write + key = get_key(1); + data = get_value(1, 1); + toWrite = { { { key, { data } } } }; + putData = { toWrite, toDelete, name }; + putDatas = { putData }; + 
EXPECT_THROW(store->put(putDatas), std::runtime_error); +} + +TEST_F(LMDBStoreTest, can_write_duplicate_keys_to_database) +{ + LMDBStore::Ptr store = create_store(2); + const std::string name = "Test Database"; + store->open_database(name); + const std::string nameDups = "Test Database Dups"; + store->open_database(nameDups, true); + + // Write a key multiple times with different values + auto key = get_key(0); + auto data = get_value(0, 1); + auto dataDup = get_value(0, 2); + KeyDupValuesVector toWrite = { { { key, { data, dataDup } } } }; + KeyOptionalValuesVector toDelete; + LMDBStore::PutData putData = { toWrite, toDelete, name }; + std::vector putDatas = { putData }; + EXPECT_NO_THROW(store->put(putDatas)); + LMDBStore::PutData putDataDups = { toWrite, toDelete, nameDups }; + putDatas = { putDataDups }; + EXPECT_NO_THROW(store->put(putDatas)); +} + +TEST_F(LMDBStoreTest, can_read_from_database) +{ + LMDBStore::Ptr store = create_store(); + const std::string dbName = "Test Database"; + store->open_database(dbName); + + auto key = get_key(0); + auto expected = get_value(0, 1); + KeyDupValuesVector toWrite = { { { key, { expected } } } }; + KeyOptionalValuesVector toDelete; + LMDBStore::PutData putData = { toWrite, toDelete, dbName }; + std::vector putDatas = { putData }; + store->put(putDatas); + + OptionalValuesVector data; + KeysVector keys = { { key } }; + store->get(keys, data, dbName); + EXPECT_EQ(data.size(), 1); + EXPECT_TRUE(data[0].has_value()); + EXPECT_EQ(data[0].value(), ValuesVector{ expected }); +} + +TEST_F(LMDBStoreTest, can_not_read_from_non_existent_database) +{ + LMDBStore::Ptr store = create_store(); + const std::string dbName = "Test Database"; + store->open_database(dbName); + + auto key = get_key(0); + auto expected = get_value(0, 1); + KeyDupValuesVector toWrite = { { { key, { expected } } } }; + KeyOptionalValuesVector toDelete; + LMDBStore::PutData putData = { toWrite, toDelete, dbName }; + std::vector putDatas = { putData }; + store->put(putDatas); + + OptionalValuesVector data; + KeysVector keys = { { key } }; + EXPECT_THROW(store->get(keys, data, "Non Existent Database"), std::runtime_error); +} + +TEST_F(LMDBStoreTest, can_write_and_read_multiple) +{ + LMDBStore::Ptr store = create_store(2); + + const std::vector dbNames = { "Test Database 1", "Test Database 2" }; + for (const auto& s : dbNames) { + EXPECT_NO_THROW(store->open_database(s)); + } + + // We will write to multiple databases and read back from them both + int64_t numKeys = 10; + int64_t numValues = 1; + + write_test_data(dbNames, numKeys, numValues, *store); + + { + KeysVector keys; + OptionalValuesVector values; + for (int64_t count = 0; count < numKeys; count++) { + auto key = get_key(count); + auto expected = get_value(count, 0); + keys.push_back(key); + values.emplace_back(ValuesVector{ expected }); + } + + { + OptionalValuesVector retrieved; + store->get(keys, retrieved, dbNames[0]); + EXPECT_EQ(retrieved.size(), numKeys); + EXPECT_EQ(retrieved, values); + } + { + OptionalValuesVector retrieved; + store->get(keys, retrieved, dbNames[1]); + EXPECT_EQ(retrieved.size(), numKeys); + EXPECT_EQ(retrieved, values); + } + } +} + +TEST_F(LMDBStoreTest, can_write_and_read_multiple_duplicates) +{ + LMDBStore::Ptr store = create_store(2); + + const std::vector dbNames = { "Test Database No Dups", "Test Database Dups" }; + store->open_database(dbNames[0], false); + store->open_database(dbNames[1], true); + + // We will write multiple values to the same key + // Depending on whether the database supports 
duplicates, we either
+    // append or overwrite
+    int64_t numKeys = 1;
+    int64_t numValues = 2;
+
+    write_test_data(dbNames, numKeys, numValues, *store);
+
+    {
+        KeysVector keys;
+        OptionalValuesVector valuesWithoutDups;
+        OptionalValuesVector valuesWithDups;
+        for (int64_t count = 0; count < numKeys; count++) {
+            auto key = get_key(count);
+            // For the no dup DB we expect the last written value to be present
+            auto expectedNoDup = get_value(count, numValues - 1);
+            keys.push_back(key);
+            ValuesVector dup;
+            for (int64_t dupCount = 0; dupCount < numValues; dupCount++) {
+                auto expectedWithDup = get_value(count, dupCount);
+                dup.emplace_back(expectedWithDup);
+            }
+            valuesWithDups.emplace_back(dup);
+            valuesWithoutDups.emplace_back(ValuesVector{ expectedNoDup });
+        }
+
+        {
+            OptionalValuesVector retrieved;
+            store->get(keys, retrieved, dbNames[0]);
+            EXPECT_EQ(retrieved.size(), numKeys);
+            EXPECT_EQ(retrieved, valuesWithoutDups);
+        }
+        {
+            OptionalValuesVector retrieved;
+            store->get(keys, retrieved, dbNames[1]);
+            EXPECT_EQ(retrieved.size(), numKeys);
+            EXPECT_EQ(retrieved, valuesWithDups);
+        }
+    }
+}
+
+TEST_F(LMDBStoreTest, can_read_missing_keys_from_database)
+{
+    LMDBStore::Ptr store = create_store();
+    const std::string dbName = "Test Database";
+    store->open_database(dbName);
+
+    // We will attempt to read a non-existent key and see that it returns nothing
+
+    auto key = get_key(0);
+    auto expected = get_value(0, 0);
+    KeyDupValuesVector toWrite = { { { key, { expected } } } };
+    KeyOptionalValuesVector toDelete;
+    LMDBStore::PutData putData = { toWrite, toDelete, dbName };
+    std::vector<LMDBStore::PutData> putDatas = { putData };
+    store->put(putDatas);
+
+    OptionalValuesVector data;
+    auto missing = serialise(std::string("Missing Key"));
+    KeysVector keys = { { key }, { missing } };
+    store->get(keys, data, dbName);
+    EXPECT_EQ(data.size(), 2);
+    EXPECT_TRUE(data[0].has_value());
+    EXPECT_EQ(data[0].value(), ValuesVector{ expected });
+    EXPECT_FALSE(data[1].has_value());
+}
+
+TEST_F(LMDBStoreTest, can_write_and_delete)
+{
+    LMDBStore::Ptr store = create_store(2);
+
+    const std::string dbName = "Test Database";
+    store->open_database(dbName);
+
+    // Test writing and deleting items from the database
+
+    int64_t numKeys = 10;
+    int64_t numValues = 1;
+
+    write_test_data({ dbName }, numKeys, numValues, *store);
+
+    {
+        // Write 2 more and delete some
+        KeyDupValuesVector toWrite;
+        KeyOptionalValuesVector toDelete;
+        for (int64_t count = numKeys; count < numKeys + 2; count++) {
+            auto key = get_key(count);
+            auto data = get_value(count, 0);
+            ValuesVector dup = { data };
+            KeyValuesPair pair = { key, dup };
+            toWrite.emplace_back(pair);
+        }
+        for (int64_t count = 3; count < numKeys - 2; count++) {
+            auto key = get_key(count);
+            auto data = get_value(count, 0);
+            KeyValuesPair pair = { key, { data } };
+            toDelete.emplace_back(pair);
+        }
+        LMDBStore::PutData putData = { toWrite, toDelete, dbName };
+        std::vector<LMDBStore::PutData> putDatas = { putData };
+        store->put(putDatas);
+    }
+
+    {
+        KeysVector keys;
+        OptionalValuesVector values;
+        for (int64_t count = 0; count < numKeys + 2; count++) {
+            auto key = get_key(count);
+            auto expected = get_value(count, 0);
+            keys.push_back(key);
+            values.emplace_back((count < 3 || count >= (numKeys - 2)) ? 
OptionalValues(ValuesVector{ expected }) + : std::nullopt); + } + + { + OptionalValuesVector retrieved; + store->get(keys, retrieved, dbName); + EXPECT_EQ(retrieved.size(), numKeys + 2); + EXPECT_EQ(retrieved, values); + } + } +} + +TEST_F(LMDBStoreTest, can_write_and_delete_duplicates) +{ + LMDBStore::Ptr store = create_store(2); + + const std::string dbName = "Test Database"; + store->open_database(dbName, true); + + // Test writing and deleting entries from a database supporting duplicates + + int64_t numKeys = 10; + int64_t numValues = 5; + + write_test_data({ dbName }, numKeys, numValues, *store); + + { + // Write 2 more and delete some + KeyDupValuesVector toWrite; + KeyOptionalValuesVector toDelete; + for (int64_t count = numKeys; count < numKeys + 2; count++) { + auto key = get_key(count); + ValuesVector dup; + for (int64_t dupCount = 0; dupCount < numValues; dupCount++) { + auto data = get_value(count, dupCount); + dup.emplace_back(data); + } + KeyValuesPair pair = { key, dup }; + toWrite.emplace_back(pair); + } + + // For some keys we remove some of the values + for (int64_t count = 3; count < numKeys - 2; count++) { + auto key = get_key(count); + ValuesVector dup; + // Remove some of the values + for (int64_t dupCount = 1; dupCount < numValues - 1; dupCount++) { + auto data = get_value(count, dupCount); + dup.emplace_back(data); + } + KeyValuesPair pair = { key, dup }; + toDelete.emplace_back(pair); + } + LMDBStore::PutData putData = { toWrite, toDelete, dbName }; + std::vector putDatas = { putData }; + store->put(putDatas); + } + + { + KeysVector keys; + OptionalValuesVector expectedValues; + for (int64_t count = 0; count < numKeys + 2; count++) { + auto key = get_key(count); + keys.push_back(key); + int64_t deletedDupStart = (count < 3 || count >= (numKeys - 2)) ? numValues : 1; + int64_t deletedDupEnd = (count < 3 || count >= (numKeys - 2)) ? 
0 : numValues - 1; + ValuesVector dup; + // The number of keys retrieved depends on whether this key had some value deleted + for (int64_t dupCount = 0; dupCount < numValues; dupCount++) { + if (dupCount >= deletedDupStart && dupCount < deletedDupEnd) { + continue; + } + auto data = get_value(count, dupCount); + dup.emplace_back(data); + } + expectedValues.emplace_back(OptionalValues(ValuesVector{ dup })); + } + + { + OptionalValuesVector retrieved; + store->get(keys, retrieved, dbName); + EXPECT_EQ(retrieved.size(), numKeys + 2); + EXPECT_EQ(retrieved, expectedValues); + } + } +} + +TEST_F(LMDBStoreTest, can_delete_all_values_from_keys) +{ + LMDBStore::Ptr store = create_store(2); + + const std::vector dbNames = { "Test Database No Dups", "Test Database Dups" }; + store->open_database(dbNames[0], false); + store->open_database(dbNames[1], true); + + // Test writing and deleting entries from a database supporting duplicates + + int64_t numKeys = 10; + int64_t numValues = 5; + + write_test_data(dbNames, numKeys, numValues, *store); + + KeyDupValuesVector toWrite; + KeyOptionalValuesVector toDelete; + for (int64_t count = 3; count < numKeys - 2; count++) { + auto key = get_key(count); + KeyOptionalValuesPair pair = { key, std::nullopt }; + toDelete.emplace_back(pair); + } + LMDBStore::PutData putData1 = { toWrite, toDelete, dbNames[0] }; + LMDBStore::PutData putData2 = { toWrite, toDelete, dbNames[1] }; + std::vector putDatas = { putData1, putData2 }; + store->put(putDatas); + // read all the key/value pairs + { + // We first read the database that supports duplicates + KeysVector keys; + KeyDupValuesVector expectedValues; + for (int64_t count = 0; count < numKeys; count++) { + if (count >= 3 && count < numKeys - 2) { + continue; + } + auto key = get_key(count); + keys.push_back(key); + ValuesVector dup; + for (int64_t dupCount = 0; dupCount < numValues; dupCount++) { + auto data = get_value(count, dupCount); + dup.emplace_back(data); + } + KeyValuesPair pair = { key, dup }; + expectedValues.emplace_back(pair); + } + LMDBStore::ReadTransaction::SharedPtr readTransaction = store->create_shared_read_transaction(); + LMDBCursor::Ptr cursor = store->create_cursor(readTransaction, dbNames[1]); + cursor->set_at_start(); + + KeyDupValuesVector retrieved; + cursor->read_next((uint64_t)numKeys, retrieved); + EXPECT_EQ(retrieved, expectedValues); + } + + { + // Now read the database without duplicates + KeysVector keys; + KeyDupValuesVector expectedValues; + for (int64_t count = 0; count < numKeys; count++) { + if (count >= 3 && count < numKeys - 2) { + continue; + } + auto key = get_key(count); + keys.push_back(key); + ValuesVector dup(1, get_value(count, numValues - 1)); + KeyValuesPair pair = { key, dup }; + expectedValues.emplace_back(pair); + } + LMDBStore::ReadTransaction::SharedPtr readTransaction = store->create_shared_read_transaction(); + LMDBCursor::Ptr cursor = store->create_cursor(readTransaction, dbNames[0]); + cursor->set_at_start(); + + KeyDupValuesVector retrieved; + cursor->read_next((uint64_t)numKeys, retrieved); + EXPECT_EQ(retrieved, expectedValues); + } +} + +TEST_F(LMDBStoreTest, can_read_forwards_with_cursors) +{ + LMDBStore::Ptr store = create_store(2); + + const std::string dbName = "Test Database"; + store->open_database(dbName); + + int64_t numKeys = 10; + int64_t numValues = 1; + + write_test_data({ dbName }, numKeys, numValues, *store); + + { + // read from a key mid-way through + int64_t startKey = 3; + auto key = get_key(startKey); + 
LMDBStore::ReadTransaction::SharedPtr tx = store->create_shared_read_transaction(); + LMDBStore::Cursor::Ptr cursor = store->create_cursor(tx, dbName); + bool setResult = cursor->set_at_key(key); + EXPECT_TRUE(setResult); + + int64_t numKeysToRead = 4; + KeyDupValuesVector keyValues; + cursor->read_next((uint64_t)numKeysToRead, keyValues); + + KeyDupValuesVector expected; + for (int64_t count = startKey; count < startKey + numKeysToRead; count++) { + auto key = get_key(count); + auto data = get_value(count, 0); + expected.emplace_back(KeyValuesPair{ key, { data } }); + } + EXPECT_EQ(keyValues, expected); + } +} + +TEST_F(LMDBStoreTest, can_read_duplicate_values_forwards_with_cursors) +{ + LMDBStore::Ptr store = create_store(2); + + const std::string dbName = "Test Database"; + store->open_database(dbName, true); + + int64_t numKeys = 10; + int64_t numValues = 5; + + write_test_data({ dbName }, numKeys, numValues, *store); + + { + // read from a key mid-way through + int64_t startKey = 3; + auto key = get_key(startKey); + LMDBStore::ReadTransaction::SharedPtr tx = store->create_shared_read_transaction(); + LMDBStore::Cursor::Ptr cursor = store->create_cursor(tx, dbName); + bool setResult = cursor->set_at_key(key); + EXPECT_TRUE(setResult); + + int64_t numKeysToRead = 4; + KeyDupValuesVector keyValues; + cursor->read_next((uint64_t)numKeysToRead, keyValues); + + KeyDupValuesVector expected; + for (int64_t count = startKey; count < startKey + numKeysToRead; count++) { + auto key = get_key(count); + ValuesVector dup; + for (int64_t dupCount = 0; dupCount < numValues; dupCount++) { + auto data = get_value(count, dupCount); + dup.emplace_back(data); + } + KeyValuesPair pair = { key, dup }; + expected.emplace_back(pair); + } + EXPECT_EQ(keyValues, expected); + } +} + +TEST_F(LMDBStoreTest, can_read_backwards_with_cursors) +{ + LMDBStore::Ptr store = create_store(2); + + const std::string dbName = "Test Database"; + store->open_database(dbName, true); + + int64_t numKeys = 10; + int64_t numValues = 1; + + write_test_data({ dbName }, numKeys, numValues, *store); + + { + // read from a key mid-way through + int64_t startKey = 7; + auto key = get_key(startKey); + LMDBStore::ReadTransaction::SharedPtr tx = store->create_shared_read_transaction(); + LMDBStore::Cursor::Ptr cursor = store->create_cursor(tx, dbName); + bool setResult = cursor->set_at_key(key); + EXPECT_TRUE(setResult); + + int64_t numKeysToRead = 4; + KeyDupValuesVector keyValues; + cursor->read_prev((uint64_t)numKeysToRead, keyValues); + + KeyDupValuesVector expected; + for (int64_t count = startKey; count > startKey - numKeysToRead; count--) { + auto key = get_key(count); + auto data = get_value(count, 0); + expected.emplace_back(KeyValuesPair{ key, { data } }); + } + EXPECT_EQ(keyValues, expected); + } +} + +TEST_F(LMDBStoreTest, can_read_duplicate_values_backwards_with_cursors) +{ + LMDBStore::Ptr store = create_store(2); + + const std::string dbName = "Test Database"; + store->open_database(dbName, true); + + int64_t numKeys = 10; + int64_t numValues = 5; + + write_test_data({ dbName }, numKeys, numValues, *store); + + { + // read from a key mid-way through + int64_t startKey = 7; + auto key = get_key(startKey); + LMDBStore::ReadTransaction::SharedPtr tx = store->create_shared_read_transaction(); + LMDBStore::Cursor::Ptr cursor = store->create_cursor(tx, dbName); + bool setResult = cursor->set_at_key(key); + EXPECT_TRUE(setResult); + + int64_t numKeysToRead = 4; + KeyDupValuesVector keyValues; + 
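// For a database opened with duplicate support, read_prev returns every value stored under each key it visits, not just a single value per key. +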
cursor->read_prev((uint64_t)numKeysToRead, keyValues); + + KeyDupValuesVector expected; + for (int64_t count = startKey; count > startKey - numKeysToRead; count--) { + auto key = get_key(count); + ValuesVector dup; + for (int64_t dupCount = 0; dupCount < numValues; dupCount++) { + auto data = get_value(count, dupCount); + dup.emplace_back(data); + } + KeyValuesPair pair = { key, dup }; + expected.emplace_back(pair); + } + EXPECT_EQ(keyValues, expected); + } +} + +TEST_F(LMDBStoreTest, can_read_past_the_end_with_cursors) +{ + LMDBStore::Ptr store = create_store(2); + + const std::string dbName = "Test Database"; + store->open_database(dbName, false); + + int64_t numKeys = 10; + int64_t numValues = 1; + + write_test_data({ dbName }, numKeys, numValues, *store); + + { + // read from a key mid-way through + int64_t startKey = 3; + auto key = get_key(startKey); + LMDBStore::ReadTransaction::SharedPtr tx = store->create_shared_read_transaction(); + LMDBStore::Cursor::Ptr cursor = store->create_cursor(tx, dbName); + bool setResult = cursor->set_at_key(key); + EXPECT_TRUE(setResult); + + int64_t numKeysToRead = 50; + KeyDupValuesVector keyValues; + cursor->read_next((uint64_t)numKeysToRead, keyValues); + + KeyDupValuesVector expected; + for (int64_t count = startKey; count < numKeys; count++) { + auto key = get_key(count); + auto data = get_value(count, 0); + expected.emplace_back(KeyValuesPair{ key, { data } }); + } + EXPECT_EQ(keyValues, expected); + } +} + +TEST_F(LMDBStoreTest, can_read_past_the_start_with_cursors) +{ + LMDBStore::Ptr store = create_store(2); + + const std::string dbName = "Test Database"; + store->open_database(dbName, false); + + int64_t numKeys = 10; + int64_t numValues = 1; + + write_test_data({ dbName }, numKeys, numValues, *store); + + { + // read from a key mid-way through + int64_t startKey = 7; + auto key = get_key(startKey); + LMDBStore::ReadTransaction::SharedPtr tx = store->create_shared_read_transaction(); + LMDBStore::Cursor::Ptr cursor = store->create_cursor(tx, dbName); + bool setResult = cursor->set_at_key(key); + EXPECT_TRUE(setResult); + + int64_t numKeysToRead = 50; + KeyDupValuesVector keyValues; + cursor->read_prev((uint64_t)numKeysToRead, keyValues); + + KeyDupValuesVector expected; + for (int64_t count = startKey; count >= 0; count--) { + auto key = get_key(count); + auto data = get_value(count, 0); + expected.emplace_back(KeyValuesPair{ key, { data } }); + } + EXPECT_EQ(keyValues, expected); + } +} + +TEST_F(LMDBStoreTest, can_read_duplicates_past_the_end_with_cursors) +{ + LMDBStore::Ptr store = create_store(2); + + const std::string dbName = "Test Database"; + store->open_database(dbName, true); + + int64_t numKeys = 10; + int64_t numValues = 5; + + write_test_data({ dbName }, numKeys, numValues, *store); + + { + // read from a key mid-way through + int64_t startKey = 3; + auto key = get_key(startKey); + LMDBStore::ReadTransaction::SharedPtr tx = store->create_shared_read_transaction(); + LMDBStore::Cursor::Ptr cursor = store->create_cursor(tx, dbName); + bool setResult = cursor->set_at_key(key); + EXPECT_TRUE(setResult); + + int64_t numKeysToRead = 50; + KeyDupValuesVector keyValues; + cursor->read_next((uint64_t)numKeysToRead, keyValues); + + KeyDupValuesVector expected; + for (int64_t count = startKey; count < numKeys; count++) { + auto key = get_key(count); + ValuesVector dup; + for (int64_t dupCount = 0; dupCount < numValues; dupCount++) { + auto data = get_value(count, dupCount); + dup.emplace_back(data); + } + KeyValuesPair pair = { key, dup 
}; + expected.emplace_back(pair); + } + EXPECT_EQ(keyValues, expected); + } +} + +TEST_F(LMDBStoreTest, can_read_duplicates_past_the_start_with_cursors) +{ + LMDBStore::Ptr store = create_store(2); + + const std::string dbName = "Test Database"; + store->open_database(dbName, true); + + int64_t numKeys = 10; + int64_t numValues = 5; + + write_test_data({ dbName }, numKeys, numValues, *store); + + { + // read from a key mid-way through + int64_t startKey = 7; + auto key = get_key(startKey); + LMDBStore::ReadTransaction::SharedPtr tx = store->create_shared_read_transaction(); + LMDBStore::Cursor::Ptr cursor = store->create_cursor(tx, dbName); + bool setResult = cursor->set_at_key(key); + EXPECT_TRUE(setResult); + + int64_t numKeysToRead = 50; + KeyDupValuesVector keyValues; + cursor->read_prev((uint64_t)numKeysToRead, keyValues); + + KeyDupValuesVector expected; + for (int64_t count = startKey; count >= 0; count--) { + auto key = get_key(count); + ValuesVector dup; + for (int64_t dupCount = 0; dupCount < numValues; dupCount++) { + auto data = get_value(count, dupCount); + dup.emplace_back(data); + } + KeyValuesPair pair = { key, dup }; + expected.emplace_back(pair); + } + EXPECT_EQ(keyValues, expected); + } +} + +TEST_F(LMDBStoreTest, can_read_in_both_directions_with_cursors) +{ + LMDBStore::Ptr store = create_store(2); + + const std::string dbName = "Test Database"; + store->open_database(dbName, true); + + int64_t numKeys = 10; + int64_t numValues = 5; + + write_test_data({ dbName }, numKeys, numValues, *store); + + { + // read backwards from a key mid-way through + int64_t startKey = 7; + auto key = get_key(startKey); + LMDBStore::ReadTransaction::SharedPtr tx = store->create_shared_read_transaction(); + LMDBStore::Cursor::Ptr cursor = store->create_cursor(tx, dbName); + bool setResult = cursor->set_at_key(key); + EXPECT_TRUE(setResult); + + int64_t numKeysToRead = 4; + KeyDupValuesVector keyValuesReverse; + cursor->read_prev((uint64_t)numKeysToRead, keyValuesReverse); + + // now read forwards using the same cursor + startKey = (startKey - numKeysToRead) + 1; + key = get_key(startKey); + setResult = cursor->set_at_key(key); + EXPECT_TRUE(setResult); + KeyDupValuesVector keyValues; + cursor->read_next((uint64_t)numKeysToRead, keyValues); + + // Ensure the data returned by the reverse operation matches that returned by the forwards operation + KeyDupValuesVector temp(keyValuesReverse.rbegin(), keyValuesReverse.rend()); + EXPECT_EQ(temp, keyValues); + } +} + +TEST_F(LMDBStoreTest, can_use_multiple_cursors_with_same_tx) +{ + LMDBStore::Ptr store = create_store(2); + + const std::string dbName = "Test Database"; + store->open_database(dbName, true); + + int64_t numKeys = 10; + int64_t numValues = 5; + + write_test_data({ dbName }, numKeys, numValues, *store); + + { + // read backwards from a key mid-way through + int64_t startKey = 7; + auto key = get_key(startKey); + LMDBStore::ReadTransaction::SharedPtr tx = store->create_shared_read_transaction(); + LMDBStore::Cursor::Ptr cursor = store->create_cursor(tx, dbName); + bool setResult = cursor->set_at_key(key); + EXPECT_TRUE(setResult); + + int64_t numKeysToRead = 4; + KeyDupValuesVector keyValuesReverse; + cursor->read_prev((uint64_t)numKeysToRead, keyValuesReverse); + + // now read forwards using a second cursor against the same transaction + LMDBStore::Cursor::Ptr cursor2 = store->create_cursor(tx, dbName); + startKey = (startKey - numKeysToRead) + 1; + + key = get_key(startKey); + setResult = cursor2->set_at_key(key); + 
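// Both cursors were created from the same shared read transaction, so they observe the same snapshot of the database. +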
EXPECT_TRUE(setResult); + + KeyDupValuesVector keyValues; + cursor2->read_next((uint64_t)numKeysToRead, keyValues); + + KeyDupValuesVector temp(keyValuesReverse.rbegin(), keyValuesReverse.rend()); + EXPECT_EQ(temp, keyValues); + } +} + +TEST_F(LMDBStoreTest, can_write_and_delete_many_times) +{ + LMDBStore::Ptr store = create_store(2); + + const std::vector<std::string> dbNames = { "Test Database No Dups", "Test Database Dups" }; + store->open_database(dbNames[0], false); + store->open_database(dbNames[1], true); + + int64_t numKeys = 5000; + int64_t numValues = 10; + int64_t numIterations = 20; + + KeyOptionalValuesVector toDelete; + for (int64_t i = 0; i < numIterations; i++) { + KeyDupValuesVector testDataNoDuplicates; + KeyDupValuesVector testDataDuplicates; + prepare_test_data(numKeys, numValues, testDataDuplicates, i * numKeys); + prepare_test_data(numKeys, 1, testDataNoDuplicates, i * numKeys); + if (i > 0) { + // delete all of the previous iteration's keys + for (int64_t k = 0; k < numKeys; k++) { + int64_t keyToDelete = ((i - 1) * numKeys) + k; + toDelete.emplace_back(get_key(keyToDelete), std::nullopt); + } + } + LMDBStore::PutData putData1 = { testDataNoDuplicates, toDelete, dbNames[0] }; + LMDBStore::PutData putData2 = { testDataDuplicates, toDelete, dbNames[1] }; + std::vector<LMDBStore::PutData> putDatas{ putData1, putData2 }; + EXPECT_NO_THROW(store->put(putDatas)); + } +} + +TEST_F(LMDBStoreTest, reports_stats) +{ + LMDBStore::Ptr store = create_store(2); + + const std::vector<std::string> dbNames = { "Test Database No Dups", "Test Database Dups" }; + store->open_database(dbNames[0], false); + store->open_database(dbNames[1], true); + + int64_t numKeys = 10; + int64_t numValues = 5; + + write_test_data(dbNames, numKeys, numValues, *store); + + std::vector<DBStats> stats; + uint64_t mapSize = store->get_stats(stats); + EXPECT_EQ(mapSize, LMDBStoreTest::_mapSize * 1024); + EXPECT_EQ(stats.size(), 2); + for (size_t i = 0; i < 2; i++) { + if (stats[i].name == dbNames[0]) { + // The DB without duplicates should contain as many items as there are keys + EXPECT_EQ(stats[i].numDataItems, numKeys); + } else if (stats[i].name == dbNames[1]) { + // The DB with duplicates should contain numKeys * numValues items + EXPECT_EQ(stats[i].numDataItems, numKeys * numValues); + } else { + FAIL(); + } + } +} + +TEST_F(LMDBStoreTest, can_read_data_from_multiple_threads) +{ + LMDBStore::Ptr store = create_store(2); + + const std::string dbName = "Test Database"; + store->open_database(dbName, true); + + int64_t numKeys = 10; + int64_t numValues = 5; + int64_t numIterationsPerThread = 1000; + uint64_t numThreads = 10; + + write_test_data({ dbName }, numKeys, numValues, *store); + + { + auto func = [&]() -> void { + for (int64_t iteration = 0; iteration < numIterationsPerThread; iteration++) { + for (int64_t count = 0; count < numKeys; count++) { + auto key = get_key(count); + LMDBStore::ReadTransaction::SharedPtr tx = store->create_shared_read_transaction(); + LMDBStore::Cursor::Ptr cursor = store->create_cursor(tx, dbName); + cursor->set_at_key(key); + KeyDupValuesVector keyValuePairs; + cursor->read_next(1, keyValuePairs); + + ValuesVector dup; + KeyDupValuesVector expected; + for (int64_t dupCount = 0; dupCount < numValues; dupCount++) { + auto data = get_value(count, dupCount); + dup.emplace_back(data); + } + KeyValuesPair pair = { key, dup }; + expected.emplace_back(pair); + EXPECT_EQ(keyValuePairs, expected); + } + } + }; + std::vector<std::unique_ptr<std::thread>> threads; + for (uint64_t count = 0; count < numThreads; count++) { +
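// Each worker repeatedly opens its own read transaction and cursor, exercising many concurrent readers against a single LMDB environment. +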
threads.emplace_back(std::make_unique<std::thread>(func)); + } + for (uint64_t count = 0; count < numThreads; count++) { + threads[count]->join(); + } + } +} diff --git a/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_store_base.cpp b/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_store_base.cpp new file mode 100644 index 000000000000..dd81015fea69 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_store_base.cpp @@ -0,0 +1,32 @@ +#include "barretenberg/lmdblib/lmdb_store_base.hpp" + +namespace bb::lmdblib { +LMDBStoreBase::LMDBStoreBase(std::string directory, uint64_t mapSizeKb, uint64_t maxNumReaders, uint64_t maxDbs) + : _dbDirectory(std::move(directory)) + , _environment((std::make_shared<LMDBEnvironment>(_dbDirectory, mapSizeKb, maxDbs, maxNumReaders))) +{} +LMDBStoreBase::~LMDBStoreBase() = default; +LMDBStoreBase::ReadTransaction::Ptr LMDBStoreBase::create_read_transaction() const +{ + _environment->wait_for_reader(); + return std::make_unique<ReadTransaction>(_environment); +} + +LMDBStoreBase::ReadTransaction::SharedPtr LMDBStoreBase::create_shared_read_transaction() const +{ + _environment->wait_for_reader(); + return std::make_shared<ReadTransaction>(_environment); +} + +LMDBStoreBase::DBCreationTransaction::Ptr LMDBStoreBase::create_db_transaction() const +{ + _environment->wait_for_writer(); + return std::make_unique<DBCreationTransaction>(_environment); +} + +LMDBStoreBase::WriteTransaction::Ptr LMDBStoreBase::create_write_transaction() const +{ + _environment->wait_for_writer(); + return std::make_unique<WriteTransaction>(_environment); +} +} // namespace bb::lmdblib \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_store_base.hpp b/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_store_base.hpp new file mode 100644 index 000000000000..1ba5760385bb --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_store_base.hpp @@ -0,0 +1,29 @@ +#pragma once + +#include "barretenberg/lmdblib/lmdb_db_transaction.hpp" +#include "barretenberg/lmdblib/lmdb_environment.hpp" +#include "barretenberg/lmdblib/lmdb_read_transaction.hpp" +#include "barretenberg/lmdblib/lmdb_write_transaction.hpp" + +namespace bb::lmdblib { +class LMDBStoreBase { + public: + using ReadTransaction = LMDBReadTransaction; + using WriteTransaction = LMDBWriteTransaction; + using DBCreationTransaction = LMDBDatabaseCreationTransaction; + LMDBStoreBase(std::string directory, uint64_t mapSizeKb, uint64_t maxNumReaders, uint64_t maxDbs); + LMDBStoreBase(const LMDBStoreBase& other) = delete; + LMDBStoreBase& operator=(const LMDBStoreBase& other) = delete; + LMDBStoreBase(LMDBStoreBase&& other) noexcept = default; + LMDBStoreBase& operator=(LMDBStoreBase&& other) noexcept = default; + virtual ~LMDBStoreBase() = 0; + ReadTransaction::Ptr create_read_transaction() const; + ReadTransaction::SharedPtr create_shared_read_transaction() const; + WriteTransaction::Ptr create_write_transaction() const; + LMDBDatabaseCreationTransaction::Ptr create_db_transaction() const; + + protected: + std::string _dbDirectory; + LMDBEnvironment::SharedPtr _environment; +}; +} // namespace bb::lmdblib diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_transaction.cpp b/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_transaction.cpp similarity index 60% rename from barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_transaction.cpp rename to barretenberg/cpp/src/barretenberg/lmdblib/lmdb_transaction.cpp index b41787138ebf..45b1c9d1c2d2 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_transaction.cpp +++ b/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_transaction.cpp @@ -1,17 +1,18 @@ -#include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_transaction.hpp" - -#include "barretenberg/crypto/merkle_tree/lmdb_store/callbacks.hpp" -#include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_environment.hpp" +#include "barretenberg/lmdblib/lmdb_transaction.hpp" +#include "barretenberg/lmdblib/lmdb_environment.hpp" +#include "barretenberg/lmdblib/lmdb_helpers.hpp" +#include #include -namespace bb::crypto::merkle_tree { +namespace bb::lmdblib { LMDBTransaction::LMDBTransaction(std::shared_ptr<LMDBEnvironment> env, bool readOnly) : _environment(std::move(env)) + , _id(_environment->getNextId()) , state(TransactionState::OPEN) { MDB_txn* p = nullptr; - call_lmdb_func( - "mdb_txn_begin", mdb_txn_begin, _environment->underlying(), p, readOnly ? MDB_RDONLY : 0U, &_transaction); + const std::string name("mdb_txn_begin"); + call_lmdb_func(name, mdb_txn_begin, _environment->underlying(), p, readOnly ? MDB_RDONLY : 0U, &_transaction); } LMDBTransaction::~LMDBTransaction() = default; @@ -21,6 +22,11 @@ MDB_txn* LMDBTransaction::underlying() const { return _transaction; } +uint64_t LMDBTransaction::id() const +{ + return _id; +} + void LMDBTransaction::abort() { if (state != TransactionState::OPEN) { @@ -35,8 +41,8 @@ bool LMDBTransaction::get_value(std::vector<uint8_t>& key, std::vector<uint8_t>& return lmdb_queries::get_value(key, data, db, *this); } -bool LMDBTransaction::get_value(std::vector<uint8_t>& key, index_t& data, const LMDBDatabase& db) const +bool LMDBTransaction::get_value(std::vector<uint8_t>& key, uint64_t& data, const LMDBDatabase& db) const { return lmdb_queries::get_value(key, data, db, *this); } -} // namespace bb::crypto::merkle_tree \ No newline at end of file +} // namespace bb::lmdblib \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_transaction.hpp b/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_transaction.hpp similarity index 87% rename from barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_transaction.hpp rename to barretenberg/cpp/src/barretenberg/lmdblib/lmdb_transaction.hpp index 9bbea8ea42e8..d5bc5c4b0198 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/lmdb_transaction.hpp +++ b/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_transaction.hpp @@ -1,12 +1,12 @@ #pragma once -#include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_database.hpp" -#include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_environment.hpp" -#include "barretenberg/crypto/merkle_tree/lmdb_store/queries.hpp" +#include "barretenberg/lmdblib/lmdb_environment.hpp" +#include "barretenberg/lmdblib/queries.hpp" #include "lmdb.h" +#include #include #include -namespace bb::crypto::merkle_tree { +namespace bb::lmdblib { /* * Abstract base class to represent an LMDB transaction. @@ -19,6 +19,8 @@ enum TransactionState { ABORTED, }; +class LMDBDatabase; + class LMDBTransaction { public: LMDBTransaction(LMDBEnvironment::SharedPtr env, bool readOnly = false); @@ -31,6 +33,8 @@ class LMDBTransaction { MDB_txn* underlying() const; + uint64_t id() const; + /* * Rolls back the transaction. * Must be called by read transactions to signal the end of the transaction.
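+ * For a read-only transaction, aborting simply releases the reader slot back to the environment; no data is modified.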
@@ -50,7 +54,7 @@ class LMDBTransaction { template <typename T> bool get_value(T& key, std::vector<uint8_t>& data, const LMDBDatabase& db) const; - template <typename T> bool get_value(T& key, index_t& data, const LMDBDatabase& db) const; + template <typename T> bool get_value(T& key, uint64_t& data, const LMDBDatabase& db) const; template <typename T> void get_all_values_greater_or_equal_key(const T& key, @@ -64,10 +68,11 @@ class LMDBTransaction { bool get_value(std::vector<uint8_t>& key, std::vector<uint8_t>& data, const LMDBDatabase& db) const; - bool get_value(std::vector<uint8_t>& key, index_t& data, const LMDBDatabase& db) const; + bool get_value(std::vector<uint8_t>& key, uint64_t& data, const LMDBDatabase& db) const; protected: std::shared_ptr<LMDBEnvironment> _environment; + uint64_t _id; MDB_txn* _transaction; TransactionState state; }; @@ -78,7 +83,7 @@ template <typename T> bool LMDBTransaction::get_value(T& key, std::vector<uint8_t>& data, const LMDBDatabase& db) const -template <typename T> bool LMDBTransaction::get_value(T& key, index_t& data, const LMDBDatabase& db) const +template <typename T> bool LMDBTransaction::get_value(T& key, uint64_t& data, const LMDBDatabase& db) const { std::vector<uint8_t> keyBuffer = serialise_key(key); return get_value(keyBuffer, data, db); @@ -120,4 +125,4 @@ void LMDBTransaction::get_all_values_lesser_or_equal_key(const T& key, { lmdb_queries::get_all_values_lesser_or_equal_key(key, data, db, *this); } -} // namespace bb::crypto::merkle_tree \ No newline at end of file +} // namespace bb::lmdblib diff --git a/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_write_transaction.cpp b/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_write_transaction.cpp new file mode 100644 index 000000000000..b4d3151ce03e --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_write_transaction.cpp @@ -0,0 +1,57 @@ + + +#include "barretenberg/lmdblib/lmdb_write_transaction.hpp" + +#include "barretenberg/lmdblib/lmdb_database.hpp" +#include "barretenberg/lmdblib/lmdb_environment.hpp" +#include "barretenberg/lmdblib/lmdb_helpers.hpp" +#include "barretenberg/lmdblib/queries.hpp" +#include "lmdb.h" +#include + +namespace bb::lmdblib { + +LMDBWriteTransaction::LMDBWriteTransaction(LMDBEnvironment::SharedPtr env) + : LMDBTransaction(std::move(env)) +{} + +LMDBWriteTransaction::~LMDBWriteTransaction() +{ + try_abort(); + _environment->release_writer(); +} + +void LMDBWriteTransaction::commit() +{ + if (state == TransactionState::ABORTED) { + throw std::runtime_error("Tried to commit reverted transaction"); + } + call_lmdb_func("mdb_txn_commit", mdb_txn_commit, _transaction); + state = TransactionState::COMMITTED; +} + +void LMDBWriteTransaction::try_abort() +{ + LMDBTransaction::abort(); +} + +void LMDBWriteTransaction::put_value(Key& key, Value& data, const LMDBDatabase& db) +{ + lmdb_queries::put_value(key, data, db, *this, db.duplicate_keys_permitted()); +} + +void LMDBWriteTransaction::put_value(Key& key, const uint64_t& data, const LMDBDatabase& db) +{ + lmdb_queries::put_value(key, data, db, *this, db.duplicate_keys_permitted()); +} + +void LMDBWriteTransaction::delete_value(Key& key, const LMDBDatabase& db) +{ + lmdb_queries::delete_value(key, db, *this); +} + +void LMDBWriteTransaction::delete_value(Key& key, Value& value, const LMDBDatabase& db) +{ + lmdb_queries::delete_value(key, value, db, *this); +} +} // namespace bb::lmdblib diff --git a/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_write_transaction.hpp b/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_write_transaction.hpp new file mode 100644 index 000000000000..71674409b6fc --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/lmdblib/lmdb_write_transaction.hpp @@ -0,0 +1,94 @@ +#pragma once +#include "barretenberg/common/serialize.hpp" +#include "barretenberg/crypto/merkle_tree/types.hpp" +#include "barretenberg/lmdblib/lmdb_database.hpp" +#include "barretenberg/lmdblib/lmdb_environment.hpp" +#include "barretenberg/lmdblib/lmdb_helpers.hpp" +#include "barretenberg/lmdblib/lmdb_transaction.hpp" +#include "barretenberg/lmdblib/queries.hpp" +#include "lmdb.h" +#include +#include +#include + +namespace bb::lmdblib { + +/** + * RAII wrapper for an LMDB write transaction. + * Provides methods for writing values by their key. + * Must be either committed to persist the changes or aborted to roll them back. + * Will automatically abort the transaction during destruction if changes have not been committed. + */ + +class LMDBWriteTransaction : public LMDBTransaction { + public: + using Ptr = std::unique_ptr<LMDBWriteTransaction>; + + LMDBWriteTransaction(LMDBEnvironment::SharedPtr env); + LMDBWriteTransaction(const LMDBWriteTransaction& other) = delete; + LMDBWriteTransaction(LMDBWriteTransaction&& other) = delete; + LMDBWriteTransaction& operator=(const LMDBWriteTransaction& other) = delete; + LMDBWriteTransaction& operator=(LMDBWriteTransaction&& other) = delete; + ~LMDBWriteTransaction() override; + + template <typename T> void put_value(T& key, Value& data, const LMDBDatabase& db); + + template <typename T> void put_value(T& key, const uint64_t& data, const LMDBDatabase& db); + + void put_value(Key& key, Value& data, const LMDBDatabase& db); + + void put_value(Key& key, const uint64_t& data, const LMDBDatabase& db); + + template <typename T> void delete_value(T& key, const LMDBDatabase& db); + + template <typename T> void delete_value(T& key, Value& value, const LMDBDatabase& db); + + void delete_value(Key& key, const LMDBDatabase& db); + + void delete_value(Key& key, Value& value, const LMDBDatabase& db); + + template <typename T> void delete_all_values_greater_or_equal_key(const T& key, const LMDBDatabase& db) const; + + template <typename T> void delete_all_values_lesser_or_equal_key(const T& key, const LMDBDatabase& db) const; + + void commit(); + + void try_abort(); +}; + +template <typename T> void LMDBWriteTransaction::put_value(T& key, Value& data, const LMDBDatabase& db) +{ + Key keyBuffer = serialise_key(key); + put_value(keyBuffer, data, db); +} + +template <typename T> void LMDBWriteTransaction::put_value(T& key, const uint64_t& data, const LMDBDatabase& db) +{ + Key keyBuffer = serialise_key(key); + put_value(keyBuffer, data, db); +} + +template <typename T> void LMDBWriteTransaction::delete_value(T& key, const LMDBDatabase& db) +{ + Key keyBuffer = serialise_key(key); + lmdb_queries::delete_value(keyBuffer, db, *this); +} + +template <typename T> void LMDBWriteTransaction::delete_value(T& key, Value& value, const LMDBDatabase& db) +{ + Key keyBuffer = serialise_key(key); + lmdb_queries::delete_value(keyBuffer, value, db, *this); +} + +template <typename T> +void LMDBWriteTransaction::delete_all_values_greater_or_equal_key(const T& key, const LMDBDatabase& db) const +{ + lmdb_queries::delete_all_values_greater_or_equal_key(key, db, *this); +} + +template <typename T> +void LMDBWriteTransaction::delete_all_values_lesser_or_equal_key(const T& key, const LMDBDatabase& db) const +{ + lmdb_queries::delete_all_values_lesser_or_equal_key(key, db, *this); +} +} // namespace bb::lmdblib \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/lmdblib/queries.cpp b/barretenberg/cpp/src/barretenberg/lmdblib/queries.cpp new file mode 100644 index 000000000000..9a6647513d14 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/lmdblib/queries.cpp @@ -0,0 +1,227 @@ +#include "barretenberg/lmdblib/queries.hpp" +#include
"barretenberg/lmdblib/lmdb_cursor.hpp" +#include "barretenberg/lmdblib/lmdb_helpers.hpp" +#include "barretenberg/lmdblib/lmdb_write_transaction.hpp" +#include "barretenberg/lmdblib/types.hpp" +#include "lmdb.h" +#include +#include + +namespace bb::lmdblib::lmdb_queries { + +void put_value( + Key& key, Value& data, const LMDBDatabase& db, bb::lmdblib::LMDBWriteTransaction& tx, bool duplicatesPermitted) +{ + MDB_val dbKey; + dbKey.mv_size = key.size(); + dbKey.mv_data = (void*)key.data(); + + MDB_val dbVal; + dbVal.mv_size = data.size(); + dbVal.mv_data = (void*)data.data(); + + // The database has been configured to allow duplicate keys, but we don't permit duplicate key/value pairs + // If we create a duplicate it will not insert it + unsigned int flags = duplicatesPermitted ? MDB_NODUPDATA : 0U; + call_lmdb_func("mdb_put", mdb_put, tx.underlying(), db.underlying(), &dbKey, &dbVal, flags); +} + +void put_value(Key& key, + const uint64_t& data, + const LMDBDatabase& db, + bb::lmdblib::LMDBWriteTransaction& tx, + bool duplicatesPermitted) +{ + MDB_val dbKey; + dbKey.mv_size = key.size(); + dbKey.mv_data = (void*)key.data(); + + // use the serialise key method for serialising the index + Value serialised = serialise_key(data); + + MDB_val dbVal; + dbVal.mv_size = serialised.size(); + dbVal.mv_data = (void*)serialised.data(); + + // The database has been configured to allow duplicate keys, but we don't permit duplicate key/value pairs + // If we create a duplicate it will not insert it + unsigned int flags = duplicatesPermitted ? MDB_NODUPDATA : 0U; + call_lmdb_func("mdb_put", mdb_put, tx.underlying(), db.underlying(), &dbKey, &dbVal, flags); +} + +void delete_value(Key& key, const LMDBDatabase& db, bb::lmdblib::LMDBWriteTransaction& tx) +{ + MDB_val dbKey; + dbKey.mv_size = key.size(); + dbKey.mv_data = (void*)key.data(); + + MDB_val* dbVal = nullptr; + int code = call_lmdb_func_with_return(mdb_del, tx.underlying(), db.underlying(), &dbKey, dbVal); + if (code != MDB_SUCCESS && code != MDB_NOTFOUND) { + throw_error("mdb_del", code); + } +} + +void delete_value(Key& key, Value& value, const LMDBDatabase& db, bb::lmdblib::LMDBWriteTransaction& tx) +{ + MDB_val dbKey; + dbKey.mv_size = key.size(); + dbKey.mv_data = (void*)key.data(); + + MDB_val dbVal; + dbVal.mv_size = value.size(); + dbVal.mv_data = (void*)value.data(); + + int code = call_lmdb_func_with_return(mdb_del, tx.underlying(), db.underlying(), &dbKey, &dbVal); + if (code != MDB_SUCCESS && code != MDB_NOTFOUND) { + throw_error("mdb_del", code); + } +} + +bool get_value(Key& key, Value& data, const LMDBDatabase& db, const bb::lmdblib::LMDBTransaction& tx) +{ + MDB_val dbKey; + dbKey.mv_size = key.size(); + dbKey.mv_data = (void*)key.data(); + + MDB_val dbVal; + if (!call_lmdb_func(mdb_get, tx.underlying(), db.underlying(), &dbKey, &dbVal)) { + return false; + } + copy_to_vector(dbVal, data); + return true; +} + +bool get_value(Key& key, uint64_t& data, const LMDBDatabase& db, const bb::lmdblib::LMDBTransaction& tx) +{ + MDB_val dbKey; + dbKey.mv_size = key.size(); + dbKey.mv_data = (void*)key.data(); + + MDB_val dbVal; + if (!call_lmdb_func(mdb_get, tx.underlying(), db.underlying(), &dbKey, &dbVal)) { + return false; + } + // use the deserialise key method for deserialising the index + deserialise_key(dbVal.mv_data, data); + return true; +} + +bool set_at_key(const LMDBCursor& cursor, Key& key) +{ + MDB_val dbKey; + dbKey.mv_size = key.size(); + dbKey.mv_data = (void*)key.data(); + + MDB_val dbVal; + int code = 
mdb_cursor_get(cursor.underlying(), &dbKey, &dbVal, MDB_SET); + return code == MDB_SUCCESS; +} + +bool set_at_key_gte(const LMDBCursor& cursor, Key& key) +{ + MDB_val dbKey; + dbKey.mv_size = key.size(); + dbKey.mv_data = (void*)key.data(); + + MDB_val dbVal; + int code = mdb_cursor_get(cursor.underlying(), &dbKey, &dbVal, MDB_SET_RANGE); + return code == MDB_SUCCESS; +} + +bool set_at_start(const LMDBCursor& cursor) +{ + MDB_val dbKey; + MDB_val dbVal; + int code = mdb_cursor_get(cursor.underlying(), &dbKey, &dbVal, MDB_FIRST); + return code == MDB_SUCCESS; +} + +bool set_at_end(const LMDBCursor& cursor) +{ + MDB_val dbKey; + MDB_val dbVal; + int code = mdb_cursor_get(cursor.underlying(), &dbKey, &dbVal, MDB_LAST); + return code == MDB_SUCCESS; +} + +bool read_next(const LMDBCursor& cursor, KeyDupValuesVector& keyValues, uint64_t numKeysToRead, MDB_cursor_op op) +{ + uint64_t numKeysRead = 0; + MDB_val dbKey; + MDB_val dbVal; + int code = mdb_cursor_get(cursor.underlying(), &dbKey, &dbVal, MDB_GET_CURRENT); + while (numKeysRead < numKeysToRead && code == MDB_SUCCESS) { + // extract the key and value + Value value; + Key key; + copy_to_vector(dbVal, value); + copy_to_vector(dbKey, key); + ValuesVector values; + values.emplace_back(std::move(value)); + keyValues.emplace_back(std::move(key), std::move(values)); + ++numKeysRead; + // move to the next key + code = mdb_cursor_get(cursor.underlying(), &dbKey, &dbVal, op); + } + + return code != MDB_SUCCESS; // we're done +} + +bool read_next_dup(const LMDBCursor& cursor, KeyDupValuesVector& keyValues, uint64_t numKeysToRead, MDB_cursor_op op) +{ + uint64_t numKeysRead = 0; + MDB_val dbKey; + MDB_val dbVal; + ValuesVector values; + + // ensure we are positioned at first data item of current key + int code = mdb_cursor_get(cursor.underlying(), &dbKey, &dbVal, MDB_FIRST_DUP); + while (numKeysRead < numKeysToRead && code == MDB_SUCCESS) { + code = mdb_cursor_get(cursor.underlying(), &dbKey, &dbVal, MDB_GET_CURRENT); + // extract the key and value + Value value; + Key key; + copy_to_vector(dbVal, value); + copy_to_vector(dbKey, key); + values.push_back(value); + + // move to the next value at this key + code = mdb_cursor_get(cursor.underlying(), &dbKey, &dbVal, MDB_NEXT_DUP); + if (code == MDB_NOTFOUND) { + // No more values at this key + ++numKeysRead; + keyValues.emplace_back(std::move(key), std::move(values)); + values = ValuesVector(); + // move to the next key + code = mdb_cursor_get(cursor.underlying(), &dbKey, &dbVal, op); + if (code == MDB_SUCCESS) { + code = mdb_cursor_get(cursor.underlying(), &dbKey, &dbVal, MDB_FIRST_DUP); + } else { + // no more keys to read + return true; + } + } + } + + return false; +} + +bool read_next(const LMDBCursor& cursor, KeyDupValuesVector& keyValues, uint64_t numKeysToRead) +{ + return read_next(cursor, keyValues, numKeysToRead, MDB_NEXT); +} +bool read_prev(const LMDBCursor& cursor, KeyDupValuesVector& keyValues, uint64_t numKeysToRead) +{ + return read_next(cursor, keyValues, numKeysToRead, MDB_PREV); +} + +bool read_next_dup(const LMDBCursor& cursor, KeyDupValuesVector& keyValues, uint64_t numKeysToRead) +{ + return read_next_dup(cursor, keyValues, numKeysToRead, MDB_NEXT_NODUP); +} +bool read_prev_dup(const LMDBCursor& cursor, KeyDupValuesVector& keyValues, uint64_t numKeysToRead) +{ + return read_next_dup(cursor, keyValues, numKeysToRead, MDB_PREV_NODUP); +} +} // namespace bb::lmdblib::lmdb_queries diff --git a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/queries.hpp 
b/barretenberg/cpp/src/barretenberg/lmdblib/queries.hpp similarity index 88% rename from barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/queries.hpp rename to barretenberg/cpp/src/barretenberg/lmdblib/queries.hpp index c26768fa8ec0..f9045b403371 100644 --- a/barretenberg/cpp/src/barretenberg/crypto/merkle_tree/lmdb_store/queries.hpp +++ b/barretenberg/cpp/src/barretenberg/lmdblib/queries.hpp @@ -1,16 +1,18 @@ #pragma once -#include "barretenberg/crypto/merkle_tree/lmdb_store/callbacks.hpp" -#include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_database.hpp" +#include "barretenberg/lmdblib/lmdb_database.hpp" +#include "barretenberg/lmdblib/lmdb_helpers.hpp" +#include "barretenberg/lmdblib/types.hpp" #include "lmdb.h" #include #include #include -namespace bb::crypto::merkle_tree { +namespace bb::lmdblib { class LMDBTransaction; -class LMDBTreeWriteTransaction; +class LMDBWriteTransaction; +class LMDBCursor; namespace lmdb_queries { @@ -178,10 +180,10 @@ bool get_value_or_previous(TKey& key, } template <typename TKey, typename TxType> -bool get_value_or_greater(TKey& key, std::vector<uint8_t>& data, const LMDBDatabase& db, const TxType& tx) +bool get_value_or_greater(TKey& key, Value& data, const LMDBDatabase& db, const TxType& tx) { bool success = false; - std::vector<uint8_t> keyBuffer = serialise_key(key); + Key keyBuffer = serialise_key(key); uint32_t keySize = static_cast<uint32_t>(keyBuffer.size()); MDB_cursor* cursor = nullptr; call_lmdb_func("mdb_cursor_open", mdb_cursor_open, tx.underlying(), db.underlying(), &cursor); @@ -217,12 +219,9 @@ bool get_value_or_greater(TKey& key, std::vector<uint8_t>& data, const LMDBDatab } template <typename TKey, typename TxType> -void get_all_values_greater_or_equal_key(const TKey& key, - std::vector<std::vector<uint8_t>>& data, - const LMDBDatabase& db, - const TxType& tx) +void get_all_values_greater_or_equal_key(const TKey& key, ValuesVector& data, const LMDBDatabase& db, const TxType& tx) { - std::vector<uint8_t> keyBuffer = serialise_key(key); + Key keyBuffer = serialise_key(key); uint32_t keySize = static_cast<uint32_t>(keyBuffer.size()); MDB_cursor* cursor = nullptr; call_lmdb_func("mdb_cursor_open", mdb_cursor_open, tx.underlying(), db.underlying(), &cursor); @@ -243,7 +242,7 @@ void get_all_values_greater_or_equal_key(const TKey& key, break; } // this is data that we need to extract - std::vector<uint8_t> temp; + Value temp; copy_to_vector(dbVal, temp); data.emplace_back(temp); @@ -266,7 +265,7 @@ template <typename TKey, typename TxType> void delete_all_values_greater_or_equal_key(const TKey& key, const LMDBDatabase& db, const TxType& tx) { - std::vector<uint8_t> keyBuffer = serialise_key(key); + Key keyBuffer = serialise_key(key); uint32_t keySize = static_cast<uint32_t>(keyBuffer.size()); MDB_cursor* cursor = nullptr; call_lmdb_func("mdb_cursor_open", mdb_cursor_open, tx.underlying(), db.underlying(), &cursor); @@ -310,12 +309,9 @@ void delete_all_values_greater_or_equal_key(const TKey& key, const LMDBDatabase& } template <typename TKey, typename TxType> -void get_all_values_lesser_or_equal_key(const TKey& key, - std::vector<std::vector<uint8_t>>& data, - const LMDBDatabase& db, - const TxType& tx) +void get_all_values_lesser_or_equal_key(const TKey& key, ValuesVector& data, const LMDBDatabase& db, const TxType& tx) { - std::vector<uint8_t> keyBuffer = serialise_key(key); + Key keyBuffer = serialise_key(key); uint32_t keySize = static_cast<uint32_t>(keyBuffer.size()); MDB_cursor* cursor = nullptr; call_lmdb_func("mdb_cursor_open", mdb_cursor_open, tx.underlying(), db.underlying(), &cursor); @@ -330,10 +326,10 @@ void get_all_values_lesser_or_equal_key(const TKey& key, int code = mdb_cursor_get(cursor, &dbKey, &dbVal, MDB_SET_RANGE); 
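+ // MDB_SET_RANGE positions the cursor at the first key greater than or equal to the requested key, so the code below checks for an exact match and otherwise steps backwards to stay at or below the requested key. 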
if (code == 0) { // we found the key, now determine if it is the exact key - std::vector<uint8_t> temp = mdb_val_to_vector(dbKey); + Key temp = mdb_val_to_vector(dbKey); if (keyBuffer == temp) { // we have the exact key, copy its data - std::vector<uint8_t> temp; + Value temp; copy_to_vector(dbVal, temp); data.push_back(temp); } else { @@ -356,7 +352,7 @@ void get_all_values_lesser_or_equal_key(const TKey& key, break; } // the same size, grab the value and go round again - std::vector<uint8_t> temp; + Value temp; copy_to_vector(dbVal, temp); data.push_back(temp); @@ -377,7 +373,7 @@ void get_all_values_lesser_or_equal_key(const TKey& key, template <typename TKey, typename TxType> void delete_all_values_lesser_or_equal_key(const TKey& key, const LMDBDatabase& db, const TxType& tx) { - std::vector<uint8_t> keyBuffer = serialise_key(key); + Key keyBuffer = serialise_key(key); uint32_t keySize = static_cast<uint32_t>(keyBuffer.size()); MDB_cursor* cursor = nullptr; call_lmdb_func("mdb_cursor_open", mdb_cursor_open, tx.underlying(), db.underlying(), &cursor); @@ -392,7 +388,7 @@ void delete_all_values_lesser_or_equal_key(const TKey& key, const LMDBDatabase& int code = mdb_cursor_get(cursor, &dbKey, &dbVal, MDB_SET_RANGE); if (code == 0) { // we found the key, now determine if it is the exact key - std::vector<uint8_t> temp = mdb_val_to_vector(dbKey); + Key temp = mdb_val_to_vector(dbKey); if (keyBuffer == temp) { // we have the exact key, delete its data code = mdb_cursor_del(cursor, 0); @@ -440,20 +436,29 @@ void delete_all_values_lesser_or_equal_key(const TKey& key, const LMDBDatabase& call_lmdb_func(mdb_cursor_close, cursor); } -void put_value(std::vector<uint8_t>& key, - std::vector<uint8_t>& data, - const LMDBDatabase& db, - LMDBTreeWriteTransaction& tx); +void put_value( + Key& key, Value& data, const LMDBDatabase& db, LMDBWriteTransaction& tx, bool duplicatesPermitted = false); + +void put_value( + Key& key, const uint64_t& data, const LMDBDatabase& db, LMDBWriteTransaction& tx, bool duplicatesPermitted = false); + +void delete_value(Key& key, const LMDBDatabase& db, LMDBWriteTransaction& tx); + +void delete_value(Key& key, Value& value, const LMDBDatabase& db, LMDBWriteTransaction& tx); + +bool get_value(Key& key, Value& data, const LMDBDatabase& db, const LMDBTransaction& tx); -void put_value(std::vector<uint8_t>& key, const index_t& data, const LMDBDatabase& db, LMDBTreeWriteTransaction& tx); +bool get_value(Key& key, uint64_t& data, const LMDBDatabase& db, const LMDBTransaction& tx); -void delete_value(std::vector<uint8_t>& key, const LMDBDatabase& db, LMDBTreeWriteTransaction& tx); +bool set_at_key(const LMDBCursor& cursor, Key& key); +bool set_at_key_gte(const LMDBCursor& cursor, Key& key); +bool set_at_start(const LMDBCursor& cursor); +bool set_at_end(const LMDBCursor& cursor); -bool get_value(std::vector<uint8_t>& key, - std::vector<uint8_t>& data, - const LMDBDatabase& db, - const LMDBTransaction& tx); +bool read_next(const LMDBCursor& cursor, KeyDupValuesVector& keyValues, uint64_t numKeysToRead); +bool read_prev(const LMDBCursor& cursor, KeyDupValuesVector& keyValues, uint64_t numKeysToRead); -bool get_value(std::vector<uint8_t>& key, index_t& data, const LMDBDatabase& db, const LMDBTransaction& tx); +bool read_next_dup(const LMDBCursor& cursor, KeyDupValuesVector& keyValues, uint64_t numKeysToRead); +bool read_prev_dup(const LMDBCursor& cursor, KeyDupValuesVector& keyValues, uint64_t numKeysToRead); } // namespace lmdb_queries -} // namespace bb::crypto::merkle_tree +} // namespace bb::lmdblib diff --git a/barretenberg/cpp/src/barretenberg/lmdblib/types.hpp b/barretenberg/cpp/src/barretenberg/lmdblib/types.hpp new file mode 100644 index 000000000000..610ea11fdb48 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/lmdblib/types.hpp @@ -0,0 +1,69 @@ +#pragma once + +#include "barretenberg/serialize/msgpack.hpp" +#include "lmdb.h" +#include +#include +#include +#include +#include +namespace bb::lmdblib { +using Key = std::vector<uint8_t>; +using Value = std::vector<uint8_t>; +using KeysVector = std::vector<Key>; +using ValuesVector = std::vector<Value>; +using KeyValuesPair = std::pair<Key, ValuesVector>; +using OptionalValues = std::optional<ValuesVector>; +using OptionalValuesVector = std::vector<OptionalValues>; +using KeyDupValuesVector = std::vector<KeyValuesPair>; +using KeyOptionalValuesPair = std::pair<Key, OptionalValues>; +using KeyOptionalValuesVector = std::vector<KeyOptionalValuesPair>; + +struct DBStats { + std::string name; + uint64_t numDataItems; + uint64_t totalUsedSize; + + DBStats() = default; + DBStats(const DBStats& other) = default; + DBStats(DBStats&& other) noexcept { *this = std::move(other); } + ~DBStats() = default; + DBStats(std::string name, MDB_stat& stat) + : name(std::move(name)) + , numDataItems(stat.ms_entries) + , totalUsedSize(stat.ms_psize * (stat.ms_branch_pages + stat.ms_leaf_pages + stat.ms_overflow_pages)) + {} + DBStats(const std::string& name, uint64_t numDataItems, uint64_t totalUsedSize) + : name(name) + , numDataItems(numDataItems) + , totalUsedSize(totalUsedSize) + {} + + MSGPACK_FIELDS(name, numDataItems, totalUsedSize) + + bool operator==(const DBStats& other) const + { + return name == other.name && numDataItems == other.numDataItems && totalUsedSize == other.totalUsedSize; + } + + DBStats& operator=(const DBStats& other) = default; + + DBStats& operator=(DBStats&& other) noexcept + { + if (this != &other) { + name = std::move(other.name); + numDataItems = other.numDataItems; + totalUsedSize = other.totalUsedSize; + } + return *this; + } + + friend std::ostream& operator<<(std::ostream& os, const DBStats& stats) + { + os << "DB " << stats.name << ", num items: " << stats.numDataItems + << ", total used size: " << stats.totalUsedSize; + return os; + } +}; + +} // namespace bb::lmdblib \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/messaging/dispatcher.hpp b/barretenberg/cpp/src/barretenberg/messaging/dispatcher.hpp index 20327d4757b7..3adbee02f6ed 100644 --- a/barretenberg/cpp/src/barretenberg/messaging/dispatcher.hpp +++ b/barretenberg/cpp/src/barretenberg/messaging/dispatcher.hpp @@ -19,7 +19,7 @@ class MessageDispatcher { public: MessageDispatcher() = default; - bool onNewData(msgpack::object& obj, msgpack::sbuffer& buffer) + bool onNewData(msgpack::object& obj, msgpack::sbuffer& buffer) const { bb::messaging::HeaderOnlyMessage header; obj.convert(header); diff --git a/barretenberg/cpp/src/barretenberg/world_state_napi/CMakeLists.txt b/barretenberg/cpp/src/barretenberg/nodejs_module/CMakeLists.txt similarity index 73% rename from barretenberg/cpp/src/barretenberg/world_state_napi/CMakeLists.txt rename to barretenberg/cpp/src/barretenberg/nodejs_module/CMakeLists.txt index 31a88dc17c03..0a2e316ac8a9 100644 --- a/barretenberg/cpp/src/barretenberg/world_state_napi/CMakeLists.txt +++ b/barretenberg/cpp/src/barretenberg/nodejs_module/CMakeLists.txt @@ -24,7 +24,7 @@ execute_process( string(REGEX REPLACE "[\r\n\"]" "" NODE_ADDON_API_DIR ${NODE_ADDON_API_DIR}) string(REGEX REPLACE "[\r\n\"]" "" NODE_API_HEADERS_DIR ${NODE_API_HEADERS_DIR}) -add_library(world_state_napi SHARED ${SOURCE_FILES}) -set_target_properties(world_state_napi PROPERTIES PREFIX "" SUFFIX ".node") -target_include_directories(world_state_napi PRIVATE ${NODE_API_HEADERS_DIR} ${NODE_ADDON_API_DIR}) 
-target_link_libraries(world_state_napi PRIVATE world_state) +add_library(nodejs_module SHARED ${SOURCE_FILES}) +set_target_properties(nodejs_module PROPERTIES PREFIX "" SUFFIX ".node") +target_include_directories(nodejs_module PRIVATE ${NODE_API_HEADERS_DIR} ${NODE_ADDON_API_DIR}) +target_link_libraries(nodejs_module PRIVATE world_state) diff --git a/barretenberg/cpp/src/barretenberg/nodejs_module/init_module.cpp b/barretenberg/cpp/src/barretenberg/nodejs_module/init_module.cpp new file mode 100644 index 000000000000..8cfa6c36f8d2 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/nodejs_module/init_module.cpp @@ -0,0 +1,13 @@ +#include "barretenberg/nodejs_module/lmdb_store/lmdb_store_wrapper.hpp" +#include "barretenberg/nodejs_module/world_state/world_state.hpp" +#include "napi.h" + +Napi::Object Init(Napi::Env env, Napi::Object exports) +{ + exports.Set(Napi::String::New(env, "WorldState"), bb::nodejs::WorldStateWrapper::get_class(env)); + exports.Set(Napi::String::New(env, "LMDBStore"), bb::nodejs::lmdb_store::LMDBStoreWrapper::get_class(env)); + return exports; +} + +// NOLINTNEXTLINE +NODE_API_MODULE(addon, Init) diff --git a/barretenberg/cpp/src/barretenberg/nodejs_module/lmdb_store/lmdb_store_message.hpp b/barretenberg/cpp/src/barretenberg/nodejs_module/lmdb_store/lmdb_store_message.hpp new file mode 100644 index 000000000000..73ceb56a9452 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/nodejs_module/lmdb_store/lmdb_store_message.hpp @@ -0,0 +1,123 @@ +#pragma once +#include "barretenberg/lmdblib/types.hpp" +#include "barretenberg/messaging/header.hpp" +#include "barretenberg/serialize/msgpack.hpp" +#include "msgpack/adaptor/define_decl.hpp" +#include +#include +#include + +namespace bb::nodejs::lmdb_store { + +using namespace bb::messaging; + +enum LMDBStoreMessageType { + OPEN_DATABASE = FIRST_APP_MSG_TYPE, + + GET, + HAS, + + START_CURSOR, + ADVANCE_CURSOR, + CLOSE_CURSOR, + + BATCH, + + STATS, + + CLOSE, +}; + +struct OpenDatabaseRequest { + std::string db; + std::optional<bool> uniqueKeys; + MSGPACK_FIELDS(db, uniqueKeys); +}; + +struct GetRequest { + lmdblib::KeysVector keys; + std::string db; + MSGPACK_FIELDS(keys, db); +}; + +struct GetResponse { + lmdblib::OptionalValuesVector values; + MSGPACK_FIELDS(values); +}; + +struct HasRequest { + // std::map> entries; + lmdblib::KeyOptionalValuesVector entries; + std::string db; + MSGPACK_FIELDS(entries, db); +}; + +struct HasResponse { + // std::map exists; + std::vector<bool> exists; + MSGPACK_FIELDS(exists); +}; + +struct Batch { + lmdblib::KeyDupValuesVector addEntries; + lmdblib::KeyOptionalValuesVector removeEntries; + + MSGPACK_FIELDS(addEntries, removeEntries); +}; + +struct BatchRequest { + std::map<std::string, Batch> batches; + MSGPACK_FIELDS(batches); +}; + +struct StartCursorRequest { + lmdblib::Key key; + std::optional<bool> reverse; + std::optional<uint32_t> count; + std::optional<bool> onePage; + std::string db; + MSGPACK_FIELDS(key, reverse, count, onePage, db); +}; + +struct StartCursorResponse { + std::optional<uint64_t> cursor; + lmdblib::KeyDupValuesVector entries; + MSGPACK_FIELDS(cursor, entries); +}; + +struct AdvanceCursorRequest { + uint64_t cursor; + std::optional<uint32_t> count; + MSGPACK_FIELDS(cursor, count); +}; + +struct CloseCursorRequest { + uint64_t cursor; + MSGPACK_FIELDS(cursor); +}; + +struct AdvanceCursorResponse { + lmdblib::KeyDupValuesVector entries; + bool done; + MSGPACK_FIELDS(entries, done); +}; + +struct BoolResponse { + bool ok; + MSGPACK_FIELDS(ok); +}; + +struct BatchResponse { + uint64_t durationNs; + MSGPACK_FIELDS(durationNs); +}; + 
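+// Illustrative shape of a BATCH request (hypothetical database name and entries): the map key is the database name and each Batch carries the entries to add and remove, e.g. { batches: { "my-db": { addEntries: [[key, [value1, value2]]], removeEntries: [[key2, null]] } } }; the whole request is applied by a single store put call and BatchResponse reports its duration in nanoseconds. +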
+struct StatsResponse { + std::vector<lmdblib::DBStats> stats; + uint64_t dbMapSizeBytes; + MSGPACK_FIELDS(stats, dbMapSizeBytes); +}; + +} // namespace bb::nodejs::lmdb_store + +MSGPACK_ADD_ENUM(bb::nodejs::lmdb_store::LMDBStoreMessageType) diff --git a/barretenberg/cpp/src/barretenberg/nodejs_module/lmdb_store/lmdb_store_wrapper.cpp b/barretenberg/cpp/src/barretenberg/nodejs_module/lmdb_store/lmdb_store_wrapper.cpp new file mode 100644 index 000000000000..e93b5902f816 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/nodejs_module/lmdb_store/lmdb_store_wrapper.cpp @@ -0,0 +1,262 @@ +#include "barretenberg/nodejs_module/lmdb_store/lmdb_store_wrapper.hpp" +#include "barretenberg/lmdblib/lmdb_store.hpp" +#include "barretenberg/lmdblib/types.hpp" +#include "barretenberg/nodejs_module/lmdb_store/lmdb_store_message.hpp" +#include "napi.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +using namespace bb::nodejs; +using namespace bb::nodejs::lmdb_store; + +const uint64_t DEFAULT_MAP_SIZE = 1024UL * 1024; +const uint64_t DEFAULT_MAX_READERS = 16; +const uint64_t DEFAULT_CURSOR_PAGE_SIZE = 10; + +LMDBStoreWrapper::LMDBStoreWrapper(const Napi::CallbackInfo& info) + : ObjectWrap(info) +{ + Napi::Env env = info.Env(); + + size_t data_dir_index = 0; + std::string data_dir; + if (info.Length() > data_dir_index && info[data_dir_index].IsString()) { + data_dir = info[data_dir_index].As<Napi::String>(); + } else { + throw Napi::TypeError::New(env, "Directory needs to be a string"); + } + + size_t map_size_index = 1; + uint64_t map_size = DEFAULT_MAP_SIZE; + if (info.Length() > map_size_index) { + if (info[map_size_index].IsNumber()) { + map_size = info[map_size_index].As<Napi::Number>().Uint32Value(); + } else { + throw Napi::TypeError::New(env, "Map size must be a number or an object"); + } + } + + size_t max_readers_index = 2; + uint max_readers = DEFAULT_MAX_READERS; + if (info.Length() > max_readers_index) { + if (info[max_readers_index].IsNumber()) { + max_readers = info[max_readers_index].As<Napi::Number>().Uint32Value(); + } else if (!info[max_readers_index].IsUndefined()) { + throw Napi::TypeError::New(env, "The number of readers must be a number"); + } + } + + _store = std::make_unique<lmdblib::LMDBStore>(data_dir, map_size, max_readers, 2); + + _msg_processor.register_handler(LMDBStoreMessageType::OPEN_DATABASE, this, &LMDBStoreWrapper::open_database); + + _msg_processor.register_handler(LMDBStoreMessageType::GET, this, &LMDBStoreWrapper::get); + _msg_processor.register_handler(LMDBStoreMessageType::HAS, this, &LMDBStoreWrapper::has); + + _msg_processor.register_handler(LMDBStoreMessageType::START_CURSOR, this, &LMDBStoreWrapper::start_cursor); + _msg_processor.register_handler(LMDBStoreMessageType::ADVANCE_CURSOR, this, &LMDBStoreWrapper::advance_cursor); + _msg_processor.register_handler(LMDBStoreMessageType::CLOSE_CURSOR, this, &LMDBStoreWrapper::close_cursor); + + _msg_processor.register_handler(LMDBStoreMessageType::BATCH, this, &LMDBStoreWrapper::batch); + + _msg_processor.register_handler(LMDBStoreMessageType::STATS, this, &LMDBStoreWrapper::get_stats); + + _msg_processor.register_handler(LMDBStoreMessageType::CLOSE, this, &LMDBStoreWrapper::close); +} + +Napi::Value LMDBStoreWrapper::call(const Napi::CallbackInfo& info) +{ + return _msg_processor.process_message(info); +} + +Napi::Function LMDBStoreWrapper::get_class(Napi::Env env) +{ + return DefineClass(env, + "Store", + { + LMDBStoreWrapper::InstanceMethod("call", &LMDBStoreWrapper::call), + }); +} + +BoolResponse LMDBStoreWrapper::open_database(const OpenDatabaseRequest& req) +{ + _store->open_database(req.db, !req.uniqueKeys.value_or(true)); + return { true }; +} + +GetResponse LMDBStoreWrapper::get(const GetRequest& req) +{ + lmdblib::OptionalValuesVector vals; + lmdblib::KeysVector keys = req.keys; + _store->get(keys, vals, req.db); + return { vals }; +} + +HasResponse LMDBStoreWrapper::has(const HasRequest& req) +{ + std::set<lmdblib::Key> key_set; + for (const auto& entry : req.entries) { + key_set.insert(entry.first); + } + + lmdblib::KeysVector keys(key_set.begin(), key_set.end()); + lmdblib::OptionalValuesVector vals; + _store->get(keys, vals, req.db); + + std::vector<bool> exists; + + for (const auto& entry : req.entries) { + const auto& key = entry.first; + const auto& requested_values = entry.second; + + const auto& key_it = std::find(keys.begin(), keys.end(), key); + if (key_it == keys.end()) { + // this shouldn't happen. It means we missed a key when we created the key_set + exists.push_back(false); + continue; + } + + // convert the iterator offset into an index into the values vector + const auto& values = vals[static_cast<size_t>(key_it - keys.begin())]; + + if (!values.has_value()) { + exists.push_back(false); + continue; + } + + // client just wanted to know if the key exists + if (!requested_values.has_value()) { + exists.push_back(true); + continue; + } + + exists.push_back(std::all_of(requested_values->begin(), requested_values->end(), [&](const auto& val) { + return std::find(values->begin(), values->end(), val) != values->end(); + })); + } + + return { exists }; +} + +StartCursorResponse LMDBStoreWrapper::start_cursor(const StartCursorRequest& req) +{ + bool reverse = req.reverse.value_or(false); + uint32_t page_size = req.count.value_or(DEFAULT_CURSOR_PAGE_SIZE); + bool one_page = req.onePage.value_or(false); + lmdblib::Key key = req.key; + + auto tx = _store->create_shared_read_transaction(); + lmdblib::LMDBCursor::SharedPtr cursor = _store->create_cursor(tx, req.db); + bool start_ok = cursor->set_at_key(key); + + if (!start_ok) { + // we couldn't find exactly the requested key. Find the next biggest one. + start_ok = cursor->set_at_key_gte(key); + // if we found a key that's greater _and_ we want to go in reverse order + // then we're actually outside the requested bounds, we need to go back one position + if (start_ok && reverse) { + lmdblib::KeyDupValuesVector entries; + // read_prev returns `true` if there's nothing more to read + // turn this into a "not ok" because there's nothing in the db for this cursor to read + start_ok = !cursor->read_prev(1, entries); + } else if (!start_ok && reverse) { + // we couldn't find a key greater than our starting point _and_ we want to go in reverse, 
+            // then we start at the end of the database (the client requested to start at a key greater than anything in + // the DB) + start_ok = cursor->set_at_end(); + } + + // in case we're iterating in ascending order and we can't find the exact key or one that's greater than it, + // then that means there's nothing in the DB for the cursor to read + } + + // we couldn't find a starting position + if (!start_ok) { + return { std::nullopt, {} }; + } + + auto [done, first_page] = _advance_cursor(*cursor, reverse, page_size); + // cursor finished after reading a single page or client only wanted the first page + if (done || one_page) { + return { std::nullopt, first_page }; + } + + auto cursor_id = cursor->id(); + { + std::lock_guard lock(_cursor_mutex); + _cursors[cursor_id] = { cursor, reverse }; + } + + return { cursor_id, first_page }; +} + +BoolResponse LMDBStoreWrapper::close_cursor(const CloseCursorRequest& req) +{ + { + std::lock_guard lock(_cursor_mutex); + _cursors.erase(req.cursor); + } + return { true }; +} + +AdvanceCursorResponse LMDBStoreWrapper::advance_cursor(const AdvanceCursorRequest& req) +{ + CursorData data; + + { + std::lock_guard lock(_cursor_mutex); + data = _cursors.at(req.cursor); + } + + uint32_t page_size = req.count.value_or(DEFAULT_CURSOR_PAGE_SIZE); + auto [done, entries] = _advance_cursor(*data.cursor, data.reverse, page_size); + return { entries, done }; +} + +BatchResponse LMDBStoreWrapper::batch(const BatchRequest& req) +{ + std::vector<lmdblib::LMDBStore::PutData> batches; + batches.reserve(req.batches.size()); + + for (const auto& data : req.batches) { + lmdblib::LMDBStore::PutData batch{ data.second.addEntries, data.second.removeEntries, data.first }; + batches.push_back(batch); + } + + auto start = std::chrono::high_resolution_clock::now(); + _store->put(batches); + auto end = std::chrono::high_resolution_clock::now(); + std::chrono::duration<uint64_t, std::nano> duration_ns = end - start; + + return { duration_ns.count() }; +} + +StatsResponse LMDBStoreWrapper::get_stats() +{ + std::vector<lmdblib::DBStats> stats; + auto map_size = _store->get_stats(stats); + return { stats, map_size }; +} + +BoolResponse LMDBStoreWrapper::close() +{ + _store.reset(nullptr); + return { true }; +} + +std::pair<bool, lmdblib::KeyDupValuesVector> LMDBStoreWrapper::_advance_cursor(const lmdblib::LMDBCursor& cursor, + bool reverse, + uint64_t page_size) +{ + lmdblib::KeyDupValuesVector entries; + bool done = reverse ? cursor.read_prev(page_size, entries) : cursor.read_next(page_size, entries); + return std::make_pair(done, entries); +} diff --git a/barretenberg/cpp/src/barretenberg/nodejs_module/lmdb_store/lmdb_store_wrapper.hpp b/barretenberg/cpp/src/barretenberg/nodejs_module/lmdb_store/lmdb_store_wrapper.hpp new file mode 100644 index 000000000000..2025f3b08408 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/nodejs_module/lmdb_store/lmdb_store_wrapper.hpp @@ -0,0 +1,63 @@ +#pragma once + +#include "barretenberg/lmdblib/lmdb_cursor.hpp" +#include "barretenberg/lmdblib/lmdb_store.hpp" +#include "barretenberg/lmdblib/types.hpp" +#include "barretenberg/messaging/dispatcher.hpp" +#include "barretenberg/messaging/header.hpp" +#include "barretenberg/nodejs_module/lmdb_store/lmdb_store_message.hpp" +#include "barretenberg/nodejs_module/util/message_processor.hpp" +#include +#include +#include +#include + +namespace bb::nodejs::lmdb_store { + +struct CursorData { + lmdblib::LMDBCursor::SharedPtr cursor; + bool reverse; +}; +/** + * @brief Manages the interaction between the JavaScript runtime and the LMDB instance. 
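+ * + * Each message type maps to a registered handler; call() executes the handler asynchronously and resolves the returned Promise with the handler's msgpack-encoded response. 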
+ */ +class LMDBStoreWrapper : public Napi::ObjectWrap { + public: + LMDBStoreWrapper(const Napi::CallbackInfo&); + + /** + * @brief The only instance method exposed to JavaScript. Takes a msgpack Message and returns a Promise + */ + Napi::Value call(const Napi::CallbackInfo&); + + static Napi::Function get_class(Napi::Env env); + + private: + std::unique_ptr _store; + + std::mutex _cursor_mutex; + std::unordered_map _cursors; + + bb::nodejs::AsyncMessageProcessor _msg_processor; + + BoolResponse open_database(const OpenDatabaseRequest& req); + + GetResponse get(const GetRequest& req); + HasResponse has(const HasRequest& req); + + StartCursorResponse start_cursor(const StartCursorRequest& req); + AdvanceCursorResponse advance_cursor(const AdvanceCursorRequest& req); + BoolResponse close_cursor(const CloseCursorRequest& req); + + BatchResponse batch(const BatchRequest& req); + + StatsResponse get_stats(); + + BoolResponse close(); + + static std::pair _advance_cursor(const lmdblib::LMDBCursor& cursor, + bool reverse, + uint64_t page_size); +}; + +} // namespace bb::nodejs::lmdb_store diff --git a/barretenberg/cpp/src/barretenberg/world_state_napi/package.json b/barretenberg/cpp/src/barretenberg/nodejs_module/package.json similarity index 92% rename from barretenberg/cpp/src/barretenberg/world_state_napi/package.json rename to barretenberg/cpp/src/barretenberg/nodejs_module/package.json index d812caf6171e..594797b5660e 100644 --- a/barretenberg/cpp/src/barretenberg/world_state_napi/package.json +++ b/barretenberg/cpp/src/barretenberg/nodejs_module/package.json @@ -1,5 +1,5 @@ { - "name": "world_state_napi", + "name": "nodejs_module", "private": true, "version": "0.0.0", "packageManager": "yarn@1.22.22+sha512.a6b2f7906b721bba3d67d4aff083df04dad64c399707841b7acf00f6b133b7ac24255f2652fa22ae3534329dc6180534e98d17432037ff6fd140556e2bb3137e", diff --git a/barretenberg/cpp/src/barretenberg/world_state_napi/async_op.hpp b/barretenberg/cpp/src/barretenberg/nodejs_module/util/async_op.hpp similarity index 97% rename from barretenberg/cpp/src/barretenberg/world_state_napi/async_op.hpp rename to barretenberg/cpp/src/barretenberg/nodejs_module/util/async_op.hpp index e5a4849b38a3..d5204a0c4d29 100644 --- a/barretenberg/cpp/src/barretenberg/world_state_napi/async_op.hpp +++ b/barretenberg/cpp/src/barretenberg/nodejs_module/util/async_op.hpp @@ -5,7 +5,7 @@ #include #include -namespace bb::world_state { +namespace bb::nodejs { using async_fn = std::function; @@ -61,4 +61,4 @@ class AsyncOperation : public Napi::AsyncWorker { msgpack::sbuffer _result; }; -} // namespace bb::world_state +} // namespace bb::nodejs diff --git a/barretenberg/cpp/src/barretenberg/nodejs_module/util/message_processor.hpp b/barretenberg/cpp/src/barretenberg/nodejs_module/util/message_processor.hpp new file mode 100644 index 000000000000..d6dd84c2846e --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/nodejs_module/util/message_processor.hpp @@ -0,0 +1,106 @@ +#pragma once + +#include "barretenberg/messaging/dispatcher.hpp" +#include "barretenberg/messaging/header.hpp" +#include "barretenberg/nodejs_module/util/async_op.hpp" +#include "napi.h" + +namespace bb::nodejs { + +class AsyncMessageProcessor { + public: + template void register_handler(uint32_t msgType, T* self, R (T::*handler)() const) + { + register_handler(msgType, self, handler); + } + + template void register_handler(uint32_t msgType, T* self, R (T::*handler)()) + { + _register_handler( + msgType, [=](auto, const msgpack::object&) { return (self->*handler)(); }); + 
} + + template + void register_handler(uint32_t msgType, T* self, R (T::*handler)(const P&) const) + { + register_handler(msgType, self, handler); + } + + template + void register_handler(uint32_t msgType, T* self, R (T::*handler)(const P&)) + { + _register_handler, R>( + msgType, + [=](const messaging::TypedMessage

& req, const msgpack::object&) { return (self->*handler)(req.value); }); + } + + template + void register_handler(uint32_t msgType, T* self, R (T::*handler)(const P&, const msgpack::object&) const) + { + register_handler(msgType, self, handler); + } + + template + void register_handler(uint32_t msgType, T* self, R (T::*handler)(const P&, const msgpack::object&)) + { + _register_handler, R>( + msgType, [=](const messaging::TypedMessage

& req, const msgpack::object& obj) { + return (self->*handler)(req.value, obj); + }); + } + + Napi::Promise process_message(const Napi::CallbackInfo& info) + { + Napi::Env env = info.Env(); + // keep this in a shared pointer so that AsyncOperation can resolve/reject the promise once the execution is + // complete on an separate thread + auto deferred = std::make_shared(env); + + if (info.Length() < 1) { + deferred->Reject(Napi::TypeError::New(env, "Wrong number of arguments").Value()); + } else if (!info[0].IsBuffer()) { + deferred->Reject(Napi::TypeError::New(env, "Argument must be a buffer").Value()); + } else { + auto buffer = info[0].As>(); + size_t length = buffer.Length(); + // we mustn't access the Napi::Env outside of this top-level function + // so copy the data to a variable we own + // and make it a shared pointer so that it doesn't get destroyed as soon as we exit this code block + auto data = std::make_shared>(length); + std::copy_n(buffer.Data(), length, data->data()); + + auto* op = new bb::nodejs::AsyncOperation(env, deferred, [=](msgpack::sbuffer& buf) { + msgpack::object_handle obj_handle = msgpack::unpack(data->data(), length); + msgpack::object obj = obj_handle.get(); + dispatcher.onNewData(obj, buf); + }); + + // Napi is now responsible for destroying this object + op->Queue(); + } + + return deferred->Promise(); + } + + private: + bb::messaging::MessageDispatcher dispatcher; + + template + void _register_handler(uint32_t msgType, const std::function& fn) + { + dispatcher.registerTarget(msgType, [=](msgpack::object& obj, msgpack::sbuffer& buffer) { + P req_msg; + obj.convert(req_msg); + + R response = fn(req_msg, obj); + + bb::messaging::MsgHeader header(req_msg.header.messageId); + bb::messaging::TypedMessage resp_msg(msgType, header, response); + msgpack::pack(buffer, resp_msg); + + return true; + }); + } +}; + +} // namespace bb::nodejs diff --git a/barretenberg/cpp/src/barretenberg/nodejs_module/util/promise.cpp b/barretenberg/cpp/src/barretenberg/nodejs_module/util/promise.cpp new file mode 100644 index 000000000000..9216566c222c --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/nodejs_module/util/promise.cpp @@ -0,0 +1,12 @@ + +#include "napi.h" + +namespace bb::nodejs { + +Napi::Promise promise_reject(const Napi::Env& env, const Napi::Value& err) +{ + auto def = Napi::Promise::Deferred::New(env); + def.Reject(err); + return def.Promise(); +} +} // namespace bb::nodejs diff --git a/barretenberg/cpp/src/barretenberg/nodejs_module/util/promise.hpp b/barretenberg/cpp/src/barretenberg/nodejs_module/util/promise.hpp new file mode 100644 index 000000000000..f157352eaa86 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/nodejs_module/util/promise.hpp @@ -0,0 +1,9 @@ + +#pragma once + +#include "barretenberg/messaging/dispatcher.hpp" +#include "napi.h" + +namespace bb::nodejs { +Napi::Promise promise_reject(const Napi::Env& env, const Napi::Value& err); +} diff --git a/barretenberg/cpp/src/barretenberg/world_state_napi/addon.cpp b/barretenberg/cpp/src/barretenberg/nodejs_module/world_state/world_state.cpp similarity index 90% rename from barretenberg/cpp/src/barretenberg/world_state_napi/addon.cpp rename to barretenberg/cpp/src/barretenberg/nodejs_module/world_state/world_state.cpp index 1f343e32e2f6..31f2c66d97b0 100644 --- a/barretenberg/cpp/src/barretenberg/world_state_napi/addon.cpp +++ b/barretenberg/cpp/src/barretenberg/nodejs_module/world_state/world_state.cpp @@ -1,15 +1,15 @@ -#include "barretenberg/world_state_napi/addon.hpp" +#include 
"barretenberg/nodejs_module/world_state/world_state.hpp" #include "barretenberg/crypto/merkle_tree/hash_path.hpp" #include "barretenberg/crypto/merkle_tree/indexed_tree/indexed_leaf.hpp" #include "barretenberg/crypto/merkle_tree/response.hpp" #include "barretenberg/crypto/merkle_tree/types.hpp" #include "barretenberg/ecc/curves/bn254/fr.hpp" #include "barretenberg/messaging/header.hpp" +#include "barretenberg/nodejs_module/util/async_op.hpp" +#include "barretenberg/nodejs_module/world_state/world_state_message.hpp" #include "barretenberg/world_state/fork.hpp" #include "barretenberg/world_state/types.hpp" #include "barretenberg/world_state/world_state.hpp" -#include "barretenberg/world_state_napi/async_op.hpp" -#include "barretenberg/world_state_napi/message.hpp" #include "msgpack/v3/pack_decl.hpp" #include "msgpack/v3/sbuffer_decl.hpp" #include "napi.h" @@ -25,13 +25,14 @@ #include #include +using namespace bb::nodejs; using namespace bb::world_state; using namespace bb::crypto::merkle_tree; using namespace bb::messaging; -const uint64_t DEFAULT_MAP_SIZE = 1024 * 1024; +const uint64_t DEFAULT_MAP_SIZE = 1024UL * 1024; -WorldStateAddon::WorldStateAddon(const Napi::CallbackInfo& info) +WorldStateWrapper::WorldStateWrapper(const Napi::CallbackInfo& info) : ObjectWrap(info) { uint64_t thread_pool_size = 16; @@ -217,7 +218,7 @@ WorldStateAddon::WorldStateAddon(const Napi::CallbackInfo& info) [this](msgpack::object& obj, msgpack::sbuffer& buffer) { return close(obj, buffer); }); } -Napi::Value WorldStateAddon::call(const Napi::CallbackInfo& info) +Napi::Value WorldStateWrapper::call(const Napi::CallbackInfo& info) { Napi::Env env = info.Env(); // keep this in a shared pointer so that AsyncOperation can resolve/reject the promise once the execution is @@ -252,7 +253,7 @@ Napi::Value WorldStateAddon::call(const Napi::CallbackInfo& info) return deferred->Promise(); } -bool WorldStateAddon::get_tree_info(msgpack::object& obj, msgpack::sbuffer& buffer) const +bool WorldStateWrapper::get_tree_info(msgpack::object& obj, msgpack::sbuffer& buffer) const { TypedMessage request; obj.convert(request); @@ -269,7 +270,7 @@ bool WorldStateAddon::get_tree_info(msgpack::object& obj, msgpack::sbuffer& buff return true; } -bool WorldStateAddon::get_state_reference(msgpack::object& obj, msgpack::sbuffer& buffer) const +bool WorldStateWrapper::get_state_reference(msgpack::object& obj, msgpack::sbuffer& buffer) const { TypedMessage request; obj.convert(request); @@ -284,7 +285,7 @@ bool WorldStateAddon::get_state_reference(msgpack::object& obj, msgpack::sbuffer return true; } -bool WorldStateAddon::get_initial_state_reference(msgpack::object& obj, msgpack::sbuffer& buffer) const +bool WorldStateWrapper::get_initial_state_reference(msgpack::object& obj, msgpack::sbuffer& buffer) const { HeaderOnlyMessage request; obj.convert(request); @@ -299,7 +300,7 @@ bool WorldStateAddon::get_initial_state_reference(msgpack::object& obj, msgpack: return true; } -bool WorldStateAddon::get_leaf_value(msgpack::object& obj, msgpack::sbuffer& buffer) const +bool WorldStateWrapper::get_leaf_value(msgpack::object& obj, msgpack::sbuffer& buffer) const { TypedMessage request; obj.convert(request); @@ -343,7 +344,7 @@ bool WorldStateAddon::get_leaf_value(msgpack::object& obj, msgpack::sbuffer& buf return true; } -bool WorldStateAddon::get_leaf_preimage(msgpack::object& obj, msgpack::sbuffer& buffer) const +bool WorldStateWrapper::get_leaf_preimage(msgpack::object& obj, msgpack::sbuffer& buffer) const { TypedMessage request; 
obj.convert(request); @@ -377,7 +378,7 @@ bool WorldStateAddon::get_leaf_preimage(msgpack::object& obj, msgpack::sbuffer& return true; } -bool WorldStateAddon::get_sibling_path(msgpack::object& obj, msgpack::sbuffer& buffer) const +bool WorldStateWrapper::get_sibling_path(msgpack::object& obj, msgpack::sbuffer& buffer) const { TypedMessage request; obj.convert(request); @@ -392,7 +393,7 @@ bool WorldStateAddon::get_sibling_path(msgpack::object& obj, msgpack::sbuffer& b return true; } -bool WorldStateAddon::get_block_numbers_for_leaf_indices(msgpack::object& obj, msgpack::sbuffer& buffer) const +bool WorldStateWrapper::get_block_numbers_for_leaf_indices(msgpack::object& obj, msgpack::sbuffer& buffer) const { TypedMessage request; obj.convert(request); @@ -410,7 +411,7 @@ bool WorldStateAddon::get_block_numbers_for_leaf_indices(msgpack::object& obj, m return true; } -bool WorldStateAddon::find_leaf_indices(msgpack::object& obj, msgpack::sbuffer& buffer) const +bool WorldStateWrapper::find_leaf_indices(msgpack::object& obj, msgpack::sbuffer& buffer) const { TypedMessage request; obj.convert(request); @@ -452,7 +453,7 @@ bool WorldStateAddon::find_leaf_indices(msgpack::object& obj, msgpack::sbuffer& return true; } -bool WorldStateAddon::find_low_leaf(msgpack::object& obj, msgpack::sbuffer& buffer) const +bool WorldStateWrapper::find_low_leaf(msgpack::object& obj, msgpack::sbuffer& buffer) const { TypedMessage request; obj.convert(request); @@ -468,7 +469,7 @@ bool WorldStateAddon::find_low_leaf(msgpack::object& obj, msgpack::sbuffer& buff return true; } -bool WorldStateAddon::append_leaves(msgpack::object& obj, msgpack::sbuffer& buf) +bool WorldStateWrapper::append_leaves(msgpack::object& obj, msgpack::sbuffer& buf) { TypedMessage request; obj.convert(request); @@ -503,7 +504,7 @@ bool WorldStateAddon::append_leaves(msgpack::object& obj, msgpack::sbuffer& buf) return true; } -bool WorldStateAddon::batch_insert(msgpack::object& obj, msgpack::sbuffer& buffer) +bool WorldStateWrapper::batch_insert(msgpack::object& obj, msgpack::sbuffer& buffer) { TypedMessage request; obj.convert(request); @@ -539,7 +540,7 @@ bool WorldStateAddon::batch_insert(msgpack::object& obj, msgpack::sbuffer& buffe return true; } -bool WorldStateAddon::sequential_insert(msgpack::object& obj, msgpack::sbuffer& buffer) +bool WorldStateWrapper::sequential_insert(msgpack::object& obj, msgpack::sbuffer& buffer) { TypedMessage request; obj.convert(request); @@ -575,7 +576,7 @@ bool WorldStateAddon::sequential_insert(msgpack::object& obj, msgpack::sbuffer& return true; } -bool WorldStateAddon::update_archive(msgpack::object& obj, msgpack::sbuffer& buf) +bool WorldStateWrapper::update_archive(msgpack::object& obj, msgpack::sbuffer& buf) { TypedMessage request; obj.convert(request); @@ -589,7 +590,7 @@ bool WorldStateAddon::update_archive(msgpack::object& obj, msgpack::sbuffer& buf return true; } -bool WorldStateAddon::commit(msgpack::object& obj, msgpack::sbuffer& buf) +bool WorldStateWrapper::commit(msgpack::object& obj, msgpack::sbuffer& buf) { HeaderOnlyMessage request; obj.convert(request); @@ -604,7 +605,7 @@ bool WorldStateAddon::commit(msgpack::object& obj, msgpack::sbuffer& buf) return true; } -bool WorldStateAddon::rollback(msgpack::object& obj, msgpack::sbuffer& buf) +bool WorldStateWrapper::rollback(msgpack::object& obj, msgpack::sbuffer& buf) { HeaderOnlyMessage request; obj.convert(request); @@ -618,7 +619,7 @@ bool WorldStateAddon::rollback(msgpack::object& obj, msgpack::sbuffer& buf) return true; } -bool 
WorldStateAddon::sync_block(msgpack::object& obj, msgpack::sbuffer& buf) +bool WorldStateWrapper::sync_block(msgpack::object& obj, msgpack::sbuffer& buf) { TypedMessage request; obj.convert(request); @@ -637,7 +638,7 @@ bool WorldStateAddon::sync_block(msgpack::object& obj, msgpack::sbuffer& buf) return true; } -bool WorldStateAddon::create_fork(msgpack::object& obj, msgpack::sbuffer& buf) +bool WorldStateWrapper::create_fork(msgpack::object& obj, msgpack::sbuffer& buf) { TypedMessage request; obj.convert(request); @@ -654,7 +655,7 @@ bool WorldStateAddon::create_fork(msgpack::object& obj, msgpack::sbuffer& buf) return true; } -bool WorldStateAddon::delete_fork(msgpack::object& obj, msgpack::sbuffer& buf) +bool WorldStateWrapper::delete_fork(msgpack::object& obj, msgpack::sbuffer& buf) { TypedMessage request; obj.convert(request); @@ -668,7 +669,7 @@ bool WorldStateAddon::delete_fork(msgpack::object& obj, msgpack::sbuffer& buf) return true; } -bool WorldStateAddon::close(msgpack::object& obj, msgpack::sbuffer& buf) +bool WorldStateWrapper::close(msgpack::object& obj, msgpack::sbuffer& buf) { HeaderOnlyMessage request; obj.convert(request); @@ -684,7 +685,7 @@ bool WorldStateAddon::close(msgpack::object& obj, msgpack::sbuffer& buf) return true; } -bool WorldStateAddon::set_finalised(msgpack::object& obj, msgpack::sbuffer& buf) const +bool WorldStateWrapper::set_finalised(msgpack::object& obj, msgpack::sbuffer& buf) const { TypedMessage request; obj.convert(request); @@ -697,7 +698,7 @@ bool WorldStateAddon::set_finalised(msgpack::object& obj, msgpack::sbuffer& buf) return true; } -bool WorldStateAddon::unwind(msgpack::object& obj, msgpack::sbuffer& buf) const +bool WorldStateWrapper::unwind(msgpack::object& obj, msgpack::sbuffer& buf) const { TypedMessage request; obj.convert(request); @@ -711,7 +712,7 @@ bool WorldStateAddon::unwind(msgpack::object& obj, msgpack::sbuffer& buf) const return true; } -bool WorldStateAddon::remove_historical(msgpack::object& obj, msgpack::sbuffer& buf) const +bool WorldStateWrapper::remove_historical(msgpack::object& obj, msgpack::sbuffer& buf) const { TypedMessage request; obj.convert(request); @@ -725,7 +726,7 @@ bool WorldStateAddon::remove_historical(msgpack::object& obj, msgpack::sbuffer& return true; } -bool WorldStateAddon::get_status(msgpack::object& obj, msgpack::sbuffer& buf) const +bool WorldStateWrapper::get_status(msgpack::object& obj, msgpack::sbuffer& buf) const { HeaderOnlyMessage request; obj.convert(request); @@ -740,21 +741,11 @@ bool WorldStateAddon::get_status(msgpack::object& obj, msgpack::sbuffer& buf) co return true; } -Napi::Function WorldStateAddon::get_class(Napi::Env env) +Napi::Function WorldStateWrapper::get_class(Napi::Env env) { return DefineClass(env, "WorldState", { - WorldStateAddon::InstanceMethod("call", &WorldStateAddon::call), + WorldStateWrapper::InstanceMethod("call", &WorldStateWrapper::call), }); } - -Napi::Object Init(Napi::Env env, Napi::Object exports) -{ - Napi::String name = Napi::String::New(env, "WorldState"); - exports.Set(name, WorldStateAddon::get_class(env)); - return exports; -} - -// NOLINTNEXTLINE -NODE_API_MODULE(addon, Init) diff --git a/barretenberg/cpp/src/barretenberg/world_state_napi/addon.hpp b/barretenberg/cpp/src/barretenberg/nodejs_module/world_state/world_state.hpp similarity index 91% rename from barretenberg/cpp/src/barretenberg/world_state_napi/addon.hpp rename to barretenberg/cpp/src/barretenberg/nodejs_module/world_state/world_state.hpp index 14318c1bb20e..f6c070db92d2 100644 --- 
a/barretenberg/cpp/src/barretenberg/world_state_napi/addon.hpp +++ b/barretenberg/cpp/src/barretenberg/nodejs_module/world_state/world_state.hpp @@ -1,21 +1,21 @@ #pragma once #include "barretenberg/messaging/dispatcher.hpp" +#include "barretenberg/nodejs_module/world_state/world_state_message.hpp" #include "barretenberg/world_state/types.hpp" #include "barretenberg/world_state/world_state.hpp" -#include "barretenberg/world_state_napi/message.hpp" #include #include #include -namespace bb::world_state { +namespace bb::nodejs { /** * @brief Manages the interaction between the JavaScript runtime and the WorldState class. */ -class WorldStateAddon : public Napi::ObjectWrap { +class WorldStateWrapper : public Napi::ObjectWrap { public: - WorldStateAddon(const Napi::CallbackInfo&); + WorldStateWrapper(const Napi::CallbackInfo&); /** * @brief The only instance method exposed to JavaScript. Takes a msgpack Message and returns a Promise @@ -66,4 +66,4 @@ class WorldStateAddon : public Napi::ObjectWrap { bool get_status(msgpack::object& obj, msgpack::sbuffer& buffer) const; }; -} // namespace bb::world_state +} // namespace bb::nodejs diff --git a/barretenberg/cpp/src/barretenberg/world_state_napi/message.hpp b/barretenberg/cpp/src/barretenberg/nodejs_module/world_state/world_state_message.hpp similarity index 96% rename from barretenberg/cpp/src/barretenberg/world_state_napi/message.hpp rename to barretenberg/cpp/src/barretenberg/nodejs_module/world_state/world_state_message.hpp index d903ed7dc2f4..a207a0fe2753 100644 --- a/barretenberg/cpp/src/barretenberg/world_state_napi/message.hpp +++ b/barretenberg/cpp/src/barretenberg/nodejs_module/world_state/world_state_message.hpp @@ -4,14 +4,16 @@ #include "barretenberg/ecc/curves/bn254/fr.hpp" #include "barretenberg/messaging/header.hpp" #include "barretenberg/serialize/msgpack.hpp" +#include "barretenberg/world_state/fork.hpp" #include "barretenberg/world_state/types.hpp" #include #include #include -namespace bb::world_state { +namespace bb::nodejs { using namespace bb::messaging; +using namespace bb::world_state; enum WorldStateMessageType { GET_TREE_INFO = FIRST_APP_MSG_TYPE, @@ -220,6 +222,6 @@ struct SyncBlockRequest { publicDataWrites); }; -} // namespace bb::world_state +} // namespace bb::nodejs -MSGPACK_ADD_ENUM(bb::world_state::WorldStateMessageType) +MSGPACK_ADD_ENUM(bb::nodejs::WorldStateMessageType) diff --git a/barretenberg/cpp/src/barretenberg/world_state_napi/yarn.lock b/barretenberg/cpp/src/barretenberg/nodejs_module/yarn.lock similarity index 100% rename from barretenberg/cpp/src/barretenberg/world_state_napi/yarn.lock rename to barretenberg/cpp/src/barretenberg/nodejs_module/yarn.lock diff --git a/barretenberg/cpp/src/barretenberg/world_state/world_state.cpp b/barretenberg/cpp/src/barretenberg/world_state/world_state.cpp index 5b47b68d5518..f7fba9cc6c60 100644 --- a/barretenberg/cpp/src/barretenberg/world_state/world_state.cpp +++ b/barretenberg/cpp/src/barretenberg/world_state/world_state.cpp @@ -3,18 +3,17 @@ #include "barretenberg/crypto/merkle_tree/hash.hpp" #include "barretenberg/crypto/merkle_tree/hash_path.hpp" #include "barretenberg/crypto/merkle_tree/indexed_tree/indexed_leaf.hpp" -#include "barretenberg/crypto/merkle_tree/lmdb_store/callbacks.hpp" #include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_store.hpp" #include "barretenberg/crypto/merkle_tree/node_store/tree_meta.hpp" #include "barretenberg/crypto/merkle_tree/response.hpp" #include "barretenberg/crypto/merkle_tree/signal.hpp" #include 
"barretenberg/crypto/merkle_tree/types.hpp" +#include "barretenberg/lmdblib/lmdb_helpers.hpp" #include "barretenberg/vm/aztec_constants.hpp" #include "barretenberg/world_state/fork.hpp" #include "barretenberg/world_state/tree_with_store.hpp" #include "barretenberg/world_state/types.hpp" #include "barretenberg/world_state/world_state_stores.hpp" -#include "barretenberg/world_state_napi/message.hpp" #include #include #include diff --git a/barretenberg/cpp/src/barretenberg/world_state/world_state.hpp b/barretenberg/cpp/src/barretenberg/world_state/world_state.hpp index 702ebfe90409..20aeaa2bcfab 100644 --- a/barretenberg/cpp/src/barretenberg/world_state/world_state.hpp +++ b/barretenberg/cpp/src/barretenberg/world_state/world_state.hpp @@ -6,19 +6,18 @@ #include "barretenberg/crypto/merkle_tree/hash_path.hpp" #include "barretenberg/crypto/merkle_tree/indexed_tree/content_addressed_indexed_tree.hpp" #include "barretenberg/crypto/merkle_tree/indexed_tree/indexed_leaf.hpp" -#include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_environment.hpp" #include "barretenberg/crypto/merkle_tree/node_store/cached_content_addressed_tree_store.hpp" #include "barretenberg/crypto/merkle_tree/node_store/tree_meta.hpp" #include "barretenberg/crypto/merkle_tree/response.hpp" #include "barretenberg/crypto/merkle_tree/signal.hpp" #include "barretenberg/crypto/merkle_tree/types.hpp" #include "barretenberg/ecc/curves/bn254/fr.hpp" +#include "barretenberg/lmdblib/lmdb_environment.hpp" #include "barretenberg/serialize/msgpack.hpp" #include "barretenberg/world_state/fork.hpp" #include "barretenberg/world_state/tree_with_store.hpp" #include "barretenberg/world_state/types.hpp" #include "barretenberg/world_state/world_state_stores.hpp" -#include "barretenberg/world_state_napi/message.hpp" #include #include #include diff --git a/barretenberg/cpp/src/barretenberg/world_state/world_state.test.cpp b/barretenberg/cpp/src/barretenberg/world_state/world_state.test.cpp index 23a335eb2195..1e40cf6b5ad4 100644 --- a/barretenberg/cpp/src/barretenberg/world_state/world_state.test.cpp +++ b/barretenberg/cpp/src/barretenberg/world_state/world_state.test.cpp @@ -1,7 +1,6 @@ #include "barretenberg/world_state/world_state.hpp" #include "barretenberg/crypto/merkle_tree/fixtures.hpp" #include "barretenberg/crypto/merkle_tree/indexed_tree/indexed_leaf.hpp" -#include "barretenberg/crypto/merkle_tree/lmdb_store/lmdb_tree_read_transaction.hpp" #include "barretenberg/crypto/merkle_tree/node_store/tree_meta.hpp" #include "barretenberg/crypto/merkle_tree/response.hpp" #include "barretenberg/ecc/curves/bn254/fr.hpp" diff --git a/boxes/Dockerfile b/boxes/Dockerfile index 18ed055905e7..3803f46a2f76 100644 --- a/boxes/Dockerfile +++ b/boxes/Dockerfile @@ -21,5 +21,5 @@ COPY . . 
ENV AZTEC_NARGO=/usr/aztec-nargo/compile_then_postprocess.sh ENV AZTEC_BUILDER=/usr/src/yarn-project/builder/aztec-builder-dest RUN yarn -RUN npx -y playwright@1.50 install --with-deps +RUN npx -y playwright@1.49 install --with-deps ENTRYPOINT ["/bin/sh", "-c"] diff --git a/boxes/boxes/react/package.json b/boxes/boxes/react/package.json index 7009fd901527..373d58aefd84 100644 --- a/boxes/boxes/react/package.json +++ b/boxes/boxes/react/package.json @@ -46,7 +46,7 @@ "yup": "^1.2.0" }, "devDependencies": { - "@playwright/test": "1.50.0", + "@playwright/test": "1.49.0", "@types/jest": "^29.5.0", "@types/node": "^20.5.9", "@types/react": "^18.2.15", diff --git a/boxes/boxes/vanilla/package.json b/boxes/boxes/vanilla/package.json index ee0117a1fc2e..6f1182198bbd 100644 --- a/boxes/boxes/vanilla/package.json +++ b/boxes/boxes/vanilla/package.json @@ -21,7 +21,7 @@ "@aztec/aztec.js": "latest" }, "devDependencies": { - "@playwright/test": "^1.50.0", + "@playwright/test": "1.49.0", "@types/node": "^20.11.17", "assert": "^2.1.0", "copy-webpack-plugin": "^12.0.2", diff --git a/boxes/boxes/vanilla/webpack.config.js b/boxes/boxes/vanilla/webpack.config.js index 7cbecf9f7ad7..916f9ded909d 100644 --- a/boxes/boxes/vanilla/webpack.config.js +++ b/boxes/boxes/vanilla/webpack.config.js @@ -2,6 +2,8 @@ import CopyPlugin from 'copy-webpack-plugin'; import { createRequire } from 'module'; import webpack from 'webpack'; import HtmlWebpackPlugin from 'html-webpack-plugin'; +import { resolve } from 'path'; + const require = createRequire(import.meta.url); export default (_, argv) => ({ @@ -27,7 +29,7 @@ export default (_, argv) => ({ new CopyPlugin({ patterns: [ { - context: '../../../barretenberg/ts/dest/browser', + context: resolve(require.resolve('@aztec/aztec.js'), '../'), from: '*.gz', }, ], diff --git a/boxes/contract-only/package.json b/boxes/contract-only/package.json index 5b9c8d738109..289cd06daa00 100644 --- a/boxes/contract-only/package.json +++ b/boxes/contract-only/package.json @@ -35,7 +35,7 @@ "rootDir": "./" }, "devDependencies": { - "@playwright/test": "1.50.0", + "@playwright/test": "1.49.0", "@types/jest": "^29.5.0", "@types/node": "^20.11.17", "copy-webpack-plugin": "^11.0.0", diff --git a/boxes/docker-compose.yml b/boxes/docker-compose.yml index c127d1e8e334..73791dfdc9b5 100644 --- a/boxes/docker-compose.yml +++ b/boxes/docker-compose.yml @@ -41,7 +41,6 @@ services: working_dir: /root/aztec-packages/boxes entrypoint: > sh -c ' - npx -y playwright@1.50 install --with-deps yarn workspace @aztec/$$BOX test --project=$$BROWSER ' environment: diff --git a/boxes/package.json b/boxes/package.json index 676d63851316..4392c07495a6 100644 --- a/boxes/package.json +++ b/boxes/package.json @@ -46,6 +46,6 @@ "vitest": "^2.0.5" }, "devDependencies": { - "@playwright/test": "^1.50.0" + "@playwright/test": "1.49.0" } } diff --git a/boxes/yarn.lock b/boxes/yarn.lock index 5803a990f969..2f35a3788f18 100644 --- a/boxes/yarn.lock +++ b/boxes/yarn.lock @@ -63,7 +63,7 @@ __metadata: dependencies: "@aztec/accounts": "npm:latest" "@aztec/aztec.js": "npm:latest" - "@playwright/test": "npm:1.50.0" + "@playwright/test": "npm:1.49.0" "@types/jest": "npm:^29.5.0" "@types/node": "npm:^20.5.9" "@types/react": "npm:^18.2.15" @@ -119,7 +119,7 @@ __metadata: dependencies: "@aztec/accounts": "npm:latest" "@aztec/aztec.js": "npm:latest" - "@playwright/test": "npm:^1.50.0" + "@playwright/test": "npm:1.49.0" "@types/node": "npm:^20.11.17" assert: "npm:^2.1.0" copy-webpack-plugin: "npm:^12.0.2" @@ -1622,14 +1622,14 @@ 
__metadata: languageName: node linkType: hard -"@playwright/test@npm:1.50.0, @playwright/test@npm:^1.50.0": - version: 1.50.0 - resolution: "@playwright/test@npm:1.50.0" +"@playwright/test@npm:1.49.0": + version: 1.49.0 + resolution: "@playwright/test@npm:1.49.0" dependencies: - playwright: "npm:1.50.0" + playwright: "npm:1.49.0" bin: playwright: cli.js - checksum: 10c0/70b46eab2a5c8b4accc1c8a29a0ea371b7b8f56b0d38509e5c06354ebc60dc262837e92cea727076aea5e1c32f31e215c02fbde977519a7e38488cfc48f0ba5c + checksum: 10c0/2890d52ee45bd83b5501f17a77c77f12ba934d257fda4b288405c6d91f94b83c4fcbdff3c0be89c2aaeea3d13576b72ec9a70be667ff844b342044afd72a246e languageName: node linkType: hard @@ -3626,7 +3626,7 @@ __metadata: "@inquirer/confirm": "npm:^3.0.0" "@inquirer/input": "npm:^2.0.0" "@inquirer/select": "npm:^2.0.0" - "@playwright/test": "npm:^1.50.0" + "@playwright/test": "npm:1.49.0" axios: "npm:^1.6.7" commander: "npm:^12.1.0" ora: "npm:^8.0.1" @@ -9795,27 +9795,27 @@ __metadata: languageName: node linkType: hard -"playwright-core@npm:1.50.0": - version: 1.50.0 - resolution: "playwright-core@npm:1.50.0" +"playwright-core@npm:1.49.0": + version: 1.49.0 + resolution: "playwright-core@npm:1.49.0" bin: playwright-core: cli.js - checksum: 10c0/b0cc7fadcb2db68a7b8d730b26c7a7d17baad454a0697c781e08074a619e57779a90be9b57c4c741ff4895390bdfd093d8393a746e8bf68ae57ac452f4c1cdb2 + checksum: 10c0/22c1a72fabdcc87bd1cd4d40a032d2c5b94cf94ba7484dc182048c3fa1c8ec26180b559d8cac4ca9870e8fd6bdf5ef9d9f54e7a31fd60d67d098fcffc5e4253b languageName: node linkType: hard -"playwright@npm:1.50.0": - version: 1.50.0 - resolution: "playwright@npm:1.50.0" +"playwright@npm:1.49.0": + version: 1.49.0 + resolution: "playwright@npm:1.49.0" dependencies: fsevents: "npm:2.3.2" - playwright-core: "npm:1.50.0" + playwright-core: "npm:1.49.0" dependenciesMeta: fsevents: optional: true bin: playwright: cli.js - checksum: 10c0/0076a536433819b7122066a07c5fcfa56d40d09cbbec0a39061bbfa832c8a1f626df5e4fe206fbeba56b3a61f0e2b26d4ad3c2b402852d6f147a266fd18e4ddf + checksum: 10c0/e94d662747cd147d0573570fec90dadc013c1097595714036fc8934a075c5a82ab04a49111b03b1f762ea86429bdb7c94460901896901e20970b30ce817cc93f languageName: node linkType: hard diff --git a/build-images/Earthfile b/build-images/Earthfile index 83d9d932c2a2..bed625beafc7 100644 --- a/build-images/Earthfile +++ b/build-images/Earthfile @@ -195,7 +195,7 @@ build: -o /usr/local/bin/yq && chmod +x /usr/local/bin/yq # Install playwright for browser testing. - RUN npx -y playwright@1.50 install --with-deps + RUN npx -y playwright@1.49 install --with-deps ARG TARGETARCH # NOTE: bump this version when doing non-backwards compatible changes diff --git a/docs/docs/migration_notes.md b/docs/docs/migration_notes.md index c4e3d2dfaf5b..9ac4b7e756c3 100644 --- a/docs/docs/migration_notes.md +++ b/docs/docs/migration_notes.md @@ -76,6 +76,26 @@ pub trait NoteInterface { } ``` +### [PXE] Cleanup of Contract and ContractClass information getters + +```diff +- pxe.isContractInitialized +- pxe.getContractInstance +- pxe.isContractPubliclyDeployed ++ pxe.getContractMetadata +``` + +have been merged into getContractMetadata + +```diff +- pxe.getContractClass +- pxe.isContractClassPubliclyRegistered +- pxe.getContractArtifact ++ pxe.getContractClassMetadata +``` + +These functions have been merged into `pxe.getContractMetadata` and `pxe.getContractClassMetadata`. 
+ ## 0.72.0 ### Some functions in `aztec.js` and `@aztec/accounts` are now async In our efforts to make libraries more browser-friendly and providing with more bundling options for `bb.js` (like a non top-level-await version), some functions are being made async, in particular those that access our cryptographic functions. diff --git a/noir-projects/aztec-nr/.gitrepo b/noir-projects/aztec-nr/.gitrepo index e237f3b41698..10490a14e16b 100644 --- a/noir-projects/aztec-nr/.gitrepo +++ b/noir-projects/aztec-nr/.gitrepo @@ -6,7 +6,7 @@ [subrepo] remote = https://github.com/AztecProtocol/aztec-nr branch = master - commit = 5b8facfee13abf89b4058ea0ec9d228b4cd6b391 + commit = ee047c9b696b4a7de528e262377c23e102caf79d method = merge cmdver = 0.4.6 - parent = 9f8cf0a720044ee7512f9cc92589f986f165f8f7 + parent = 55c6e900161a9f7921f42096885784647025905e diff --git a/noir-projects/noir-contracts/scripts/flamegraph.sh b/noir-projects/noir-contracts/scripts/flamegraph.sh index 48392b3b4d21..ba3dbcdb1ec1 100755 --- a/noir-projects/noir-contracts/scripts/flamegraph.sh +++ b/noir-projects/noir-contracts/scripts/flamegraph.sh @@ -3,9 +3,9 @@ set -eu # Function to clean up and exit cleanup_and_exit() { - echo "Cleaning up..." - rm -f "$SCRIPT_DIR/../target/$FUNCTION_ARTIFACT" - exit 0 + echo "Cleaning up..." + rm -f "$FUNCTION_ARTIFACT" + exit 0 } # Trap SIGINT (Ctrl+C) and call cleanup_and_exit @@ -13,25 +13,28 @@ trap cleanup_and_exit SIGINT # If first arg is -h or --help, print usage if [ "$1" == "-h" ] || [ "$1" == "--help" ]; then - echo "Usage: $0 " - echo "e.g.: $0 Token transfer" - echo "Generates a flamegraph for the given contract and function" - exit 0 + echo "Generates a flamegraph for the given contract and function" + echo "Usage: $0 " + echo "e.g.: $0 ./target/voting_contract_Voting.json vote" + echo "e.g.: $0 Token transfer" + exit 0 fi # Get the directory of the script SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -PROFILER="$SCRIPT_DIR/../../../noir/noir-repo/target/release/noir-profiler" +PROFILER=${PROFILER_PATH:-"$SCRIPT_DIR/../../../noir/noir-repo/target/release/noir-profiler"} +BACKEND_PATH=${BACKEND_PATH:-"$SCRIPT_DIR/../../../barretenberg/cpp/build/bin/bb"} +SERVE=${SERVE:-"1"} if [ ! -f $PROFILER ]; then - echo "Profiler not found, building profiler" - cd "$SCRIPT_DIR/../../../noir/noir-repo/tooling/profiler" - cargo build --release - cd "$SCRIPT_DIR" + echo "Profiler not found, building profiler" + cd "$SCRIPT_DIR/../../../noir/noir-repo/tooling/profiler" + cargo build --release + cd "$SCRIPT_DIR" fi -# first console arg is contract name in camel case (e.g. TokenBridge) +# first console arg is contract name in camel case or path to contract artifact CONTRACT=$1 # second console arg is the contract function @@ -49,25 +52,48 @@ function sed_wrapper() { fi } -# convert contract name to following format: token_bridge_contract-TokenBridge.json -ARTIFACT=$(echo "$CONTRACT" | sed_wrapper -r 's/^([A-Z])/\L\1/; s/([a-z0-9])([A-Z])/\1_\L\2/g') -ARTIFACT=$(echo "$ARTIFACT" | tr '[:upper:]' '[:lower:]') -ARTIFACT_NAME="${ARTIFACT}_contract-${CONTRACT}" - -# Extract artifact for the specific function -node "$SCRIPT_DIR/extractFunctionAsNoirArtifact.js" "$SCRIPT_DIR/../target/${ARTIFACT_NAME}.json" $FUNCTION +if [[ "$CONTRACT" == *.json ]]; then + if [ ! 
-f "$CONTRACT" ]; then + echo "Error: Contract artifact not found at $CONTRACT" + exit 1 + fi + ARTIFACT_PATH=$CONTRACT + FUNCTION_ARTIFACT="${ARTIFACT_PATH%%.json}-${FUNCTION}.json" +else + # convert contract name to following format: token_bridge_contract-TokenBridge.json + ARTIFACT=$(echo "$CONTRACT" | sed_wrapper -r 's/^([A-Z])/\L\1/; s/([a-z0-9])([A-Z])/\1_\L\2/g') + ARTIFACT=$(echo "$ARTIFACT" | tr '[:upper:]' '[:lower:]') + ARTIFACT_NAME="${ARTIFACT}_contract-${CONTRACT}" + ARTIFACT_PATH="$SCRIPT_DIR/../target/${ARTIFACT_NAME}.json" + FUNCTION_ARTIFACT="$SCRIPT_DIR/../target/${ARTIFACT_NAME}-${FUNCTION}.json" +fi -FUNCTION_ARTIFACT="${ARTIFACT_NAME}-${FUNCTION}.json" +# Extract artifact for the specific function (will save to $FUNCTION_ARTIFACT) +node "$SCRIPT_DIR/extractFunctionAsNoirArtifact.js" "$ARTIFACT_PATH" $FUNCTION -# We create dest directory and use it as an output for the generated main.svg file -mkdir -p "$SCRIPT_DIR/../dest" +if [ "$SERVE" == "true" ]; then + # We create dest directory and use it as an output for the generated main.svg file + OUTPUT_DIR="$SCRIPT_DIR/../dest" + mkdir -p "$OUTPUT_DIR" +else + # Save the flamegraph to the same directory as the artifact + OUTPUT_DIR=$(dirname "$ARTIFACT_PATH") +fi # At last, generate the flamegraph -$PROFILER gates --artifact-path "$SCRIPT_DIR/../target/$FUNCTION_ARTIFACT" --backend-path "$SCRIPT_DIR/../../../barretenberg/cpp/build/bin/bb" --backend-gates-command "gates_for_ivc" --output "$SCRIPT_DIR/../dest" - -# serve the file over http -echo "Serving flamegraph at http://0.0.0.0:8000/main::gates.svg" -python3 -m http.server --directory "$SCRIPT_DIR/../dest" 8000 +$PROFILER gates --artifact-path "$FUNCTION_ARTIFACT" --backend-path "$BACKEND_PATH" --backend-gates-command "gates_for_ivc" --output "$OUTPUT_DIR" + +# save as $ARTIFACT_NAME-${FUNCTION}-flamegraph.svg +OUTPUT_FILE="${OUTPUT_DIR}/$(basename ${ARTIFACT_PATH%%.json})-${FUNCTION}-flamegraph.svg" +mv "$OUTPUT_DIR/main::gates.svg" "$OUTPUT_FILE" + +if [ "$SERVE" == "1" ]; then + # serve the file over http + echo -e "\nServing flamegraph at http://0.0.0.0:8000/$(basename $OUTPUT_FILE)\n" + npx -y http-server --silent -p 8000 "$OUTPUT_DIR" +else + echo -e "\nFlamegraph $(basename $OUTPUT_FILE) saved to artifacts directory. 
You can open it in your browser.\n" +fi # Clean up before exiting -cleanup_and_exit \ No newline at end of file +cleanup_and_exit diff --git a/noir/bootstrap.sh b/noir/bootstrap.sh index 5554e6b9b5a1..08d9e325a4bd 100755 --- a/noir/bootstrap.sh +++ b/noir/bootstrap.sh @@ -12,7 +12,7 @@ function build { export COMMIT_HASH="$(echo "$hash" | sed 's/-.*//g')" denoise ./scripts/bootstrap_native.sh denoise ./scripts/bootstrap_packages.sh - cache_upload noir-$hash.tar.gz noir-repo/target/release/nargo noir-repo/target/release/acvm packages + cache_upload noir-$hash.tar.gz noir-repo/target/release/nargo noir-repo/target/release/acvm noir-repo/target/release/noir-profiler packages fi github_endgroup } diff --git a/noir/noir-repo/.github/scripts/playwright-install.sh b/noir/noir-repo/.github/scripts/playwright-install.sh index 3e65219346d7..d22b4c3d1a68 100755 --- a/noir/noir-repo/.github/scripts/playwright-install.sh +++ b/noir/noir-repo/.github/scripts/playwright-install.sh @@ -1,4 +1,4 @@ #!/bin/bash set -eu -npx -y playwright@1.50 install --with-deps +npx -y playwright@1.49 install --with-deps diff --git a/noir/noir-repo/yarn.lock b/noir/noir-repo/yarn.lock index 811dd9328af9..e831b9c476a6 100644 --- a/noir/noir-repo/yarn.lock +++ b/noir/noir-repo/yarn.lock @@ -14924,7 +14924,7 @@ __metadata: languageName: node linkType: hard -"fsevents@patch:fsevents@2.3.2#~builtin": +"fsevents@patch:fsevents@npm%3A2.3.2#~builtin": version: 2.3.2 resolution: "fsevents@patch:fsevents@npm%3A2.3.2#~builtin::version=2.3.2&hash=df0bf1" dependencies: @@ -20321,27 +20321,27 @@ __metadata: languageName: node linkType: hard -"playwright-core@npm:1.50.0": - version: 1.50.0 - resolution: "playwright-core@npm:1.50.0" +"playwright-core@npm:1.49.0": + version: 1.49.0 + resolution: "playwright-core@npm:1.49.0" bin: playwright-core: cli.js - checksum: aca5222d7859039bc579b4b860db57c8adc1cc94c3de990ed08cec911bf888e2decb331560bd456991c98222a55c58526187a2a070e6f101fbef43a8e07e1dea + checksum: d8423ad0cab2e672856529bf6b98b406e7e605da098b847b9b54ee8ebd8d716ed8880a9afff4b38f0a2e3f59b95661c74589116ce3ff2b5e0ae3561507086c94 languageName: node linkType: hard "playwright@npm:^1.22.2": - version: 1.50.0 - resolution: "playwright@npm:1.50.0" + version: 1.49.0 + resolution: "playwright@npm:1.49.0" dependencies: - fsevents: 2.3.2 - playwright-core: 1.50.0 + fsevents: "npm:2.3.2" + playwright-core: "npm:1.49.0" dependenciesMeta: fsevents: optional: true bin: playwright: cli.js - checksum: 44004e3082433f6024665fcf04bd37cda2b284bd5262682a40a60c66943ccf66f68fbc9ca859908dfd0d117235424580a55e9ccd07e2ad9c30df363b6445448b + checksum: f1bfb2fff65cad2ce996edab74ec231dfd21aeb5961554b765ce1eaec27efb87eaba37b00e91ecd27727b82861e5d8c230abe4960e93f6ada8be5ad1020df306 languageName: node linkType: hard diff --git a/yarn-project/Dockerfile b/yarn-project/Dockerfile new file mode 100644 index 000000000000..f21cc3bc968d --- /dev/null +++ b/yarn-project/Dockerfile @@ -0,0 +1,67 @@ +FROM --platform=linux/amd64 aztecprotocol/bb.js as bb.js +FROM --platform=linux/amd64 aztecprotocol/noir-packages as noir-packages +FROM --platform=linux/amd64 aztecprotocol/l1-contracts as contracts +FROM --platform=linux/amd64 aztecprotocol/noir-projects as noir-projects +FROM aztecprotocol/noir as noir +# we don't build the bb binary for arm so this will be copied but won't be working on arm images +FROM --platform=linux/amd64 aztecprotocol/barretenberg-x86_64-linux-clang as barretenberg + +FROM node:18.19.0 as builder +RUN apt update && apt install -y jq curl perl && rm -rf 
/var/lib/apt/lists/* && apt-get clean + +# Copy in portalled packages. +COPY --from=bb.js /usr/src/barretenberg/ts /usr/src/barretenberg/ts +COPY --from=noir-packages /usr/src/noir/packages /usr/src/noir/packages +COPY --from=contracts /usr/src/l1-contracts /usr/src/l1-contracts +COPY --from=noir-projects /usr/src/noir-projects /usr/src/noir-projects +# We want the native ACVM and BB binaries +COPY --from=noir /usr/src/noir/noir-repo/target/release/acvm /usr/src/noir/noir-repo/target/release/acvm +COPY --from=barretenberg /usr/src/barretenberg/cpp/build/bin/bb /usr/src/barretenberg/cpp/build/bin/bb +COPY --from=barretenberg /usr/src/barretenberg/cpp/build-pic/lib/nodejs_module.node /usr/src/barretenberg/cpp/build-pic/lib/nodejs_module.node + +WORKDIR /usr/src/yarn-project +COPY . . + +# We install a symlink to yarn-project's node_modules at a location that all portalled packages can find as they +# walk up the tree as part of module resolution. The supposedly idiomatic way of supporting module resolution +# correctly for portalled packages, is to use --preserve-symlinks when running node. +# This does kind of work, but jest doesn't honor it correctly, so this seems like a neat workaround. +# Also, --preserve-symlinks causes duplication of portalled instances such as bb.js, and breaks the singleton logic +# by initialising the module more than once. So at present I don't see a viable alternative. +RUN ln -s /usr/src/yarn-project/node_modules /usr/src/node_modules + +# TODO: Replace puppeteer with puppeteer-core to avoid this. +ENV PUPPETEER_SKIP_CHROMIUM_DOWNLOAD=true + +RUN ./bootstrap.sh + +ENV BB_BINARY_PATH=/usr/src/barretenberg/cpp/build/bin/bb +ENV BB_WORKING_DIRECTORY=/usr/src/yarn-project/bb +ENV ACVM_BINARY_PATH=/usr/src/noir/noir-repo/target/release/acvm +ENV ACVM_WORKING_DIRECTORY=/usr/src/yarn-project/acvm + +RUN mkdir -p $BB_WORKING_DIRECTORY $ACVM_WORKING_DIRECTORY && \ + test $(arch) = "x86_64" && \ + echo -n RootRollupArtifact PrivateKernelTailArtifact PrivateKernelTailToPublicArtifact | xargs -d ' ' -P 3 -I {} node bb-prover/dest/bb/index.js write-vk -c {} && \ + node bb-prover/dest/bb/index.js write-contract -c RootRollupArtifact -n UltraHonkVerifier.sol || \ + echo "Skipping VK generation arch=$(arch)" + +RUN yarn workspaces focus @aztec/aztec @aztec/cli-wallet --production && yarn cache clean + +# TODO: Use release-please to update package.json directly, and remove this! +# It's here to ensure the image rebuilds if the commit tag changes (as the content hash won't). +# ARG COMMIT_TAG="" +# RUN ./scripts/version_packages.sh + +# We no longer need these. +RUN rm -rf /usr/src/noir-projects /usr/src/l1-contracts + +# Create minimal size image. 
+FROM node:18.19.1-slim +ARG COMMIT_TAG="" +ENV COMMIT_TAG=$COMMIT_TAG +COPY --from=builder /usr/src /usr/src +WORKDIR /usr/src/yarn-project +# add curl to be able to download CRS file +RUN apt update && apt install -y curl jq +ENTRYPOINT ["yarn"] diff --git a/yarn-project/archiver/src/archiver/archiver.ts b/yarn-project/archiver/src/archiver/archiver.ts index 870e3f39a06f..47427f9d246f 100644 --- a/yarn-project/archiver/src/archiver/archiver.ts +++ b/yarn-project/archiver/src/archiver/archiver.ts @@ -1139,7 +1139,7 @@ class ArchiverStoreHelper getTotalL1ToL2MessageCount(): Promise { return this.store.getTotalL1ToL2MessageCount(); } - estimateSize(): { mappingSize: number; actualSize: number; numItems: number } { + estimateSize(): Promise<{ mappingSize: number; actualSize: number; numItems: number }> { return this.store.estimateSize(); } } diff --git a/yarn-project/archiver/src/archiver/archiver_store.ts b/yarn-project/archiver/src/archiver/archiver_store.ts index 596f7818a660..30364404e1c1 100644 --- a/yarn-project/archiver/src/archiver/archiver_store.ts +++ b/yarn-project/archiver/src/archiver/archiver_store.ts @@ -271,5 +271,5 @@ export interface ArchiverDataStore { /** * Estimates the size of the store in bytes. */ - estimateSize(): { mappingSize: number; actualSize: number; numItems: number }; + estimateSize(): Promise<{ mappingSize: number; actualSize: number; numItems: number }>; } diff --git a/yarn-project/archiver/src/archiver/archiver_store_test_suite.ts b/yarn-project/archiver/src/archiver/archiver_store_test_suite.ts index a702fb50b9bf..dea8335c5365 100644 --- a/yarn-project/archiver/src/archiver/archiver_store_test_suite.ts +++ b/yarn-project/archiver/src/archiver/archiver_store_test_suite.ts @@ -30,7 +30,10 @@ import { type L1Published } from './structs/published.js'; * @param testName - The name of the test suite. * @param getStore - Returns an instance of a store that's already been initialized. 
*/ -export function describeArchiverDataStore(testName: string, getStore: () => ArchiverDataStore) { +export function describeArchiverDataStore( + testName: string, + getStore: () => ArchiverDataStore | Promise, +) { describe(testName, () => { let store: ArchiverDataStore; let blocks: L1Published[]; @@ -52,7 +55,7 @@ export function describeArchiverDataStore(testName: string, getStore: () => Arch }); beforeEach(async () => { - store = getStore(); + store = await getStore(); blocks = await timesParallel(10, async i => makeL1Published(await L2Block.random(i + 1), i + 10)); }); diff --git a/yarn-project/archiver/src/archiver/kv_archiver_store/block_store.ts b/yarn-project/archiver/src/archiver/kv_archiver_store/block_store.ts index b6574b5f8fac..2cf6b46b3d39 100644 --- a/yarn-project/archiver/src/archiver/kv_archiver_store/block_store.ts +++ b/yarn-project/archiver/src/archiver/kv_archiver_store/block_store.ts @@ -1,7 +1,8 @@ import { Body, type InBlock, L2Block, L2BlockHash, type TxEffect, type TxHash, TxReceipt } from '@aztec/circuit-types'; import { AppendOnlyTreeSnapshot, type AztecAddress, BlockHeader, INITIAL_L2_BLOCK_NUM } from '@aztec/circuits.js'; +import { toArray } from '@aztec/foundation/iterable'; import { createLogger } from '@aztec/foundation/log'; -import { type AztecKVStore, type AztecMap, type AztecSingleton, type Range } from '@aztec/kv-store'; +import type { AztecAsyncKVStore, AztecAsyncMap, AztecAsyncSingleton, Range } from '@aztec/kv-store'; import { type L1Published, type L1PublishedData } from '../structs/published.js'; @@ -18,29 +19,29 @@ type BlockStorage = { */ export class BlockStore { /** Map block number to block data */ - #blocks: AztecMap; + #blocks: AztecAsyncMap; /** Map block hash to block body */ - #blockBodies: AztecMap; + #blockBodies: AztecAsyncMap; /** Stores L1 block number in which the last processed L2 block was included */ - #lastSynchedL1Block: AztecSingleton; + #lastSynchedL1Block: AztecAsyncSingleton; /** Stores l2 block number of the last proven block */ - #lastProvenL2Block: AztecSingleton; + #lastProvenL2Block: AztecAsyncSingleton; /** Stores l2 epoch number of the last proven epoch */ - #lastProvenL2Epoch: AztecSingleton; + #lastProvenL2Epoch: AztecAsyncSingleton; /** Index mapping transaction hash (as a string) to its location in a block */ - #txIndex: AztecMap; + #txIndex: AztecAsyncMap; /** Index mapping a contract's address (as a string) to its location in a block */ - #contractIndex: AztecMap; + #contractIndex: AztecAsyncMap; #log = createLogger('archiver:block_store'); - constructor(private db: AztecKVStore) { + constructor(private db: AztecAsyncKVStore) { this.#blocks = db.openMap('archiver_blocks'); this.#blockBodies = db.openMap('archiver_block_bodies'); this.#txIndex = db.openMap('archiver_tx_index'); @@ -60,23 +61,23 @@ export class BlockStore { return true; } - return await this.db.transaction(async () => { + return await this.db.transactionAsync(async () => { for (const block of blocks) { - void this.#blocks.set(block.data.number, { + await this.#blocks.set(block.data.number, { header: block.data.header.toBuffer(), archive: block.data.archive.toBuffer(), l1: block.l1, }); - block.data.body.txEffects.forEach((tx, i) => { - void this.#txIndex.set(tx.txHash.toString(), [block.data.number, i]); - }); + for (let i = 0; i < block.data.body.txEffects.length; i++) { + const txEffect = block.data.body.txEffects[i]; + await this.#txIndex.set(txEffect.txHash.toString(), [block.data.number, i]); + } - void this.#blockBodies.set((await 
block.data.hash()).toString(), block.data.body.toBuffer()); + await this.#blockBodies.set((await block.data.hash()).toString(), block.data.body.toBuffer()); } - void this.#lastSynchedL1Block.set(blocks[blocks.length - 1].l1.blockNumber); - + await this.#lastSynchedL1Block.set(blocks[blocks.length - 1].l1.blockNumber); return true; }); } @@ -89,9 +90,9 @@ export class BlockStore { * @returns True if the operation is successful */ async unwindBlocks(from: number, blocksToUnwind: number) { - return await this.db.transaction(async () => { - const last = this.getSynchedL2BlockNumber(); - if (from != last) { + return await this.db.transactionAsync(async () => { + const last = await this.getSynchedL2BlockNumber(); + if (from !== last) { throw new Error(`Can only unwind blocks from the tip (requested ${from} but current tip is ${last})`); } @@ -102,12 +103,10 @@ export class BlockStore { if (block === undefined) { throw new Error(`Cannot remove block ${blockNumber} from the store, we don't have it`); } - void this.#blocks.delete(block.data.number); - block.data.body.txEffects.forEach(tx => { - void this.#txIndex.delete(tx.txHash.toString()); - }); + await this.#blocks.delete(block.data.number); + await Promise.all(block.data.body.txEffects.map(tx => this.#txIndex.delete(tx.txHash.toString()))); const blockHash = (await block.data.hash()).toString(); - void this.#blockBodies.delete(blockHash); + await this.#blockBodies.delete(blockHash); this.#log.debug(`Unwound block ${blockNumber} ${blockHash}`); } @@ -122,7 +121,7 @@ export class BlockStore { * @returns The requested L2 blocks */ async *getBlocks(start: number, limit: number): AsyncIterableIterator> { - for (const blockStorage of this.#blocks.values(this.#computeBlockRange(start, limit))) { + for await (const blockStorage of this.#blocks.valuesAsync(this.#computeBlockRange(start, limit))) { const block = await this.getBlockFromBlockStorage(blockStorage); yield block; } @@ -133,8 +132,8 @@ export class BlockStore { * @param blockNumber - The number of the block to return. * @returns The requested L2 block. */ - getBlock(blockNumber: number): Promise | undefined> { - const blockStorage = this.#blocks.get(blockNumber); + async getBlock(blockNumber: number): Promise | undefined> { + const blockStorage = await this.#blocks.getAsync(blockNumber); if (!blockStorage || !blockStorage.header) { return Promise.resolve(undefined); } @@ -148,8 +147,8 @@ export class BlockStore { * @param limit - The number of blocks to return. 
* @returns The requested L2 block headers */ - *getBlockHeaders(start: number, limit: number): IterableIterator { - for (const blockStorage of this.#blocks.values(this.#computeBlockRange(start, limit))) { + async *getBlockHeaders(start: number, limit: number): AsyncIterableIterator { + for await (const blockStorage of this.#blocks.valuesAsync(this.#computeBlockRange(start, limit))) { yield BlockHeader.fromBuffer(blockStorage.header); } } @@ -158,7 +157,7 @@ export class BlockStore { const header = BlockHeader.fromBuffer(blockStorage.header); const archive = AppendOnlyTreeSnapshot.fromBuffer(blockStorage.archive); const blockHash = (await header.hash()).toString(); - const blockBodyBuffer = this.#blockBodies.get(blockHash); + const blockBodyBuffer = await this.#blockBodies.getAsync(blockHash); if (blockBodyBuffer === undefined) { throw new Error( `Could not retrieve body for block ${header.globalVariables.blockNumber.toNumber()} ${blockHash}`, @@ -176,7 +175,7 @@ export class BlockStore { * @returns The requested tx effect (or undefined if not found). */ async getTxEffect(txHash: TxHash): Promise | undefined> { - const [blockNumber, txIndex] = this.getTxLocation(txHash) ?? []; + const [blockNumber, txIndex] = (await this.getTxLocation(txHash)) ?? []; if (typeof blockNumber !== 'number' || typeof txIndex !== 'number') { return undefined; } @@ -199,7 +198,7 @@ export class BlockStore { * @returns The requested tx receipt (or undefined if not found). */ async getSettledTxReceipt(txHash: TxHash): Promise { - const [blockNumber, txIndex] = this.getTxLocation(txHash) ?? []; + const [blockNumber, txIndex] = (await this.getTxLocation(txHash)) ?? []; if (typeof blockNumber !== 'number' || typeof txIndex !== 'number') { return undefined; } @@ -222,8 +221,8 @@ export class BlockStore { * @param txHash - The txHash of the tx. * @returns The block number and index of the tx. */ - getTxLocation(txHash: TxHash): [blockNumber: number, txIndex: number] | undefined { - return this.#txIndex.get(txHash.toString()); + getTxLocation(txHash: TxHash): Promise<[blockNumber: number, txIndex: number] | undefined> { + return this.#txIndex.getAsync(txHash.toString()); } /** @@ -231,16 +230,16 @@ export class BlockStore { * @param contractAddress - The address of the contract to look up. * @returns The block number and index of the contract. */ - getContractLocation(contractAddress: AztecAddress): [blockNumber: number, index: number] | undefined { - return this.#contractIndex.get(contractAddress.toString()); + getContractLocation(contractAddress: AztecAddress): Promise<[blockNumber: number, index: number] | undefined> { + return this.#contractIndex.getAsync(contractAddress.toString()); } /** * Gets the number of the latest L2 block processed. * @returns The number of the latest L2 block processed. */ - getSynchedL2BlockNumber(): number { - const [lastBlockNumber] = this.#blocks.keys({ reverse: true, limit: 1 }); + async getSynchedL2BlockNumber(): Promise { + const [lastBlockNumber] = await toArray(this.#blocks.keysAsync({ reverse: true, limit: 1 })); return typeof lastBlockNumber === 'number' ? lastBlockNumber : INITIAL_L2_BLOCK_NUM - 1; } @@ -248,31 +247,31 @@ export class BlockStore { * Gets the most recent L1 block processed. 
* @returns The L1 block that published the latest L2 block */ - getSynchedL1BlockNumber(): bigint | undefined { - return this.#lastSynchedL1Block.get(); + getSynchedL1BlockNumber(): Promise { + return this.#lastSynchedL1Block.getAsync(); } setSynchedL1BlockNumber(l1BlockNumber: bigint) { - void this.#lastSynchedL1Block.set(l1BlockNumber); + return this.#lastSynchedL1Block.set(l1BlockNumber); } - getProvenL2BlockNumber(): number { - return this.#lastProvenL2Block.get() ?? 0; + async getProvenL2BlockNumber(): Promise { + return (await this.#lastProvenL2Block.getAsync()) ?? 0; } setProvenL2BlockNumber(blockNumber: number) { - void this.#lastProvenL2Block.set(blockNumber); + return this.#lastProvenL2Block.set(blockNumber); } - getProvenL2EpochNumber(): number | undefined { - return this.#lastProvenL2Epoch.get(); + getProvenL2EpochNumber(): Promise { + return this.#lastProvenL2Epoch.getAsync(); } setProvenL2EpochNumber(epochNumber: number) { - void this.#lastProvenL2Epoch.set(epochNumber); + return this.#lastProvenL2Epoch.set(epochNumber); } - #computeBlockRange(start: number, limit: number): Required, 'start' | 'end'>> { + #computeBlockRange(start: number, limit: number): Required, 'start' | 'limit'>> { if (limit < 1) { throw new Error(`Invalid limit: ${limit}`); } @@ -281,7 +280,6 @@ export class BlockStore { throw new Error(`Invalid start: ${start}`); } - const end = start + limit; - return { start, end }; + return { start, limit }; } } diff --git a/yarn-project/archiver/src/archiver/kv_archiver_store/contract_class_store.ts b/yarn-project/archiver/src/archiver/kv_archiver_store/contract_class_store.ts index c5a87590dfac..092bdaa6940d 100644 --- a/yarn-project/archiver/src/archiver/kv_archiver_store/contract_class_store.ts +++ b/yarn-project/archiver/src/archiver/kv_archiver_store/contract_class_store.ts @@ -7,17 +7,18 @@ import { type UnconstrainedFunctionWithMembershipProof, Vector, } from '@aztec/circuits.js'; +import { toArray } from '@aztec/foundation/iterable'; import { BufferReader, numToUInt8, serializeToBuffer } from '@aztec/foundation/serialize'; -import { type AztecKVStore, type AztecMap } from '@aztec/kv-store'; +import type { AztecAsyncKVStore, AztecAsyncMap } from '@aztec/kv-store'; /** * LMDB implementation of the ArchiverDataStore interface. 
diff --git a/yarn-project/archiver/src/archiver/kv_archiver_store/contract_class_store.ts b/yarn-project/archiver/src/archiver/kv_archiver_store/contract_class_store.ts
index c5a87590dfac..092bdaa6940d 100644
--- a/yarn-project/archiver/src/archiver/kv_archiver_store/contract_class_store.ts
+++ b/yarn-project/archiver/src/archiver/kv_archiver_store/contract_class_store.ts
@@ -7,17 +7,18 @@ import {
   type UnconstrainedFunctionWithMembershipProof,
   Vector,
 } from '@aztec/circuits.js';
+import { toArray } from '@aztec/foundation/iterable';
 import { BufferReader, numToUInt8, serializeToBuffer } from '@aztec/foundation/serialize';
-import { type AztecKVStore, type AztecMap } from '@aztec/kv-store';
+import type { AztecAsyncKVStore, AztecAsyncMap } from '@aztec/kv-store';

 /**
  * LMDB implementation of the ArchiverDataStore interface.
  */
 export class ContractClassStore {
-  #contractClasses: AztecMap<string, Buffer>;
-  #bytecodeCommitments: AztecMap<string, Buffer>;
+  #contractClasses: AztecAsyncMap<string, Buffer>;
+  #bytecodeCommitments: AztecAsyncMap<string, Buffer>;

-  constructor(private db: AztecKVStore) {
+  constructor(private db: AztecAsyncKVStore) {
     this.#contractClasses = db.openMap('archiver_contract_classes');
     this.#bytecodeCommitments = db.openMap('archiver_bytecode_commitments');
   }
@@ -35,25 +36,25 @@ export class ContractClassStore {
   }

   async deleteContractClasses(contractClass: ContractClassPublic, blockNumber: number): Promise<void> {
-    const restoredContractClass = this.#contractClasses.get(contractClass.id.toString());
+    const restoredContractClass = await this.#contractClasses.getAsync(contractClass.id.toString());
     if (restoredContractClass && deserializeContractClassPublic(restoredContractClass).l2BlockNumber >= blockNumber) {
       await this.#contractClasses.delete(contractClass.id.toString());
       await this.#bytecodeCommitments.delete(contractClass.id.toString());
     }
   }

-  getContractClass(id: Fr): ContractClassPublic | undefined {
-    const contractClass = this.#contractClasses.get(id.toString());
+  async getContractClass(id: Fr): Promise<ContractClassPublic | undefined> {
+    const contractClass = await this.#contractClasses.getAsync(id.toString());
     return contractClass && { ...deserializeContractClassPublic(contractClass), id };
   }

-  getBytecodeCommitment(id: Fr): Fr | undefined {
-    const value = this.#bytecodeCommitments.get(id.toString());
+  async getBytecodeCommitment(id: Fr): Promise<Fr | undefined> {
+    const value = await this.#bytecodeCommitments.getAsync(id.toString());
     return value === undefined ? undefined : Fr.fromBuffer(value);
   }

-  getContractClassIds(): Fr[] {
-    return Array.from(this.#contractClasses.keys()).map(key => Fr.fromHexString(key));
+  async getContractClassIds(): Promise<Fr[]> {
+    return (await toArray(this.#contractClasses.keysAsync())).map(key => Fr.fromHexString(key));
   }

   async addFunctions(
@@ -61,8 +62,8 @@ export class ContractClassStore {
     newPrivateFunctions: ExecutablePrivateFunctionWithMembershipProof[],
     newUnconstrainedFunctions: UnconstrainedFunctionWithMembershipProof[],
   ): Promise<boolean> {
-    await this.db.transaction(() => {
-      const existingClassBuffer = this.#contractClasses.get(contractClassId.toString());
+    await this.db.transactionAsync(async () => {
+      const existingClassBuffer = await this.#contractClasses.getAsync(contractClassId.toString());
       if (!existingClassBuffer) {
         throw new Error(`Unknown contract class ${contractClassId} when adding private functions to store`);
       }
@@ -83,9 +84,10 @@ export class ContractClassStore {
         ),
       ],
     };
-      void this.#contractClasses.set(contractClassId.toString(), serializeContractClassPublic(updatedClass));
+      await this.#contractClasses.set(contractClassId.toString(), serializeContractClassPublic(updatedClass));
     });
-    return Promise.resolve(true);
+
+    return true;
   }
 }
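The `transaction` to `transactionAsync` migration also changes the callback contract: reads and writes inside the closure are awaited instead of fired off with `void`, and the batch commits when the async callback resolves. A sketch of the pattern, with hypothetical `db`, `someMap`, and `merge` names:

    // Reads and writes inside transactionAsync are awaited; the store applies
    // the whole batch atomically once the async callback resolves.
    await db.transactionAsync(async () => {
      const existing = await someMap.getAsync('some-key');
      await someMap.set('some-key', merge(existing)); // merge() is a hypothetical helper
    });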
diff --git a/yarn-project/archiver/src/archiver/kv_archiver_store/contract_instance_store.ts b/yarn-project/archiver/src/archiver/kv_archiver_store/contract_instance_store.ts
index 194d52227637..4e1818f7e24c 100644
--- a/yarn-project/archiver/src/archiver/kv_archiver_store/contract_instance_store.ts
+++ b/yarn-project/archiver/src/archiver/kv_archiver_store/contract_instance_store.ts
@@ -1,13 +1,13 @@
 import { type AztecAddress, type ContractInstanceWithAddress, SerializableContractInstance } from '@aztec/circuits.js';
-import { type AztecKVStore, type AztecMap } from '@aztec/kv-store';
+import type { AztecAsyncKVStore, AztecAsyncMap } from '@aztec/kv-store';

 /**
  * LMDB implementation of the ArchiverDataStore interface.
  */
 export class ContractInstanceStore {
-  #contractInstances: AztecMap<string, Buffer>;
+  #contractInstances: AztecAsyncMap<string, Buffer>;

-  constructor(db: AztecKVStore) {
+  constructor(db: AztecAsyncKVStore) {
     this.#contractInstances = db.openMap('archiver_contract_instances');
   }
@@ -22,8 +22,8 @@ export class ContractInstanceStore {
     return this.#contractInstances.delete(contractInstance.address.toString());
   }

-  getContractInstance(address: AztecAddress): ContractInstanceWithAddress | undefined {
-    const contractInstance = this.#contractInstances.get(address.toString());
+  async getContractInstance(address: AztecAddress): Promise<ContractInstanceWithAddress | undefined> {
+    const contractInstance = await this.#contractInstances.getAsync(address.toString());
     return contractInstance && SerializableContractInstance.fromBuffer(contractInstance).withAddress(address);
   }
 }
diff --git a/yarn-project/archiver/src/archiver/kv_archiver_store/kv_archiver_store.test.ts b/yarn-project/archiver/src/archiver/kv_archiver_store/kv_archiver_store.test.ts
index d361f91c1397..9f918f47b96b 100644
--- a/yarn-project/archiver/src/archiver/kv_archiver_store/kv_archiver_store.test.ts
+++ b/yarn-project/archiver/src/archiver/kv_archiver_store/kv_archiver_store.test.ts
@@ -1,4 +1,4 @@
-import { openTmpStore } from '@aztec/kv-store/lmdb';
+import { openTmpStore } from '@aztec/kv-store/lmdb-v2';

 import { describeArchiverDataStore } from '../archiver_store_test_suite.js';
 import { KVArchiverDataStore } from './kv_archiver_store.js';
@@ -6,8 +6,8 @@ import { KVArchiverDataStore } from './kv_archiver_store.js';
 describe('KVArchiverDataStore', () => {
   let archiverStore: KVArchiverDataStore;

-  beforeEach(() => {
-    archiverStore = new KVArchiverDataStore(openTmpStore());
+  beforeEach(async () => {
+    archiverStore = new KVArchiverDataStore(await openTmpStore('archiver_test'));
   });

   describeArchiverDataStore('ArchiverStore', () => archiverStore);
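The lmdb-v2 flavour of `openTmpStore` is async and takes a store name, which is why the test setup above moves into an async `beforeEach`. The same pattern for any other suite (the store name is illustrative):

    import { openTmpStore } from '@aztec/kv-store/lmdb-v2';

    let store: Awaited<ReturnType<typeof openTmpStore>>;

    beforeEach(async () => {
      // The v2 factory provisions a temporary LMDB directory and returns a promise.
      store = await openTmpStore('my_test_store');
    });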
diff --git a/yarn-project/archiver/src/archiver/kv_archiver_store/kv_archiver_store.ts b/yarn-project/archiver/src/archiver/kv_archiver_store/kv_archiver_store.ts
index 91c310d4459c..f392a1619cd5 100644
--- a/yarn-project/archiver/src/archiver/kv_archiver_store/kv_archiver_store.ts
+++ b/yarn-project/archiver/src/archiver/kv_archiver_store/kv_archiver_store.ts
@@ -22,7 +22,7 @@ import { FunctionSelector } from '@aztec/foundation/abi';
 import { type AztecAddress } from '@aztec/foundation/aztec-address';
 import { toArray } from '@aztec/foundation/iterable';
 import { createLogger } from '@aztec/foundation/log';
-import { type AztecKVStore } from '@aztec/kv-store';
+import { type AztecAsyncKVStore, type StoreSize } from '@aztec/kv-store';

 import { type ArchiverDataStore, type ArchiverL1SynchPoint } from '../archiver_store.js';
 import { type DataRetrieval } from '../structs/data_retrieval.js';
@@ -48,7 +48,7 @@ export class KVArchiverDataStore implements ArchiverDataStore {

   #log = createLogger('archiver:data-store');

-  constructor(private db: AztecKVStore, logsMaxPageSize: number = 1000) {
+  constructor(private db: AztecAsyncKVStore, logsMaxPageSize: number = 1000) {
     this.#blockStore = new BlockStore(db);
     this.#logStore = new LogStore(db, this.#blockStore, logsMaxPageSize);
     this.#messageStore = new MessageStore(db);
@@ -76,16 +76,16 @@ export class KVArchiverDataStore implements ArchiverDataStore {
   }

   getContractClass(id: Fr): Promise<ContractClassPublic | undefined> {
-    return Promise.resolve(this.#contractClassStore.getContractClass(id));
+    return this.#contractClassStore.getContractClass(id);
   }

   getContractClassIds(): Promise<Fr[]> {
-    return Promise.resolve(this.#contractClassStore.getContractClassIds());
+    return this.#contractClassStore.getContractClassIds();
   }

   getContractInstance(address: AztecAddress): Promise<ContractInstanceWithAddress | undefined> {
     const contract = this.#contractInstanceStore.getContractInstance(address);
-    return Promise.resolve(contract);
+    return contract;
   }

   async addContractClasses(
@@ -107,7 +107,7 @@ export class KVArchiverDataStore implements ArchiverDataStore {
   }

   getBytecodeCommitment(contractClassId: Fr): Promise<Fr | undefined> {
-    return Promise.resolve(this.#contractClassStore.getBytecodeCommitment(contractClassId));
+    return this.#contractClassStore.getBytecodeCommitment(contractClassId);
   }

   addFunctions(
@@ -165,12 +165,7 @@ export class KVArchiverDataStore implements ArchiverDataStore {
    * @returns The requested L2 blocks
    */
   getBlockHeaders(start: number, limit: number): Promise<BlockHeader[]> {
-    try {
-      return Promise.resolve(Array.from(this.#blockStore.getBlockHeaders(start, limit)));
-    } catch (err) {
-      // this function is sync so if any errors are thrown we need to make sure they're passed on as rejected Promises
-      return Promise.reject(err);
-    }
+    return toArray(this.#blockStore.getBlockHeaders(start, limit));
   }

   /**
@@ -179,7 +174,7 @@ export class KVArchiverDataStore implements ArchiverDataStore {
    * @returns The requested tx effect (or undefined if not found).
    */
   getTxEffect(txHash: TxHash) {
-    return Promise.resolve(this.#blockStore.getTxEffect(txHash));
+    return this.#blockStore.getTxEffect(txHash);
   }

   /**
@@ -188,7 +183,7 @@ export class KVArchiverDataStore implements ArchiverDataStore {
    * @returns The requested tx receipt (or undefined if not found).
    */
   getSettledTxReceipt(txHash: TxHash): Promise<TxReceipt | undefined> {
-    return Promise.resolve(this.#blockStore.getSettledTxReceipt(txHash));
+    return this.#blockStore.getSettledTxReceipt(txHash);
   }

   /**
@@ -222,7 +217,7 @@ export class KVArchiverDataStore implements ArchiverDataStore {
   }

   getTotalL1ToL2MessageCount(): Promise<bigint> {
-    return Promise.resolve(this.#messageStore.getTotalL1ToL2MessageCount());
+    return this.#messageStore.getTotalL1ToL2MessageCount();
   }

   /**
@@ -231,7 +226,7 @@ export class KVArchiverDataStore implements ArchiverDataStore {
    * @returns True if the operation is successful.
    */
   addL1ToL2Messages(messages: DataRetrieval<InboxLeaf>): Promise<boolean> {
-    return Promise.resolve(this.#messageStore.addL1ToL2Messages(messages));
+    return this.#messageStore.addL1ToL2Messages(messages);
   }

   /**
@@ -240,7 +235,7 @@ export class KVArchiverDataStore implements ArchiverDataStore {
    * @returns The index of the L1 to L2 message in the L1 to L2 message tree (undefined if not found).
    */
   getL1ToL2MessageIndex(l1ToL2Message: Fr): Promise<bigint | undefined> {
-    return Promise.resolve(this.#messageStore.getL1ToL2MessageIndex(l1ToL2Message));
+    return this.#messageStore.getL1ToL2MessageIndex(l1ToL2Message);
   }

   /**
@@ -249,11 +244,7 @@ export class KVArchiverDataStore implements ArchiverDataStore {
    * @returns The L1 to L2 messages/leaves of the messages subtree (throws if not found).
    */
   getL1ToL2Messages(blockNumber: bigint): Promise<Fr[]> {
-    try {
-      return Promise.resolve(this.#messageStore.getL1ToL2Messages(blockNumber));
-    } catch (err) {
-      return Promise.reject(err);
-    }
+    return this.#messageStore.getL1ToL2Messages(blockNumber);
   }

   /**
@@ -263,11 +254,7 @@ export class KVArchiverDataStore implements ArchiverDataStore {
    * @returns An array of private logs from the specified range of blocks.
*/ getPrivateLogs(from: number, limit: number): Promise { - try { - return Promise.resolve(Array.from(this.#logStore.getPrivateLogs(from, limit))); - } catch (err) { - return Promise.reject(err); - } + return this.#logStore.getPrivateLogs(from, limit); } /** @@ -291,7 +278,7 @@ export class KVArchiverDataStore implements ArchiverDataStore { */ getPublicLogs(filter: LogFilter): Promise { try { - return Promise.resolve(this.#logStore.getPublicLogs(filter)); + return this.#logStore.getPublicLogs(filter); } catch (err) { return Promise.reject(err); } @@ -304,7 +291,7 @@ export class KVArchiverDataStore implements ArchiverDataStore { */ getContractClassLogs(filter: LogFilter): Promise { try { - return Promise.resolve(this.#logStore.getContractClassLogs(filter)); + return this.#logStore.getContractClassLogs(filter); } catch (err) { return Promise.reject(err); } @@ -315,48 +302,48 @@ export class KVArchiverDataStore implements ArchiverDataStore { * @returns The number of the latest L2 block processed. */ getSynchedL2BlockNumber(): Promise { - return Promise.resolve(this.#blockStore.getSynchedL2BlockNumber()); + return this.#blockStore.getSynchedL2BlockNumber(); } getProvenL2BlockNumber(): Promise { - return Promise.resolve(this.#blockStore.getProvenL2BlockNumber()); + return this.#blockStore.getProvenL2BlockNumber(); } getProvenL2EpochNumber(): Promise { - return Promise.resolve(this.#blockStore.getProvenL2EpochNumber()); + return this.#blockStore.getProvenL2EpochNumber(); } - setProvenL2BlockNumber(blockNumber: number) { - this.#blockStore.setProvenL2BlockNumber(blockNumber); - return Promise.resolve(); + async setProvenL2BlockNumber(blockNumber: number) { + await this.#blockStore.setProvenL2BlockNumber(blockNumber); } - setProvenL2EpochNumber(epochNumber: number) { - this.#blockStore.setProvenL2EpochNumber(epochNumber); - return Promise.resolve(); + async setProvenL2EpochNumber(epochNumber: number) { + await this.#blockStore.setProvenL2EpochNumber(epochNumber); } - setBlockSynchedL1BlockNumber(l1BlockNumber: bigint) { - this.#blockStore.setSynchedL1BlockNumber(l1BlockNumber); - return Promise.resolve(); + async setBlockSynchedL1BlockNumber(l1BlockNumber: bigint) { + await this.#blockStore.setSynchedL1BlockNumber(l1BlockNumber); } - setMessageSynchedL1BlockNumber(l1BlockNumber: bigint) { - this.#messageStore.setSynchedL1BlockNumber(l1BlockNumber); - return Promise.resolve(); + async setMessageSynchedL1BlockNumber(l1BlockNumber: bigint) { + await this.#messageStore.setSynchedL1BlockNumber(l1BlockNumber); } /** * Gets the last L1 block number processed by the archiver */ - getSynchPoint(): Promise { - return Promise.resolve({ - blocksSynchedTo: this.#blockStore.getSynchedL1BlockNumber(), - messagesSynchedTo: this.#messageStore.getSynchedL1BlockNumber(), - }); - } - - public estimateSize(): { mappingSize: number; actualSize: number; numItems: number } { + async getSynchPoint(): Promise { + const [blocksSynchedTo, messagesSynchedTo] = await Promise.all([ + this.#blockStore.getSynchedL1BlockNumber(), + this.#messageStore.getSynchedL1BlockNumber(), + ]); + return { + blocksSynchedTo, + messagesSynchedTo, + }; + } + + public estimateSize(): Promise { return this.db.estimateSize(); } } diff --git a/yarn-project/archiver/src/archiver/kv_archiver_store/log_store.ts b/yarn-project/archiver/src/archiver/kv_archiver_store/log_store.ts index c1821c09555d..2c9d6d8a04a7 100644 --- a/yarn-project/archiver/src/archiver/kv_archiver_store/log_store.ts +++ 
b/yarn-project/archiver/src/archiver/kv_archiver_store/log_store.ts @@ -18,7 +18,7 @@ import { } from '@aztec/circuits.js/constants'; import { createLogger } from '@aztec/foundation/log'; import { BufferReader, numToUInt32BE } from '@aztec/foundation/serialize'; -import { type AztecKVStore, type AztecMap } from '@aztec/kv-store'; +import type { AztecAsyncKVStore, AztecAsyncMap } from '@aztec/kv-store'; import { type BlockStore } from './block_store.js'; @@ -26,15 +26,15 @@ import { type BlockStore } from './block_store.js'; * A store for logs */ export class LogStore { - #logsByTag: AztecMap; - #logTagsByBlock: AztecMap; - #privateLogsByBlock: AztecMap; - #publicLogsByBlock: AztecMap; - #contractClassLogsByBlock: AztecMap; + #logsByTag: AztecAsyncMap; + #logTagsByBlock: AztecAsyncMap; + #privateLogsByBlock: AztecAsyncMap; + #publicLogsByBlock: AztecAsyncMap; + #contractClassLogsByBlock: AztecAsyncMap; #logsMaxPageSize: number; #log = createLogger('archiver:log_store'); - constructor(private db: AztecKVStore, private blockStore: BlockStore, logsMaxPageSize: number = 1000) { + constructor(private db: AztecAsyncKVStore, private blockStore: BlockStore, logsMaxPageSize: number = 1000) { this.#logsByTag = db.openMap('archiver_tagged_logs_by_tag'); this.#logTagsByBlock = db.openMap('archiver_log_tags_by_block'); this.#privateLogsByBlock = db.openMap('archiver_private_logs_by_block'); @@ -125,7 +125,7 @@ export class LogStore { * @param blocks - The blocks for which to add the logs. * @returns True if the operation is successful. */ - async addLogs(blocks: L2Block[]): Promise { + addLogs(blocks: L2Block[]): Promise { const taggedLogsToAdd = blocks .flatMap(block => [this.#extractTaggedLogsFromPrivate(block), this.#extractTaggedLogsFromPublic(block)]) .reduce((acc, val) => { @@ -136,31 +136,32 @@ export class LogStore { return acc; }); const tagsToUpdate = Array.from(taggedLogsToAdd.keys()); - const currentTaggedLogs = await this.db.transaction(() => - tagsToUpdate.map(tag => ({ tag, logBuffers: this.#logsByTag.get(tag) })), - ); - currentTaggedLogs.forEach(taggedLogBuffer => { - if (taggedLogBuffer.logBuffers && taggedLogBuffer.logBuffers.length > 0) { - taggedLogsToAdd.set( - taggedLogBuffer.tag, - taggedLogBuffer.logBuffers!.concat(taggedLogsToAdd.get(taggedLogBuffer.tag)!), - ); - } - }); - return this.db.transaction(() => { - blocks.forEach(block => { + + return this.db.transactionAsync(async () => { + const currentTaggedLogs = await Promise.all( + tagsToUpdate.map(async tag => ({ tag, logBuffers: await this.#logsByTag.getAsync(tag) })), + ); + currentTaggedLogs.forEach(taggedLogBuffer => { + if (taggedLogBuffer.logBuffers && taggedLogBuffer.logBuffers.length > 0) { + taggedLogsToAdd.set( + taggedLogBuffer.tag, + taggedLogBuffer.logBuffers!.concat(taggedLogsToAdd.get(taggedLogBuffer.tag)!), + ); + } + }); + for (const block of blocks) { const tagsInBlock = []; for (const [tag, logs] of taggedLogsToAdd.entries()) { - void this.#logsByTag.set(tag, logs); + await this.#logsByTag.set(tag, logs); tagsInBlock.push(tag); } - void this.#logTagsByBlock.set(block.number, tagsInBlock); + await this.#logTagsByBlock.set(block.number, tagsInBlock); const privateLogsInBlock = block.body.txEffects .map(txEffect => txEffect.privateLogs) .flat() .map(log => log.toBuffer()); - void this.#privateLogsByBlock.set(block.number, Buffer.concat(privateLogsInBlock)); + await this.#privateLogsByBlock.set(block.number, Buffer.concat(privateLogsInBlock)); const publicLogsInBlock = block.body.txEffects .map((txEffect, 
txIndex) => @@ -172,29 +173,36 @@ export class LogStore { ) .flat(); - void this.#publicLogsByBlock.set(block.number, Buffer.concat(publicLogsInBlock)); - void this.#contractClassLogsByBlock.set(block.number, block.body.contractClassLogs.toBuffer()); - }); + await this.#publicLogsByBlock.set(block.number, Buffer.concat(publicLogsInBlock)); + await this.#contractClassLogsByBlock.set(block.number, block.body.contractClassLogs.toBuffer()); + } return true; }); } - async deleteLogs(blocks: L2Block[]): Promise { - const tagsToDelete = await this.db.transaction(() => { - return blocks.flatMap(block => this.#logTagsByBlock.get(block.number)?.map(tag => tag.toString()) ?? []); - }); - return this.db.transaction(() => { - blocks.forEach(block => { - void this.#privateLogsByBlock.delete(block.number); - void this.#publicLogsByBlock.delete(block.number); - void this.#logTagsByBlock.delete(block.number); - }); - - tagsToDelete.forEach(tag => { - void this.#logsByTag.delete(tag.toString()); - }); - + deleteLogs(blocks: L2Block[]): Promise { + return this.db.transactionAsync(async () => { + const tagsToDelete = ( + await Promise.all( + blocks.map(async block => { + const tags = await this.#logTagsByBlock.getAsync(block.number); + return tags ?? []; + }), + ) + ).flat(); + + await Promise.all( + blocks.map(block => + Promise.all([ + this.#privateLogsByBlock.delete(block.number), + this.#publicLogsByBlock.delete(block.number), + this.#logTagsByBlock.delete(block.number), + ]), + ), + ); + + await Promise.all(tagsToDelete.map(tag => this.#logsByTag.delete(tag.toString()))); return true; }); } @@ -205,9 +213,9 @@ export class LogStore { * @param limit - The maximum number of blocks to retrieve logs from. * @returns An array of private logs from the specified range of blocks. */ - getPrivateLogs(start: number, limit: number) { + async getPrivateLogs(start: number, limit: number): Promise { const logs = []; - for (const buffer of this.#privateLogsByBlock.values({ start, limit })) { + for await (const buffer of this.#privateLogsByBlock.valuesAsync({ start, limit })) { const reader = new BufferReader(buffer); while (reader.remainingBytes() > 0) { logs.push(reader.readObject(PrivateLog)); @@ -222,11 +230,10 @@ export class LogStore { * @returns For each received tag, an array of matching logs is returned. An empty array implies no logs match * that tag. */ - getLogsByTags(tags: Fr[]): Promise { - return this.db.transaction(() => - tags - .map(tag => this.#logsByTag.get(tag.toString())) - .map(noteLogBuffers => noteLogBuffers?.map(noteLogBuffer => TxScopedL2Log.fromBuffer(noteLogBuffer)) ?? []), + async getLogsByTags(tags: Fr[]): Promise { + const logs = await Promise.all(tags.map(tag => this.#logsByTag.getAsync(tag.toString()))); + return logs.map( + noteLogBuffers => noteLogBuffers?.map(noteLogBuffer => TxScopedL2Log.fromBuffer(noteLogBuffer)) ?? [], ); } @@ -235,7 +242,7 @@ export class LogStore { * @param filter - The filter to apply to the logs. * @returns The requested logs. */ - getPublicLogs(filter: LogFilter): GetPublicLogsResponse { + getPublicLogs(filter: LogFilter): Promise { if (filter.afterLog) { return this.#filterPublicLogsBetweenBlocks(filter); } else if (filter.txHash) { @@ -245,17 +252,17 @@ export class LogStore { } } - #filterPublicLogsOfTx(filter: LogFilter): GetPublicLogsResponse { + async #filterPublicLogsOfTx(filter: LogFilter): Promise { if (!filter.txHash) { throw new Error('Missing txHash'); } - const [blockNumber, txIndex] = this.blockStore.getTxLocation(filter.txHash) ?? 
[]; + const [blockNumber, txIndex] = (await this.blockStore.getTxLocation(filter.txHash)) ?? []; if (typeof blockNumber !== 'number' || typeof txIndex !== 'number') { return { logs: [], maxLogsHit: false }; } - const buffer = this.#publicLogsByBlock.get(blockNumber) ?? Buffer.alloc(0); + const buffer = (await this.#publicLogsByBlock.getAsync(blockNumber)) ?? Buffer.alloc(0); const publicLogsInBlock: [PublicLog[]] = [[]]; const reader = new BufferReader(buffer); while (reader.remainingBytes() > 0) { @@ -275,7 +282,7 @@ export class LogStore { return { logs, maxLogsHit }; } - #filterPublicLogsBetweenBlocks(filter: LogFilter): GetPublicLogsResponse { + async #filterPublicLogsBetweenBlocks(filter: LogFilter): Promise { const start = filter.afterLog?.blockNumber ?? Math.max(filter.fromBlock ?? INITIAL_L2_BLOCK_NUM, INITIAL_L2_BLOCK_NUM); const end = filter.toBlock; @@ -290,7 +297,7 @@ export class LogStore { const logs: ExtendedPublicLog[] = []; let maxLogsHit = false; - loopOverBlocks: for (const [blockNumber, logBuffer] of this.#publicLogsByBlock.entries({ start, end })) { + loopOverBlocks: for await (const [blockNumber, logBuffer] of this.#publicLogsByBlock.entriesAsync({ start, end })) { const publicLogsInBlock: [PublicLog[]] = [[]]; const reader = new BufferReader(logBuffer); while (reader.remainingBytes() > 0) { @@ -319,7 +326,7 @@ export class LogStore { * @param filter - The filter to apply to the logs. * @returns The requested logs. */ - getContractClassLogs(filter: LogFilter): GetContractClassLogsResponse { + getContractClassLogs(filter: LogFilter): Promise { if (filter.afterLog) { return this.#filterContractClassLogsBetweenBlocks(filter); } else if (filter.txHash) { @@ -329,16 +336,16 @@ export class LogStore { } } - #filterContractClassLogsOfTx(filter: LogFilter): GetContractClassLogsResponse { + async #filterContractClassLogsOfTx(filter: LogFilter): Promise { if (!filter.txHash) { throw new Error('Missing txHash'); } - const [blockNumber, txIndex] = this.blockStore.getTxLocation(filter.txHash) ?? []; + const [blockNumber, txIndex] = (await this.blockStore.getTxLocation(filter.txHash)) ?? []; if (typeof blockNumber !== 'number' || typeof txIndex !== 'number') { return { logs: [], maxLogsHit: false }; } - const contractClassLogsBuffer = this.#contractClassLogsByBlock.get(blockNumber); + const contractClassLogsBuffer = await this.#contractClassLogsByBlock.getAsync(blockNumber); const contractClassLogsInBlock = contractClassLogsBuffer ? ContractClass2BlockL2Logs.fromBuffer(contractClassLogsBuffer) : new ContractClass2BlockL2Logs([]); @@ -350,7 +357,7 @@ export class LogStore { return { logs, maxLogsHit }; } - #filterContractClassLogsBetweenBlocks(filter: LogFilter): GetContractClassLogsResponse { + async #filterContractClassLogsBetweenBlocks(filter: LogFilter): Promise { const start = filter.afterLog?.blockNumber ?? Math.max(filter.fromBlock ?? INITIAL_L2_BLOCK_NUM, INITIAL_L2_BLOCK_NUM); const end = filter.toBlock; @@ -365,7 +372,10 @@ export class LogStore { const logs: ExtendedUnencryptedL2Log[] = []; let maxLogsHit = false; - loopOverBlocks: for (const [blockNumber, logBuffer] of this.#contractClassLogsByBlock.entries({ start, end })) { + loopOverBlocks: for await (const [blockNumber, logBuffer] of this.#contractClassLogsByBlock.entriesAsync({ + start, + end, + })) { const contractClassLogsInBlock = ContractClass2BlockL2Logs.fromBuffer(logBuffer); for (let txIndex = filter.afterLog?.txIndex ?? 
0; txIndex < contractClassLogsInBlock.txLogs.length; txIndex++) { const txLogs = contractClassLogsInBlock.txLogs[txIndex].unrollLogs(); diff --git a/yarn-project/archiver/src/archiver/kv_archiver_store/message_store.ts b/yarn-project/archiver/src/archiver/kv_archiver_store/message_store.ts index fe54bd4f4b90..2b0be80df61c 100644 --- a/yarn-project/archiver/src/archiver/kv_archiver_store/message_store.ts +++ b/yarn-project/archiver/src/archiver/kv_archiver_store/message_store.ts @@ -1,7 +1,7 @@ import { InboxLeaf } from '@aztec/circuit-types'; import { Fr, L1_TO_L2_MSG_SUBTREE_HEIGHT } from '@aztec/circuits.js'; import { createLogger } from '@aztec/foundation/log'; -import { type AztecKVStore, type AztecMap, type AztecSingleton } from '@aztec/kv-store'; +import type { AztecAsyncKVStore, AztecAsyncMap, AztecAsyncSingleton } from '@aztec/kv-store'; import { type DataRetrieval } from '../structs/data_retrieval.js'; @@ -9,36 +9,36 @@ import { type DataRetrieval } from '../structs/data_retrieval.js'; * LMDB implementation of the ArchiverDataStore interface. */ export class MessageStore { - #l1ToL2Messages: AztecMap; - #l1ToL2MessageIndices: AztecMap; - #lastSynchedL1Block: AztecSingleton; - #totalMessageCount: AztecSingleton; + #l1ToL2Messages: AztecAsyncMap; + #l1ToL2MessageIndices: AztecAsyncMap; + #lastSynchedL1Block: AztecAsyncSingleton; + #totalMessageCount: AztecAsyncSingleton; #log = createLogger('archiver:message_store'); #l1ToL2MessagesSubtreeSize = 2 ** L1_TO_L2_MSG_SUBTREE_HEIGHT; - constructor(private db: AztecKVStore) { + constructor(private db: AztecAsyncKVStore) { this.#l1ToL2Messages = db.openMap('archiver_l1_to_l2_messages'); this.#l1ToL2MessageIndices = db.openMap('archiver_l1_to_l2_message_indices'); this.#lastSynchedL1Block = db.openSingleton('archiver_last_l1_block_new_messages'); this.#totalMessageCount = db.openSingleton('archiver_l1_to_l2_message_count'); } - getTotalL1ToL2MessageCount(): bigint { - return this.#totalMessageCount.get() ?? 0n; + async getTotalL1ToL2MessageCount(): Promise { + return (await this.#totalMessageCount.getAsync()) ?? 0n; } /** * Gets the last L1 block number that emitted new messages. * @returns The last L1 block number processed */ - getSynchedL1BlockNumber(): bigint | undefined { - return this.#lastSynchedL1Block.get(); + getSynchedL1BlockNumber(): Promise { + return this.#lastSynchedL1Block.getAsync(); } - setSynchedL1BlockNumber(l1BlockNumber: bigint) { - void this.#lastSynchedL1Block.set(l1BlockNumber); + async setSynchedL1BlockNumber(l1BlockNumber: bigint): Promise { + await this.#lastSynchedL1Block.set(l1BlockNumber); } /** @@ -47,22 +47,22 @@ export class MessageStore { * @returns True if the operation is successful. */ addL1ToL2Messages(messages: DataRetrieval): Promise { - return this.db.transaction(() => { - const lastL1BlockNumber = this.#lastSynchedL1Block.get() ?? 0n; + return this.db.transactionAsync(async () => { + const lastL1BlockNumber = (await this.#lastSynchedL1Block.getAsync()) ?? 
0n; if (lastL1BlockNumber >= messages.lastProcessedL1BlockNumber) { return false; } - void this.#lastSynchedL1Block.set(messages.lastProcessedL1BlockNumber); + await this.#lastSynchedL1Block.set(messages.lastProcessedL1BlockNumber); for (const message of messages.retrievedData) { const key = `${message.index}`; - void this.#l1ToL2Messages.set(key, message.leaf.toBuffer()); - void this.#l1ToL2MessageIndices.set(message.leaf.toString(), message.index); + await this.#l1ToL2Messages.set(key, message.leaf.toBuffer()); + await this.#l1ToL2MessageIndices.set(message.leaf.toString(), message.index); } - const lastTotalMessageCount = this.getTotalL1ToL2MessageCount(); - void this.#totalMessageCount.set(lastTotalMessageCount + BigInt(messages.retrievedData.length)); + const lastTotalMessageCount = await this.getTotalL1ToL2MessageCount(); + await this.#totalMessageCount.set(lastTotalMessageCount + BigInt(messages.retrievedData.length)); return true; }); @@ -74,17 +74,17 @@ export class MessageStore { * @returns The index of the L1 to L2 message in the L1 to L2 message tree (undefined if not found). */ getL1ToL2MessageIndex(l1ToL2Message: Fr): Promise { - return Promise.resolve(this.#l1ToL2MessageIndices.get(l1ToL2Message.toString())); + return this.#l1ToL2MessageIndices.getAsync(l1ToL2Message.toString()); } - getL1ToL2Messages(blockNumber: bigint): Fr[] { + async getL1ToL2Messages(blockNumber: bigint): Promise { const messages: Fr[] = []; let undefinedMessageFound = false; const startIndex = Number(InboxLeaf.smallestIndexFromL2Block(blockNumber)); for (let i = startIndex; i < startIndex + this.#l1ToL2MessagesSubtreeSize; i++) { // This is inefficient but probably fine for now. const key = `${i}`; - const message = this.#l1ToL2Messages.get(key); + const message = await this.#l1ToL2Messages.getAsync(key); if (message) { if (undefinedMessageFound) { throw new Error(`L1 to L2 message gap found in block ${blockNumber}`); diff --git a/yarn-project/archiver/src/archiver/kv_archiver_store/nullifier_store.ts b/yarn-project/archiver/src/archiver/kv_archiver_store/nullifier_store.ts index 3a300cb907f5..1f70bd14a0cd 100644 --- a/yarn-project/archiver/src/archiver/kv_archiver_store/nullifier_store.ts +++ b/yarn-project/archiver/src/archiver/kv_archiver_store/nullifier_store.ts @@ -1,15 +1,15 @@ import { type InBlock, type L2Block } from '@aztec/circuit-types'; import { type Fr, MAX_NULLIFIERS_PER_TX } from '@aztec/circuits.js'; import { createLogger } from '@aztec/foundation/log'; -import { type AztecKVStore, type AztecMap } from '@aztec/kv-store'; +import type { AztecAsyncKVStore, AztecAsyncMap } from '@aztec/kv-store'; export class NullifierStore { - #nullifiersToBlockNumber: AztecMap; - #nullifiersToBlockHash: AztecMap; - #nullifiersToIndex: AztecMap; + #nullifiersToBlockNumber: AztecAsyncMap; + #nullifiersToBlockHash: AztecAsyncMap; + #nullifiersToIndex: AztecAsyncMap; #log = createLogger('archiver:log_store'); - constructor(private db: AztecKVStore) { + constructor(private db: AztecAsyncKVStore) { this.#nullifiersToBlockNumber = db.openMap('archiver_nullifiers_to_block_number'); this.#nullifiersToBlockHash = db.openMap('archiver_nullifiers_to_block_hash'); this.#nullifiersToIndex = db.openMap('archiver_nullifiers_to_index'); @@ -17,31 +17,39 @@ export class NullifierStore { async addNullifiers(blocks: L2Block[]): Promise { const blockHashes = await Promise.all(blocks.map(block => block.hash())); - await this.db.transaction(() => { - blocks.forEach((block, i) => { - const dataStartIndexForBlock = - 
block.header.state.partial.nullifierTree.nextAvailableLeafIndex - - block.body.txEffects.length * MAX_NULLIFIERS_PER_TX; - block.body.txEffects.forEach((txEffects, txIndex) => { - const dataStartIndexForTx = dataStartIndexForBlock + txIndex * MAX_NULLIFIERS_PER_TX; - txEffects.nullifiers.forEach((nullifier, nullifierIndex) => { - void this.#nullifiersToBlockNumber.set(nullifier.toString(), block.number); - void this.#nullifiersToBlockHash.set(nullifier.toString(), blockHashes[i].toString()); - void this.#nullifiersToIndex.set(nullifier.toString(), dataStartIndexForTx + nullifierIndex); - }); - }); - }); + await this.db.transactionAsync(async () => { + await Promise.all( + blocks.map((block, i) => { + const dataStartIndexForBlock = + block.header.state.partial.nullifierTree.nextAvailableLeafIndex - + block.body.txEffects.length * MAX_NULLIFIERS_PER_TX; + return Promise.all( + block.body.txEffects.map((txEffects, txIndex) => { + const dataStartIndexForTx = dataStartIndexForBlock + txIndex * MAX_NULLIFIERS_PER_TX; + return Promise.all( + txEffects.nullifiers.map(async (nullifier, nullifierIndex) => { + await this.#nullifiersToBlockNumber.set(nullifier.toString(), block.number); + await this.#nullifiersToBlockHash.set(nullifier.toString(), blockHashes[i].toString()); + await this.#nullifiersToIndex.set(nullifier.toString(), dataStartIndexForTx + nullifierIndex); + }), + ); + }), + ); + }), + ); }); return true; } async deleteNullifiers(blocks: L2Block[]): Promise { - await this.db.transaction(() => { + await this.db.transactionAsync(async () => { for (const block of blocks) { for (const nullifier of block.body.txEffects.flatMap(tx => tx.nullifiers)) { - void this.#nullifiersToBlockNumber.delete(nullifier.toString()); - void this.#nullifiersToBlockHash.delete(nullifier.toString()); - void this.#nullifiersToIndex.delete(nullifier.toString()); + await Promise.all([ + this.#nullifiersToBlockNumber.delete(nullifier.toString()), + this.#nullifiersToBlockHash.delete(nullifier.toString()), + this.#nullifiersToIndex.delete(nullifier.toString()), + ]); } } }); @@ -52,13 +60,22 @@ export class NullifierStore { blockNumber: number, nullifiers: Fr[], ): Promise<(InBlock | undefined)[]> { - const maybeNullifiers = await this.db.transaction(() => { - return nullifiers.map(nullifier => ({ - data: this.#nullifiersToIndex.get(nullifier.toString()), - l2BlockNumber: this.#nullifiersToBlockNumber.get(nullifier.toString()), - l2BlockHash: this.#nullifiersToBlockHash.get(nullifier.toString()), - })); - }); + const asStrings = nullifiers.map(x => x.toString()); + + const maybeNullifiers = await Promise.all( + asStrings.map(async nullifier => { + const [data, l2BlockNumber, l2BlockHash] = await Promise.all([ + this.#nullifiersToIndex.getAsync(nullifier), + this.#nullifiersToBlockNumber.getAsync(nullifier), + this.#nullifiersToBlockHash.getAsync(nullifier), + ]); + return { + data, + l2BlockNumber, + l2BlockHash, + }; + }), + ); return maybeNullifiers.map(({ data, l2BlockNumber, l2BlockHash }) => { if ( data === undefined || diff --git a/yarn-project/archiver/src/archiver/memory_archiver_store/memory_archiver_store.ts b/yarn-project/archiver/src/archiver/memory_archiver_store/memory_archiver_store.ts index 4c5e8d154e38..2ddcbc77a004 100644 --- a/yarn-project/archiver/src/archiver/memory_archiver_store/memory_archiver_store.ts +++ b/yarn-project/archiver/src/archiver/memory_archiver_store/memory_archiver_store.ts @@ -302,8 +302,8 @@ export class MemoryArchiverStore implements ArchiverDataStore { */ addLogs(blocks: 
L2Block[]): Promise { blocks.forEach(block => { - void this.#storeTaggedLogsFromPrivate(block); - void this.#storeTaggedLogsFromPublic(block); + this.#storeTaggedLogsFromPrivate(block); + this.#storeTaggedLogsFromPublic(block); this.privateLogsPerBlock.set(block.number, block.body.txEffects.map(txEffect => txEffect.privateLogs).flat()); this.publicLogsPerBlock.set(block.number, block.body.txEffects.map(txEffect => txEffect.publicLogs).flat()); this.contractClassLogsPerBlock.set(block.number, block.body.contractClassLogs); @@ -751,7 +751,7 @@ export class MemoryArchiverStore implements ArchiverDataStore { } } - public estimateSize(): { mappingSize: number; actualSize: number; numItems: number } { - return { mappingSize: 0, actualSize: 0, numItems: 0 }; + public estimateSize(): Promise<{ mappingSize: number; actualSize: number; numItems: number }> { + return Promise.resolve({ mappingSize: 0, actualSize: 0, numItems: 0 }); } } diff --git a/yarn-project/archiver/src/factory.ts b/yarn-project/archiver/src/factory.ts index 7db0968af918..c210aae82610 100644 --- a/yarn-project/archiver/src/factory.ts +++ b/yarn-project/archiver/src/factory.ts @@ -9,7 +9,7 @@ import { FunctionType, decodeFunctionSignature } from '@aztec/foundation/abi'; import { createLogger } from '@aztec/foundation/log'; import { type Maybe } from '@aztec/foundation/types'; import { type DataStoreConfig } from '@aztec/kv-store/config'; -import { createStore } from '@aztec/kv-store/lmdb'; +import { createStore } from '@aztec/kv-store/lmdb-v2'; import { TokenContractArtifact } from '@aztec/noir-contracts.js/Token'; import { TokenBridgeContractArtifact } from '@aztec/noir-contracts.js/TokenBridge'; import { protocolContractNames } from '@aztec/protocol-contracts'; diff --git a/yarn-project/aztec-node/src/aztec-node/server.ts b/yarn-project/aztec-node/src/aztec-node/server.ts index 8d0f053a9897..6e215c114543 100644 --- a/yarn-project/aztec-node/src/aztec-node/server.ts +++ b/yarn-project/aztec-node/src/aztec-node/server.ts @@ -449,7 +449,7 @@ export class AztecNodeService implements AztecNode, Traceable { // We first check if the tx is in pending (instead of first checking if it is mined) because if we first check // for mined and then for pending there could be a race condition where the tx is mined between the two checks // and we would incorrectly return a TxReceipt with status DROPPED - if (this.p2pClient.getTxStatus(txHash) === 'pending') { + if ((await this.p2pClient.getTxStatus(txHash)) === 'pending') { txReceipt = new TxReceipt(txHash, TxStatus.PENDING, ''); } diff --git a/yarn-project/circuit-types/src/p2p/block_attestation.ts b/yarn-project/circuit-types/src/p2p/block_attestation.ts index 9099699ccf13..a8389ef6c508 100644 --- a/yarn-project/circuit-types/src/p2p/block_attestation.ts +++ b/yarn-project/circuit-types/src/p2p/block_attestation.ts @@ -70,7 +70,7 @@ export class BlockAttestation extends Gossipable { * Lazily evaluate and cache the sender of the attestation * @returns The sender of the attestation */ - async getSender() { + async getSender(): Promise { if (!this.sender) { // Recover the sender from the attestation const hashed = await getHashedSignaturePayloadEthSignedMessage( diff --git a/yarn-project/end-to-end/scripts/test_simple.sh b/yarn-project/end-to-end/scripts/test_simple.sh index 26d68d5c9814..781ab93c7957 100755 --- a/yarn-project/end-to-end/scripts/test_simple.sh +++ b/yarn-project/end-to-end/scripts/test_simple.sh @@ -10,7 +10,7 @@ # - runInBand is provided, to run the test in the main thread (no 
child process). It avoids various issues.
 set -eu

-export CHROME_BIN=/root/.cache/ms-playwright/chromium-1155/chrome-linux/chrome
+export CHROME_BIN=/root/.cache/ms-playwright/chromium-1148/chrome-linux/chrome
 export HARDWARE_CONCURRENCY=16
 export RAYON_NUM_THREADS=1
 export LOG_LEVEL=${LOG_LEVEL:-verbose}
diff --git a/yarn-project/end-to-end/src/e2e_prover/full.test.ts b/yarn-project/end-to-end/src/e2e_prover/full.test.ts
index 4f2d68d3e980..c89812cc3572 100644
--- a/yarn-project/end-to-end/src/e2e_prover/full.test.ts
+++ b/yarn-project/end-to-end/src/e2e_prover/full.test.ts
@@ -53,7 +53,7 @@ describe('full_prover', () => {
       address: t.l1Contracts.l1ContractAddresses.rewardDistributorAddress.toString(),
       client: t.l1Contracts.publicClient,
     });
-  });
+  }, 60_000);

   afterAll(async () => {
     await t.teardown();
diff --git a/yarn-project/end-to-end/tsconfig.json b/yarn-project/end-to-end/tsconfig.json
index 15793f32a2c6..0343efeec57c 100644
--- a/yarn-project/end-to-end/tsconfig.json
+++ b/yarn-project/end-to-end/tsconfig.json
@@ -36,6 +36,9 @@
     {
       "path": "../entrypoints"
     },
+    {
+      "path": "../epoch-cache"
+    },
     {
       "path": "../ethereum"
     },
diff --git a/yarn-project/foundation/package.json b/yarn-project/foundation/package.json
index 86200f0c3e6d..8110aac88a9a 100644
--- a/yarn-project/foundation/package.json
+++ b/yarn-project/foundation/package.json
@@ -52,7 +52,8 @@
     "./array": "./dest/array/index.js",
     "./validation": "./dest/validation/index.js",
     "./promise": "./dest/promise/index.js",
-    "./string": "./dest/string/index.js"
+    "./string": "./dest/string/index.js",
+    "./message": "./dest/message/index.js"
   },
   "scripts": {
     "build": "yarn clean && tsc -b",
diff --git a/yarn-project/foundation/src/iterable/toArray.ts b/yarn-project/foundation/src/iterable/toArray.ts
index 6c586a6c3f3c..10baf75e38a1 100644
--- a/yarn-project/foundation/src/iterable/toArray.ts
+++ b/yarn-project/foundation/src/iterable/toArray.ts
@@ -1,4 +1,6 @@
-export async function toArray<T>(iterator: Iterable<T> | AsyncIterableIterator<T> | IterableIterator<T>): Promise<T[]> {
+export async function toArray<T>(
+  iterator: Iterable<T> | AsyncIterableIterator<T> | AsyncIterable<T> | IterableIterator<T>,
+): Promise<T[]> {
   const arr = [];
   for await (const i of iterator) {
     arr.push(i);
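Adding `AsyncIterable` to the accepted union lets `toArray` drain anything produced by an `async *` generator, not only full iterator objects. A quick illustration (the generator is hypothetical):

    import { toArray } from '@aztec/foundation/iterable';

    async function* numbers() {
      yield 1;
      yield 2;
    }

    // Resolves to [1, 2]; the same helper also accepts plain sync iterables.
    const values = await toArray(numbers());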
diff --git a/yarn-project/foundation/src/message/index.ts b/yarn-project/foundation/src/message/index.ts
new file mode 100644
index 000000000000..eae0730b2a5e
--- /dev/null
+++ b/yarn-project/foundation/src/message/index.ts
@@ -0,0 +1,43 @@
+export type MessageHeaderInit = {
+  /** The message ID. Optional, if not set defaults to 0 */
+  messageId?: number;
+  /** Identifies the original request. Optional */
+  requestId?: number;
+};
+
+export class MessageHeader {
+  /** A number to identify this message */
+  public readonly messageId: number;
+  /** If this message is a response to a request, the messageId of the request */
+  public readonly requestId: number;
+
+  constructor({ messageId, requestId }: MessageHeaderInit) {
+    this.messageId = messageId ?? 0;
+    this.requestId = requestId ?? 0;
+  }
+
+  static fromMessagePack(data: object): MessageHeader {
+    return new MessageHeader(data as MessageHeaderInit);
+  }
+}
+
+interface TypedMessageLike {
+  msgType: number;
+  header: {
+    messageId?: number;
+    requestId?: number;
+  };
+  value: any;
+}
+
+export class TypedMessage<T extends number, B> {
+  public constructor(public readonly msgType: T, public readonly header: MessageHeader, public readonly value: B) {}
+
+  static fromMessagePack<T extends number, B>(data: TypedMessageLike): TypedMessage<T, B> {
+    return new TypedMessage(data['msgType'] as T, MessageHeader.fromMessagePack(data['header']), data['value']);
+  }
+
+  static isTypedMessageLike(obj: any): obj is TypedMessageLike {
+    return typeof obj === 'object' && obj !== null && 'msgType' in obj && 'header' in obj && 'value' in obj;
+  }
+}
diff --git a/yarn-project/ivc-integration/package.json b/yarn-project/ivc-integration/package.json
index 302feecb2272..b2ef772e3228 100644
--- a/yarn-project/ivc-integration/package.json
+++ b/yarn-project/ivc-integration/package.json
@@ -71,7 +71,7 @@
     "chalk": "^5.3.0",
     "change-case": "^5.4.4",
     "pako": "^2.1.0",
-    "playwright": "^1.50.0",
+    "playwright": "1.49.0",
     "puppeteer": "^22.4.1",
     "tslib": "^2.4.0"
   },
@@ -84,7 +84,7 @@
     "@aztec/world-state": "workspace:^",
     "@jest/globals": "^29.5.0",
     "@msgpack/msgpack": "^3.0.0-beta2",
-    "@playwright/test": "^1.50.0",
+    "@playwright/test": "1.49.0",
     "@types/jest": "^29.5.0",
     "@types/node": "^22.8.1",
     "@types/pako": "^2.0.3",
diff --git a/yarn-project/kv-store/package.json b/yarn-project/kv-store/package.json
index aba97045383c..3d14bdae4362 100644
--- a/yarn-project/kv-store/package.json
+++ b/yarn-project/kv-store/package.json
@@ -5,6 +5,7 @@
   "exports": {
     ".": "./dest/interfaces/index.js",
     "./lmdb": "./dest/lmdb/index.js",
+    "./lmdb-v2": "./dest/lmdb-v2/index.js",
     "./indexeddb": "./dest/indexeddb/index.js",
     "./stores": "./dest/stores/index.js",
     "./config": "./dest/config.js"
@@ -12,12 +13,14 @@
   "scripts": {
     "build": "yarn clean && tsc -b",
     "build:dev": "tsc -b --watch",
+    "clean:cpp": "rm -rf $(git rev-parse --show-toplevel)/barretenberg/cpp/build-pic",
     "clean": "rm -rf ./dest .tsbuildinfo",
     "formatting": "run -T prettier --check ./src && run -T eslint ./src",
     "formatting:fix": "run -T eslint --fix ./src && run -T prettier -w ./src",
     "test:node": "NODE_NO_WARNINGS=1 mocha --config ./.mocharc.json --reporter dot",
     "test:browser": "wtr --config ./web-test-runner.config.mjs",
-    "test": "yarn test:node && yarn test:browser && true"
+    "test": "yarn test:node && yarn test:browser && true",
+    "generate": "mkdir -p build && cp -v ../../barretenberg/cpp/build-pic/lib/nodejs_module.node build"
   },
   "inherits": [
     "../package.common.json",
@@ -27,8 +30,11 @@
     "@aztec/circuit-types": "workspace:^",
     "@aztec/ethereum": "workspace:^",
     "@aztec/foundation": "workspace:^",
+    "@aztec/native": "workspace:^",
     "idb": "^8.0.0",
-    "lmdb": "^3.2.0"
+    "lmdb": "^3.2.0",
+    "msgpackr": "^1.11.2",
+    "ordered-binary": "^1.5.3"
   },
   "devDependencies": {
     "@aztec/circuits.js": "workspace:^",
@@ -39,6 +45,7 @@
     "@types/mocha": "^10.0.10",
     "@types/mocha-each": "^2.0.4",
     "@types/node": "^18.7.23",
+    "@types/sinon": "^17.0.3",
     "@web/dev-server-esbuild": "^1.0.3",
     "@web/test-runner": "^0.19.0",
     "@web/test-runner-playwright": "^0.11.0",
@@ -47,6 +54,7 @@
     "jest": "^29.5.0",
     "mocha": "^10.8.2",
     "mocha-each": "^2.0.1",
+    "sinon": "^19.0.2",
     "ts-node": "^10.9.1",
     "typescript": "^5.0.4"
   },
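The new `@aztec/foundation/message` export is a small typed envelope for request/response traffic over msgpack channels. A hedged sketch of how it might be used (the enum and payload are illustrative, not part of this diff):

    import { MessageHeader, TypedMessage } from '@aztec/foundation/message';

    enum MyMsgType {
      PING = 1,
    }

    const msg = new TypedMessage(MyMsgType.PING, new MessageHeader({ messageId: 7 }), { payload: 'hi' });

    // After unpacking an unknown msgpack object, narrow it before trusting its shape.
    function decode(raw: unknown) {
      if (TypedMessage.isTypedMessageLike(raw)) {
        return TypedMessage.fromMessagePack(raw);
      }
      throw new Error('not a TypedMessage');
    }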
diff --git a/yarn-project/kv-store/src/indexeddb/store.ts b/yarn-project/kv-store/src/indexeddb/store.ts
index fe72cdf06621..05b0c9e75877 100644
--- a/yarn-project/kv-store/src/indexeddb/store.ts
+++ b/yarn-project/kv-store/src/indexeddb/store.ts
@@ -3,7 +3,7 @@ import { type Logger } from '@aztec/foundation/log';
 import { type DBSchema, type IDBPDatabase, deleteDB, openDB } from 'idb';

 import { type AztecAsyncArray } from '../interfaces/array.js';
-import { type Key } from '../interfaces/common.js';
+import { type Key, type StoreSize } from '../interfaces/common.js';
 import { type AztecAsyncCounter } from '../interfaces/counter.js';
 import { type AztecAsyncMap, type AztecAsyncMultiMap } from '../interfaces/map.js';
 import { type AztecAsyncSet } from '../interfaces/set.js';
@@ -124,7 +124,7 @@ export class AztecIndexedDBStore implements AztecAsyncKVStore {
     return multimap;
   }

-  openCounter<K extends string | number | Array<string | number>>(_name: string): AztecAsyncCounter<K> {
+  openCounter<K extends Key>(_name: string): AztecAsyncCounter<K> {
     throw new Error('Method not implemented.');
   }
@@ -187,7 +187,11 @@ export class AztecIndexedDBStore implements AztecAsyncKVStore {
     return deleteDB(this.#name);
   }

-  estimateSize(): { mappingSize: number; actualSize: number; numItems: number } {
-    return { mappingSize: 0, actualSize: 0, numItems: 0 };
+  estimateSize(): Promise<StoreSize> {
+    return Promise.resolve({ mappingSize: 0, actualSize: 0, numItems: 0 });
+  }
+
+  close(): Promise<void> {
+    return Promise.resolve();
   }
 }
diff --git a/yarn-project/kv-store/src/interfaces/common.ts b/yarn-project/kv-store/src/interfaces/common.ts
index c4e0effa8c83..8edf29a3e016 100644
--- a/yarn-project/kv-store/src/interfaces/common.ts
+++ b/yarn-project/kv-store/src/interfaces/common.ts
@@ -1,7 +1,7 @@
 /**
  * The key type for use with the kv-store
  */
-export type Key = string | number | Array<string | number>;
+export type Key = string | number;

 /**
  * A range of keys to iterate over.
@@ -16,3 +16,5 @@ export type Range = {
   /** The maximum number of items to iterate over */
   limit?: number;
 };
+
+export type StoreSize = { mappingSize: number; actualSize: number; numItems: number };
diff --git a/yarn-project/kv-store/src/interfaces/index.ts b/yarn-project/kv-store/src/interfaces/index.ts
index c97d327fa04d..05d8e3f8fa15 100644
--- a/yarn-project/kv-store/src/interfaces/index.ts
+++ b/yarn-project/kv-store/src/interfaces/index.ts
@@ -4,4 +4,4 @@ export * from './counter.js';
 export * from './singleton.js';
 export * from './store.js';
 export * from './set.js';
-export { Range } from './common.js';
+export { Range, StoreSize } from './common.js';
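With `StoreSize` shared through `@aztec/kv-store`, `estimateSize` is uniformly asynchronous across backends (the IndexedDB implementation simply resolves zeros). A sketch of a caller, assuming any `AztecAsyncKVStore` instance:

    import type { AztecAsyncKVStore, StoreSize } from '@aztec/kv-store';

    async function reportSize(store: AztecAsyncKVStore) {
      // Every backend now resolves the same StoreSize shape.
      const size: StoreSize = await store.estimateSize();
      console.log(size.mappingSize, size.actualSize, size.numItems);
    }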
diff --git a/yarn-project/kv-store/src/interfaces/map.ts b/yarn-project/kv-store/src/interfaces/map.ts
index f63505dae9fc..bc594588db54 100644
--- a/yarn-project/kv-store/src/interfaces/map.ts
+++ b/yarn-project/kv-store/src/interfaces/map.ts
@@ -11,13 +11,6 @@ interface AztecBaseMap {
    */
   set(key: K, val: V): Promise<void>;

-  /**
-   * Atomically swap the value at the given key
-   * @param key - The key to swap the value at
-   * @param fn - The function to swap the value with
-   */
-  swap(key: K, fn: (val: V | undefined) => V): Promise<void>;
-
   /**
    * Sets the value at the given key if it does not already exist.
    * @param key - The key to set the value at
diff --git a/yarn-project/kv-store/src/interfaces/map_test_suite.ts b/yarn-project/kv-store/src/interfaces/map_test_suite.ts
index 7736315ec6f7..881aff90f9f0 100644
--- a/yarn-project/kv-store/src/interfaces/map_test_suite.ts
+++ b/yarn-project/kv-store/src/interfaces/map_test_suite.ts
@@ -18,7 +18,7 @@ export function describeAztecMap(
   beforeEach(async () => {
     store = await getStore();
-    map = store.openMultiMap('test');
+    map = store.openMultiMap('test');
   });

   afterEach(async () => {
@@ -125,21 +125,6 @@ export function describeAztecMap(
     expect(await getValues('foo')).to.deep.equal(['baz']);
   });

-  it('supports tuple keys', async () => {
-    // Use a new map because key structure has changed
-    const tupleMap = store.openMap<[number, string], string>('test-tuple');
-
-    await tupleMap.set([5, 'bar'], 'val');
-    await tupleMap.set([0, 'foo'], 'val');
-
-    expect(await keys(undefined, tupleMap)).to.deep.equal([
-      [0, 'foo'],
-      [5, 'bar'],
-    ]);
-
-    expect(await get([5, 'bar'], tupleMap)).to.equal('val');
-  });
-
   it('supports range queries', async () => {
     await map.set('a', 'a');
     await map.set('b', 'b');
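Tuple keys are dropped along with that test (the `Key` type is now just `string | number`), while range queries remain. A sketch of the surviving iteration API against an async map (store setup omitted; names are illustrative):

    import type { AztecAsyncKVStore } from '@aztec/kv-store';

    async function demo(store: AztecAsyncKVStore) {
      const map = store.openMap<string, string>('names');
      await map.set('a', 'alice');
      await map.set('b', 'bob');

      // Ranges iterate in the key's natural order.
      for await (const [key, value] of map.entriesAsync({ start: 'a', end: 'c' })) {
        console.log(key, value);
      }
    }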
diff --git a/yarn-project/kv-store/src/interfaces/store.ts b/yarn-project/kv-store/src/interfaces/store.ts
index bee1e2e0e8a0..176628645da6 100644
--- a/yarn-project/kv-store/src/interfaces/store.ts
+++ b/yarn-project/kv-store/src/interfaces/store.ts
@@ -1,5 +1,5 @@
 import { type AztecArray, type AztecAsyncArray } from './array.js';
-import { type Key } from './common.js';
+import { type Key, type StoreSize } from './common.js';
 import { type AztecAsyncCounter, type AztecCounter } from './counter.js';
 import {
   type AztecAsyncMap,
@@ -94,7 +94,12 @@ export interface AztecKVStore {
   /**
    * Estimates the size of the store in bytes.
    */
-  estimateSize(): { mappingSize: number; actualSize: number; numItems: number };
+  estimateSize(): Promise<StoreSize>;
+
+  /**
+   * Closes the store
+   */
+  close(): Promise<void>;
 }

 export interface AztecAsyncKVStore {
@@ -163,5 +168,10 @@
   /**
    * Estimates the size of the store in bytes.
    */
-  estimateSize(): { mappingSize: number; actualSize: number; numItems: number };
+  estimateSize(): Promise<StoreSize>;
+
+  /**
+   * Closes the store
+   */
+  close(): Promise<void>;
 }
diff --git a/yarn-project/kv-store/src/lmdb-v2/factory.ts b/yarn-project/kv-store/src/lmdb-v2/factory.ts
new file mode 100644
index 000000000000..29994263ad6e
--- /dev/null
+++ b/yarn-project/kv-store/src/lmdb-v2/factory.ts
@@ -0,0 +1,79 @@
+import { EthAddress } from '@aztec/circuits.js';
+import { type Logger, createLogger } from '@aztec/foundation/log';
+
+import { mkdir, mkdtemp, readFile, rm, writeFile } from 'fs/promises';
+import { tmpdir } from 'os';
+import { join } from 'path';
+
+import { type DataStoreConfig } from '../config.js';
+import { AztecLMDBStoreV2 } from './store.js';
+
+const ROLLUP_ADDRESS_FILE = 'rollup_address';
+const MAX_READERS = 16;
+
+export async function createStore(
+  name: string,
+  config: DataStoreConfig,
+  log: Logger = createLogger('kv-store:lmdb-v2:' + name),
+): Promise<AztecLMDBStoreV2> {
+  const { dataDirectory, l1Contracts } = config;
+
+  let store: AztecLMDBStoreV2;
+  if (typeof dataDirectory !== 'undefined') {
+    const subDir = join(dataDirectory, name);
+    await mkdir(subDir, { recursive: true });
+
+    if (l1Contracts) {
+      const { rollupAddress } = l1Contracts;
+      const localRollupAddress = await readFile(join(subDir, ROLLUP_ADDRESS_FILE), 'utf-8')
+        .then(EthAddress.fromString)
+        .catch(() => EthAddress.ZERO);
+
+      if (!localRollupAddress.equals(rollupAddress)) {
+        if (!localRollupAddress.isZero()) {
+          log.warn(`Rollup address mismatch. Clearing entire database...`, {
+            expected: rollupAddress,
+            found: localRollupAddress,
+          });
+
+          await rm(subDir, { recursive: true, force: true });
+          await mkdir(subDir, { recursive: true });
+        }
+
+        await writeFile(join(subDir, ROLLUP_ADDRESS_FILE), rollupAddress.toString());
+      }
+    }
+
+    log.info(
+      `Creating ${name} data store at directory ${subDir} with map size ${config.dataStoreMapSizeKB} KB (LMDB v2)`,
+    );
+    store = await AztecLMDBStoreV2.new(subDir, config.dataStoreMapSizeKB, MAX_READERS, () => Promise.resolve(), log);
+  } else {
+    store = await openTmpStore(name, true, config.dataStoreMapSizeKB, MAX_READERS, log);
+  }
+
+  return store;
+}
+
+export async function openTmpStore(
+  name: string,
+  ephemeral: boolean = true,
+  dbMapSizeKb = 10 * 1_024 * 1_024, // 10GB
+  maxReaders = MAX_READERS,
+  log: Logger = createLogger('kv-store:lmdb-v2:' + name),
+): Promise<AztecLMDBStoreV2> {
+  const dataDir = await mkdtemp(join(tmpdir(), name + '-'));
+  log.debug(`Created temporary data store at: ${dataDir} with size: ${dbMapSizeKb} KB (LMDB v2)`);
+
+  // pass a cleanup callback because process.on('beforeExit', cleanup) does not work under Jest
+  const cleanup = async () => {
+    if (ephemeral) {
+      await rm(dataDir, { recursive: true, force: true });
+      log.debug(`Deleted temporary data store: ${dataDir}`);
+    } else {
+      log.debug(`Leaving temporary data store: ${dataDir}`);
+    }
+  };
+
+  return AztecLMDBStoreV2.new(dataDir, dbMapSizeKb, maxReaders, cleanup, log);
+}
diff --git a/yarn-project/kv-store/src/lmdb-v2/index.ts b/yarn-project/kv-store/src/lmdb-v2/index.ts
new file mode 100644
index 000000000000..af723281fd47
--- /dev/null
+++ b/yarn-project/kv-store/src/lmdb-v2/index.ts
@@ -0,0 +1,2 @@
+export * from './store.js';
+export * from './factory.js';
diff --git a/yarn-project/kv-store/src/lmdb-v2/map.test.ts b/yarn-project/kv-store/src/lmdb-v2/map.test.ts
new file mode 100644
index 000000000000..c49726ceabea
--- /dev/null
+++ b/yarn-project/kv-store/src/lmdb-v2/map.test.ts
@@ -0,0 +1,4 
@@ +import { describeAztecMap } from '../interfaces/map_test_suite.js'; +import { openTmpStore } from './factory.js'; + +describeAztecMap('LMDBMap', () => openTmpStore('test'), true); diff --git a/yarn-project/kv-store/src/lmdb-v2/map.ts b/yarn-project/kv-store/src/lmdb-v2/map.ts new file mode 100644 index 000000000000..c0b5141b5b87 --- /dev/null +++ b/yarn-project/kv-store/src/lmdb-v2/map.ts @@ -0,0 +1,233 @@ +import { Encoder } from 'msgpackr'; + +import type { Key, Range } from '../interfaces/common.js'; +import type { AztecAsyncMap, AztecAsyncMultiMap } from '../interfaces/map.js'; +import { type ReadTransaction } from './read_transaction.js'; +import { type AztecLMDBStoreV2, execInReadTx, execInWriteTx } from './store.js'; +import { deserializeKey, maxKey, minKey, serializeKey } from './utils.js'; + +export class LMDBMap implements AztecAsyncMap { + private prefix: string; + private encoder = new Encoder(); + + constructor(private store: AztecLMDBStoreV2, name: string) { + this.prefix = `map:${name}`; + } + /** + * Sets the value at the given key. + * @param key - The key to set the value at + * @param val - The value to set + */ + set(key: K, val: V): Promise { + return execInWriteTx(this.store, tx => tx.set(serializeKey(this.prefix, key), this.encoder.pack(val))); + } + + /** + * Sets the value at the given key if it does not already exist. + * @param key - The key to set the value at + * @param val - The value to set + */ + setIfNotExists(key: K, val: V): Promise { + return execInWriteTx(this.store, async tx => { + const strKey = serializeKey(this.prefix, key); + const exists = !!(await tx.get(strKey)); + if (!exists) { + await tx.set(strKey, this.encoder.pack(val)); + return true; + } + return false; + }); + } + + /** + * Deletes the value at the given key. + * @param key - The key to delete the value at + */ + delete(key: K): Promise { + return execInWriteTx(this.store, tx => tx.remove(serializeKey(this.prefix, key))); + } + + getAsync(key: K): Promise { + return execInReadTx(this.store, async tx => { + const val = await tx.get(serializeKey(this.prefix, key)); + return val ? this.encoder.unpack(val) : undefined; + }); + } + + hasAsync(key: K): Promise { + return execInReadTx(this.store, async tx => !!(await tx.get(serializeKey(this.prefix, key)))); + } + + /** + * Iterates over the map's key-value entries in the key's natural order + * @param range - The range of keys to iterate over + */ + async *entriesAsync(range?: Range): AsyncIterableIterator<[K, V]> { + const reverse = range?.reverse ?? false; + const startKey = range?.start ? serializeKey(this.prefix, range.start) : minKey(this.prefix); + + const endKey = range?.end ? serializeKey(this.prefix, range.end) : reverse ? maxKey(this.prefix) : undefined; + + let tx: ReadTransaction | undefined = this.store.getCurrentWriteTx(); + const shouldClose = !tx; + tx ??= this.store.getReadTx(); + + try { + for await (const [key, val] of tx.iterate( + reverse ? endKey! : startKey, + reverse ? 
startKey : endKey, + reverse, + range?.limit, + )) { + const deserializedKey = deserializeKey(this.prefix, key); + if (!deserializedKey) { + break; + } + yield [deserializedKey, this.encoder.unpack(val)]; + } + } finally { + if (shouldClose) { + tx.close(); + } + } + } + + /** + * Iterates over the map's values in the key's natural order + * @param range - The range of keys to iterate over + */ + async *valuesAsync(range?: Range): AsyncIterableIterator { + for await (const [_, value] of this.entriesAsync(range)) { + yield value; + } + } + + /** + * Iterates over the map's keys in the key's natural order + * @param range - The range of keys to iterate over + */ + async *keysAsync(range?: Range): AsyncIterableIterator { + for await (const [key, _] of this.entriesAsync(range)) { + yield key; + } + } +} + +export class LMDBMultiMap implements AztecAsyncMultiMap { + private prefix: string; + private encoder = new Encoder(); + constructor(private store: AztecLMDBStoreV2, name: string) { + this.prefix = `multimap:${name}`; + } + + /** + * Sets the value at the given key. + * @param key - The key to set the value at + * @param val - The value to set + */ + set(key: K, val: V): Promise { + return execInWriteTx(this.store, tx => tx.setIndex(serializeKey(this.prefix, key), this.encoder.pack(val))); + } + + /** + * Sets the value at the given key if it does not already exist. + * @param key - The key to set the value at + * @param val - The value to set + */ + setIfNotExists(key: K, val: V): Promise { + return execInWriteTx(this.store, async tx => { + const exists = !!(await this.getAsync(key)); + if (!exists) { + await tx.setIndex(serializeKey(this.prefix, key), this.encoder.pack(val)); + return true; + } + return false; + }); + } + + /** + * Deletes the value at the given key. + * @param key - The key to delete the value at + */ + delete(key: K): Promise { + return execInWriteTx(this.store, tx => tx.removeIndex(serializeKey(this.prefix, key))); + } + + getAsync(key: K): Promise { + return execInReadTx(this.store, async tx => { + const val = await tx.getIndex(serializeKey(this.prefix, key)); + return val.length > 0 ? this.encoder.unpack(val[0]) : undefined; + }); + } + + hasAsync(key: K): Promise { + return execInReadTx(this.store, async tx => (await tx.getIndex(serializeKey(this.prefix, key))).length > 0); + } + + /** + * Iterates over the map's key-value entries in the key's natural order + * @param range - The range of keys to iterate over + */ + async *entriesAsync(range?: Range): AsyncIterableIterator<[K, V]> { + const reverse = range?.reverse ?? false; + const startKey = range?.start ? serializeKey(this.prefix, range.start) : minKey(this.prefix); + const endKey = range?.end ? serializeKey(this.prefix, range.end) : reverse ? maxKey(this.prefix) : undefined; + + let tx: ReadTransaction | undefined = this.store.getCurrentWriteTx(); + const shouldClose = !tx; + tx ??= this.store.getReadTx(); + + try { + for await (const [key, vals] of tx.iterateIndex( + reverse ? endKey! : startKey, + reverse ? 
startKey : endKey, + reverse, + range?.limit, + )) { + const deserializedKey = deserializeKey(this.prefix, key); + if (!deserializedKey) { + break; + } + + for (const val of vals) { + yield [deserializedKey, this.encoder.unpack(val)]; + } + } + } finally { + if (shouldClose) { + tx.close(); + } + } + } + + /** + * Iterates over the map's values in the key's natural order + * @param range - The range of keys to iterate over + */ + async *valuesAsync(range?: Range): AsyncIterableIterator { + for await (const [_, value] of this.entriesAsync(range)) { + yield value; + } + } + + /** + * Iterates over the map's keys in the key's natural order + * @param range - The range of keys to iterate over + */ + async *keysAsync(range?: Range): AsyncIterableIterator { + for await (const [key, _] of this.entriesAsync(range)) { + yield key; + } + } + + deleteValue(key: K, val: V | undefined): Promise { + return execInWriteTx(this.store, tx => tx.removeIndex(serializeKey(this.prefix, key), this.encoder.pack(val))); + } + + async *getValuesAsync(key: K): AsyncIterableIterator { + const values = await execInReadTx(this.store, tx => tx.getIndex(serializeKey(this.prefix, key))); + for (const value of values) { + yield this.encoder.unpack(value); + } + } +} diff --git a/yarn-project/kv-store/src/lmdb-v2/message.ts b/yarn-project/kv-store/src/lmdb-v2/message.ts new file mode 100644 index 000000000000..34875e56f316 --- /dev/null +++ b/yarn-project/kv-store/src/lmdb-v2/message.ts @@ -0,0 +1,146 @@ +export enum Database { + DATA = 'data', + INDEX = 'index', +} + +export const CURSOR_PAGE_SIZE = 10; + +export enum LMDBMessageType { + OPEN_DATABASE = 100, + GET, + HAS, + + START_CURSOR, + ADVANCE_CURSOR, + CLOSE_CURSOR, + + BATCH, + + STATS, + + CLOSE, +} + +type Key = Uint8Array; +type Value = Uint8Array; +type OptionalValues = Array; +type KeyOptionalValues = [Key, null | Array]; +type KeyValues = [Key, Value[]]; + +interface OpenDatabaseRequest { + db: string; + uniqueKeys?: boolean; +} + +interface GetRequest { + keys: Key[]; + db: string; +} + +interface GetResponse { + values: OptionalValues; +} + +interface HasRequest { + entries: KeyOptionalValues[]; + db: string; +} + +interface StartCursorRequest { + key: Key; + reverse: boolean; + count: number | null; + onePage: boolean | null; + db: string; +} + +interface AdvanceCursorRequest { + cursor: number; + count: number | null; +} + +interface CloseCursorRequest { + cursor: number; +} + +export interface Batch { + addEntries: Array; + removeEntries: Array; +} + +interface BatchRequest { + batches: Map; +} + +export type LMDBRequestBody = { + [LMDBMessageType.OPEN_DATABASE]: OpenDatabaseRequest; + + [LMDBMessageType.GET]: GetRequest; + [LMDBMessageType.HAS]: HasRequest; + + [LMDBMessageType.START_CURSOR]: StartCursorRequest; + [LMDBMessageType.ADVANCE_CURSOR]: AdvanceCursorRequest; + [LMDBMessageType.CLOSE_CURSOR]: CloseCursorRequest; + + [LMDBMessageType.BATCH]: BatchRequest; + + [LMDBMessageType.STATS]: void; + + [LMDBMessageType.CLOSE]: void; +}; + +interface GetResponse { + values: OptionalValues; +} + +interface HasResponse { + exists: boolean[]; +} + +interface StartCursorResponse { + cursor: number | null; + entries: Array; +} + +interface AdvanceCursorResponse { + entries: Array; + done: boolean; +} + +interface BatchResponse { + durationNs: number; +} + +interface BoolResponse { + ok: true; +} + +interface StatsResponse { + stats: Array<{ + name: string; + numDataItems: bigint | number; + totalUsedSize: bigint | number; + }>; + dbMapSizeBytes: bigint | 
number; +} + +export type LMDBResponseBody = { + [LMDBMessageType.OPEN_DATABASE]: BoolResponse; + + [LMDBMessageType.GET]: GetResponse; + [LMDBMessageType.HAS]: HasResponse; + + [LMDBMessageType.START_CURSOR]: StartCursorResponse; + [LMDBMessageType.ADVANCE_CURSOR]: AdvanceCursorResponse; + [LMDBMessageType.CLOSE_CURSOR]: BoolResponse; + + [LMDBMessageType.BATCH]: BatchResponse; + + [LMDBMessageType.STATS]: StatsResponse; + + [LMDBMessageType.CLOSE]: BoolResponse; +}; + +export interface LMDBMessageChannel { + sendMessage(msgType: T, body: LMDBRequestBody[T]): Promise; +} diff --git a/yarn-project/kv-store/src/lmdb-v2/read_transaction.test.ts b/yarn-project/kv-store/src/lmdb-v2/read_transaction.test.ts new file mode 100644 index 000000000000..4597ac3347cd --- /dev/null +++ b/yarn-project/kv-store/src/lmdb-v2/read_transaction.test.ts @@ -0,0 +1,171 @@ +import { toArray } from '@aztec/foundation/iterable'; +import { promiseWithResolvers } from '@aztec/foundation/promise'; + +import { expect } from 'chai'; +import { type SinonStubbedInstance, stub } from 'sinon'; + +import { + CURSOR_PAGE_SIZE, + Database, + type LMDBMessageChannel, + LMDBMessageType, + type LMDBResponseBody, +} from './message.js'; +import { ReadTransaction } from './read_transaction.js'; + +describe('ReadTransaction', () => { + let channel: SinonStubbedInstance; + let tx: ReadTransaction; + + beforeEach(() => { + channel = stub({ + sendMessage: () => {}, + } as any); + tx = new ReadTransaction(channel); + }); + + it('sends GET requests', async () => { + const getDeferred = promiseWithResolvers(); + + channel.sendMessage.returns(getDeferred.promise); + + const resp = tx.get(Buffer.from('test_key1')); + + expect( + channel.sendMessage.calledWith(LMDBMessageType.GET, { + db: Database.DATA, + keys: [Buffer.from('test_key1')], + }), + ).to.be.true; + + getDeferred.resolve({ + values: [[Buffer.from('foo')]], + }); + + expect(await resp).to.deep.eq(Buffer.from('foo')); + }); + + it('iterates the database', async () => { + channel.sendMessage.onCall(0).resolves({ + cursor: 42, + entries: [[Buffer.from('foo'), [Buffer.from('a value')]]], + done: false, + }); + channel.sendMessage.onCall(1).resolves({ + entries: [[Buffer.from('quux'), [Buffer.from('another value')]]], + done: true, + }); + channel.sendMessage.onCall(2).resolves({ + ok: true, + }); + + const iterable = tx.iterate(Buffer.from('foo')); + const entries = await toArray(iterable); + + expect(entries).to.deep.eq([ + [Buffer.from('foo'), Buffer.from('a value')], + [Buffer.from('quux'), Buffer.from('another value')], + ]); + + expect( + channel.sendMessage.calledWith(LMDBMessageType.START_CURSOR, { + db: Database.DATA, + key: Buffer.from('foo'), + count: CURSOR_PAGE_SIZE, + onePage: false, + reverse: false, + }), + ).to.be.true; + + expect( + channel.sendMessage.calledWith(LMDBMessageType.ADVANCE_CURSOR, { + cursor: 42, + count: CURSOR_PAGE_SIZE, + }), + ).to.be.true; + + expect( + channel.sendMessage.calledWith(LMDBMessageType.CLOSE_CURSOR, { + cursor: 42, + }), + ).to.be.true; + }); + + it('closes the cursor early', async () => { + channel.sendMessage.onCall(0).resolves({ + cursor: 42, + entries: [[Buffer.from('foo'), [Buffer.from('a value')]]], + done: false, + }); + + channel.sendMessage + .withArgs(LMDBMessageType.ADVANCE_CURSOR, { cursor: 42, count: CURSOR_PAGE_SIZE }) + .rejects(new Error('SHOULD NOT BE CALLED')); + + channel.sendMessage.withArgs(LMDBMessageType.CLOSE_CURSOR, { cursor: 42 }).resolves({ ok: true }); + + for await (const entry of 
tx.iterate(Buffer.from('foo'))) { + expect(entry).to.deep.eq([Buffer.from('foo'), Buffer.from('a value')]); + break; + } + + expect( + channel.sendMessage.calledWith(LMDBMessageType.CLOSE_CURSOR, { + cursor: 42, + }), + ).to.be.true; + }); + + it('closes the cursor even in the case of an error', async () => { + channel.sendMessage.onCall(0).resolves({ + cursor: 42, + entries: [[Buffer.from('foo'), [Buffer.from('a value')]]], + done: false, + }); + + channel.sendMessage + .withArgs(LMDBMessageType.ADVANCE_CURSOR, { cursor: 42, count: CURSOR_PAGE_SIZE }) + .rejects(new Error('SHOULD NOT BE CALLED')); + + channel.sendMessage.withArgs(LMDBMessageType.CLOSE_CURSOR, { cursor: 42 }).resolves({ ok: true }); + + try { + for await (const entry of tx.iterate(Buffer.from('foo'))) { + expect(entry).to.deep.eq([Buffer.from('foo'), Buffer.from('a value')]); + throw new Error(); + } + } catch { + // no op + } + + expect( + channel.sendMessage.calledWith(LMDBMessageType.CLOSE_CURSOR, { + cursor: 42, + }), + ).to.be.true; + }); + + it('handles empty cursors', async () => { + channel.sendMessage + .withArgs(LMDBMessageType.START_CURSOR, { + key: Buffer.from('foo'), + reverse: false, + count: CURSOR_PAGE_SIZE, + db: Database.DATA, + onePage: false, + }) + .resolves({ + cursor: null, + entries: [], + done: true, + }); + + const arr = await toArray(tx.iterate(Buffer.from('foo'))); + expect(arr).to.deep.eq([]); + }); + + it('after close it does not accept requests', async () => { + tx.close(); + await expect(tx.get(Buffer.from('foo'))).eventually.to.be.rejectedWith(Error, 'Transaction is closed'); + }); +}); diff --git a/yarn-project/kv-store/src/lmdb-v2/read_transaction.ts b/yarn-project/kv-store/src/lmdb-v2/read_transaction.ts new file mode 100644 index 000000000000..eff44b23485f --- /dev/null +++ b/yarn-project/kv-store/src/lmdb-v2/read_transaction.ts @@ -0,0 +1,116 @@ +import { CURSOR_PAGE_SIZE, Database, type LMDBMessageChannel, LMDBMessageType } from './message.js'; + +export class ReadTransaction { + protected open = true; + + constructor(protected channel: LMDBMessageChannel) {} + + public close(): void { + if (!this.open) { + return; + } + this.open = false; + } + + protected assertIsOpen() { + if (!this.open) { + throw new Error('Transaction is closed'); + } + } + + public async get(key: Uint8Array): Promise<Uint8Array | undefined> { + this.assertIsOpen(); + const response = await this.channel.sendMessage(LMDBMessageType.GET, { keys: [key], db: Database.DATA }); + return response.values[0]?.[0] ?? undefined; + } + + public async getIndex(key: Uint8Array): Promise<Uint8Array[]> { + this.assertIsOpen(); + const response = await this.channel.sendMessage(LMDBMessageType.GET, { keys: [key], db: Database.INDEX }); + return response.values[0] ?? []; + } + + public async *iterate( + startKey: Uint8Array, + endKey?: Uint8Array, + reverse = false, + limit?: number, + ): AsyncIterable<[Uint8Array, Uint8Array]> { + yield* this.#iterate(Database.DATA, startKey, endKey, reverse, limit, vals => vals[0]); + } + + public async *iterateIndex( + startKey: Uint8Array, + endKey?: Uint8Array, + reverse = false, + limit?: number, + ): AsyncIterable<[Uint8Array, Uint8Array[]]> { + yield* this.#iterate(Database.INDEX, startKey, endKey, reverse, limit, vals => vals); + } + + async *#iterate<T>( + db: string, + startKey: Uint8Array, + endKey: Uint8Array | undefined, + reverse: boolean, + limit: number | undefined, + map: (val: Uint8Array[]) => T, + ): AsyncIterable<[Uint8Array, T]> { + this.assertIsOpen(); + + const response = await this.channel.sendMessage(LMDBMessageType.START_CURSOR, { + key: startKey, + reverse, + count: typeof limit === 'number' ? Math.min(limit, CURSOR_PAGE_SIZE) : CURSOR_PAGE_SIZE, + onePage: typeof limit === 'number' && limit < CURSOR_PAGE_SIZE, + db, + }); + + const cursor = response.cursor; + let entries = response.entries; + let done = typeof cursor !== 'number'; + let count = 0; + + try { + // emit the first page and any subsequent pages in a while loop + // NB: end condition is in the middle of the while loop + while (entries.length > 0) { + for (const [key, values] of entries) { + if (typeof limit === 'number' && count >= limit) { + done = true; + break; + } + + if (endKey) { + const cmp = Buffer.compare(key, endKey); + if ((!reverse && cmp >= 0) || (reverse && cmp <= 0)) { + done = true; + break; + } + } + + count++; + yield [key, map(values)]; + } + + // cursor is null if DB returned everything in the first page + if (typeof cursor !== 'number' || done) { + break; + } + + const response = await this.channel.sendMessage(LMDBMessageType.ADVANCE_CURSOR, { + cursor, + count: CURSOR_PAGE_SIZE, + }); + + done = response.done; + entries = response.entries; + } + } finally { + // we might not have anything to close + if (typeof cursor === 'number') { + await this.channel.sendMessage(LMDBMessageType.CLOSE_CURSOR, { cursor }); + } + } + } +} diff --git a/yarn-project/kv-store/src/lmdb-v2/singleton.test.ts b/yarn-project/kv-store/src/lmdb-v2/singleton.test.ts new file mode 100644 index 000000000000..a39bd609b044 --- /dev/null +++ b/yarn-project/kv-store/src/lmdb-v2/singleton.test.ts @@ -0,0 +1,4 @@ +import { describeAztecSingleton } from '../interfaces/singleton_test_suite.js'; +import { openTmpStore } from './factory.js'; + +describeAztecSingleton('LMDBSingleValue', () => openTmpStore('test'), true); diff --git a/yarn-project/kv-store/src/lmdb-v2/singleton.ts b/yarn-project/kv-store/src/lmdb-v2/singleton.ts new file mode 100644 index 000000000000..46ee72285a6a --- /dev/null +++ b/yarn-project/kv-store/src/lmdb-v2/singleton.ts @@ -0,0 +1,34 @@ +import { Encoder } from 'msgpackr'; + +import { type AztecAsyncSingleton } from '../interfaces/singleton.js'; +import { type AztecLMDBStoreV2, execInReadTx, execInWriteTx } from './store.js'; +import { serializeKey } from './utils.js'; + +export class LMDBSingleValue<T> implements AztecAsyncSingleton<T> { + private key: Uint8Array; + private encoder = new Encoder(); + constructor(private store: AztecLMDBStoreV2, name: string) { + this.key = serializeKey(`singleton:${name}`, 'value'); + } + + getAsync(): Promise<T | undefined> { + return execInReadTx(this.store, async tx => { + const val = await tx.get(this.key); + return val ? this.encoder.unpack(val) : undefined; + }); + } + + set(val: T): Promise<boolean> { + return execInWriteTx(this.store, async tx => { + await tx.set(this.key, this.encoder.pack(val)); + return true; + }); + } + + delete(): Promise<boolean> { + return execInWriteTx(this.store, async tx => { + await tx.remove(this.key); + return true; + }); + } +} diff --git a/yarn-project/kv-store/src/lmdb-v2/store.test.ts b/yarn-project/kv-store/src/lmdb-v2/store.test.ts new file mode 100644 index 000000000000..7209611ffb0e --- /dev/null +++ b/yarn-project/kv-store/src/lmdb-v2/store.test.ts @@ -0,0 +1,181 @@ +import { promiseWithResolvers } from '@aztec/foundation/promise'; +import { sleep } from '@aztec/foundation/sleep'; + +import { expect } from 'chai'; +import { stub } from 'sinon'; + +import { openTmpStore } from './factory.js'; +import { type ReadTransaction } from './read_transaction.js'; +import { type AztecLMDBStoreV2 } from './store.js'; + +const testMaxReaders = 4; + +describe('AztecLMDBStoreV2', () => { + let store: AztecLMDBStoreV2; + + beforeEach(async () => { + store = await openTmpStore('test', true, 10 * 1024 * 1024, testMaxReaders); + }); + + afterEach(async () => { + await store.delete(); + }); + + it('returns undefined for unset keys', async () => { + const tx = store.getReadTx(); + try { + expect(await tx.get(Buffer.from('foo'))).to.be.undefined; + expect(await tx.getIndex(Buffer.from('foo'))).to.deep.eq([]); + } finally { + tx.close(); + } + }); + + it('reads and writes in separate txs', async () => { + const writeChecks = promiseWithResolvers(); + const delay = promiseWithResolvers(); + const getValues = async (tx?: ReadTransaction) => { + let shouldClose = false; + if (!tx) { + tx = store.getCurrentWriteTx(); + if (!tx) { + shouldClose = true; + tx = store.getReadTx(); + } + } + + try { + const data = await tx.get(Buffer.from('foo')); + const index = await tx.getIndex(Buffer.from('foo')); + + return { + data, + index, + }; + } finally { + if (shouldClose) { + tx.close(); + } + } + }; + + // before doing any writes, we should have an empty db + expect(await getValues()).to.deep.eq({ + data: undefined, + index: [], + }); + + // start a write and run some checks but prevent the write tx from finishing immediately in order to run concurrent reads + const writeCommitted = store.transactionAsync(async writeTx => { + await writeTx.set(Buffer.from('foo'), Buffer.from('bar')); + await writeTx.setIndex(Buffer.from('foo'), Buffer.from('bar'), Buffer.from('baz')); + + // the write tx should make the writes visible immediately + expect(await getValues(writeTx)).to.deep.eq({ + data: Buffer.from('bar'), + index: [Buffer.from('bar'), Buffer.from('baz')], + }); + + // even without access to the tx, the writes should still be visible in this context + expect(await getValues()).to.deep.eq({ + data: Buffer.from('bar'), + index: [Buffer.from('bar'), Buffer.from('baz')], + }); + + writeChecks.resolve(); + + // prevent this write from ending + await delay.promise; + }); + + // we don't know a write is happening, so we should get an empty result back + expect(await getValues()).to.deep.eq({ + data: undefined, + index: [], + }); + + // wait for the batch checks to complete + await writeChecks.promise; + + // the batch is ready but uncommitted, we should still see empty data + expect(await getValues()).to.deep.eq({ + data: undefined, + index: [], + }); + + delay.resolve(); + await writeCommitted; + + // now we should see the db update + expect(await getValues()).to.deep.eq({ + data: Buffer.from('bar'), + index: 
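/* The assertions in this test pin down the isolation rule: buffered writes are visible through the WriteTransaction itself (or any read made in the same async context while it is current), but independent readers see nothing until commit flushes the whole batch atomically. */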
[Buffer.from('bar'), Buffer.from('baz')], + }); + }); + + it('should serialize writes correctly', async () => { + const key = Buffer.from('foo'); + const inc = () => + store.transactionAsync(async tx => { + const buf = Buffer.from((await store.getReadTx().get(key)) ?? Buffer.alloc(4)); + buf.writeUint32BE(buf.readUInt32BE() + 1); + await tx.set(key, buf); + }); + + const promises: Promise[] = []; + const rounds = 100; + for (let i = 0; i < rounds; i++) { + promises.push(inc()); + } + + await Promise.all(promises); + expect(Buffer.from((await store.getReadTx().get(key))!).readUint32BE()).to.eq(rounds); + }); + + it('guards against too many cursors being opened at the same time', async () => { + await store.transactionAsync(async tx => { + for (let i = 0; i < 100; i++) { + await tx.set(Buffer.from(String(i)), Buffer.from(String(i))); + } + }); + + const readTx = store.getReadTx(); + const cursors: AsyncIterator<[Uint8Array, Uint8Array]>[] = []; + + // fill up with cursors + for (let i = 0; i < testMaxReaders; i++) { + cursors.push(readTx.iterate(Buffer.from('1'))[Symbol.asyncIterator]()); + } + + // the first few iterators should be fine + await expect(Promise.all(cursors.slice(0, -1).map(it => it.next()))).eventually.to.deep.eq([ + { value: [Buffer.from('1'), Buffer.from('1')], done: false }, + { value: [Buffer.from('1'), Buffer.from('1')], done: false }, + { value: [Buffer.from('1'), Buffer.from('1')], done: false }, + ]); + + // this promise should be blocked until we release a cursor + const fn = stub(); + cursors.at(-1)!.next().then(fn, fn); + + expect(fn.notCalled).to.be.true; + await sleep(100); + expect(fn.notCalled).to.be.true; + + // but we can still do regular reads + await expect(readTx.get(Buffer.from('99'))).eventually.to.deep.eq(Buffer.from('99')); + + // early-return one of the cursors + await cursors[0].return!(); + + // this should have unblocked the last cursor from progressing + await sleep(10); + expect(fn.calledWith({ value: [Buffer.from('1'), Buffer.from('1')], done: false })).to.be.true; + + for (let i = 1; i < testMaxReaders; i++) { + await cursors[i].return!(); + } + + readTx.close(); + }); +}); diff --git a/yarn-project/kv-store/src/lmdb-v2/store.ts b/yarn-project/kv-store/src/lmdb-v2/store.ts new file mode 100644 index 000000000000..aff65119d8c9 --- /dev/null +++ b/yarn-project/kv-store/src/lmdb-v2/store.ts @@ -0,0 +1,210 @@ +import { type Logger, createLogger } from '@aztec/foundation/log'; +import { Semaphore, SerialQueue } from '@aztec/foundation/queue'; +import { MsgpackChannel, NativeLMDBStore } from '@aztec/native'; + +import { AsyncLocalStorage } from 'async_hooks'; +import { rm } from 'fs/promises'; + +import type { AztecAsyncArray } from '../interfaces/array.js'; +import type { Key, StoreSize } from '../interfaces/common.js'; +import type { AztecAsyncCounter } from '../interfaces/counter.js'; +import type { AztecAsyncMap, AztecAsyncMultiMap } from '../interfaces/map.js'; +import type { AztecAsyncSet } from '../interfaces/set.js'; +import type { AztecAsyncSingleton } from '../interfaces/singleton.js'; +import type { AztecAsyncKVStore } from '../interfaces/store.js'; +import { LMDBMap, LMDBMultiMap } from './map.js'; +import { + Database, + type LMDBMessageChannel, + LMDBMessageType, + type LMDBRequestBody, + type LMDBResponseBody, +} from './message.js'; +import { ReadTransaction } from './read_transaction.js'; +import { LMDBSingleValue } from './singleton.js'; +import { WriteTransaction } from './write_transaction.js'; + +export class AztecLMDBStoreV2 
implements AztecAsyncKVStore, LMDBMessageChannel { + private channel: MsgpackChannel; + private writerCtx = new AsyncLocalStorage(); + private writerQueue = new SerialQueue(); + private availableCursors: Semaphore; + + private constructor( + private dataDir: string, + mapSize: number, + maxReaders: number, + private log: Logger, + private cleanup?: () => Promise, + ) { + this.log.info(`Starting data store with maxReaders ${maxReaders}`); + this.channel = new MsgpackChannel(new NativeLMDBStore(dataDir, mapSize, maxReaders)); + // leave one reader to always be available for regular, atomic, reads + this.availableCursors = new Semaphore(maxReaders - 1); + } + + private async start() { + this.writerQueue.start(); + + await this.sendMessage(LMDBMessageType.OPEN_DATABASE, { + db: Database.DATA, + uniqueKeys: true, + }); + + await this.sendMessage(LMDBMessageType.OPEN_DATABASE, { + db: Database.INDEX, + uniqueKeys: false, + }); + } + + public static async new( + dataDir: string, + dbMapSizeKb: number = 10 * 1024 * 1024, + maxReaders: number = 16, + cleanup?: () => Promise, + log = createLogger('kv-store:lmdb-v2'), + ) { + const db = new AztecLMDBStoreV2(dataDir, dbMapSizeKb, maxReaders, log, cleanup); + await db.start(); + return db; + } + + public getReadTx(): ReadTransaction { + return new ReadTransaction(this); + } + + public getCurrentWriteTx(): WriteTransaction | undefined { + const currentWrite = this.writerCtx.getStore(); + return currentWrite; + } + + openMap(name: string): AztecAsyncMap { + return new LMDBMap(this, name); + } + + openMultiMap(name: string): AztecAsyncMultiMap { + return new LMDBMultiMap(this, name); + } + + openSingleton(name: string): AztecAsyncSingleton { + return new LMDBSingleValue(this, name); + } + + openArray(_name: string): AztecAsyncArray { + throw new Error('Not implemented'); + } + + openSet(_name: string): AztecAsyncSet { + throw new Error('Not implemented'); + } + + openCounter(_name: string): AztecAsyncCounter { + throw new Error('Not implemented'); + } + + async transactionAsync>>( + callback: (tx: WriteTransaction) => Promise, + ): Promise { + // transactionAsync might be called recursively + // send any writes to the parent tx, but don't close it + // if the callback throws then the parent tx will rollback automatically + const currentTx = this.getCurrentWriteTx(); + if (currentTx) { + return await callback(currentTx); + } + + return this.writerQueue.put(async () => { + const tx = new WriteTransaction(this); + try { + const res = await this.writerCtx.run(tx, callback, tx); + await tx.commit(); + return res; + } catch (err) { + this.log.error(`Failed to commit transaction`, err); + throw err; + } finally { + tx.close(); + } + }); + } + + clear(): Promise { + return Promise.resolve(); + } + + fork(): Promise { + throw new Error('Not implemented'); + } + + async delete(): Promise { + await this.close(); + await rm(this.dataDir, { recursive: true, force: true }); + this.log.verbose(`Deleted database files at ${this.dataDir}`); + await this.cleanup?.(); + } + + async close() { + await this.writerQueue.cancel(); + await this.sendMessage(LMDBMessageType.CLOSE, undefined); + } + + public async sendMessage( + msgType: T, + body: LMDBRequestBody[T], + ): Promise { + if (msgType === LMDBMessageType.START_CURSOR) { + await this.availableCursors.acquire(); + } + + let response: LMDBResponseBody[T] | undefined = undefined; + try { + ({ response } = await this.channel.sendMessage(msgType, body)); + return response; + } finally { + if ( + (msgType === 
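/* Cursor-slot accounting in sendMessage: a semaphore slot is taken before every START_CURSOR and released if the call fails, opens no cursor (empty range), or once CLOSE_CURSOR completes; the semaphore is sized maxReaders - 1 so one reader is always left free for plain, non-cursor reads. */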
LMDBMessageType.START_CURSOR && response === undefined) || + msgType === LMDBMessageType.CLOSE_CURSOR || + // it's possible for a START_CURSOR command to not return a cursor (e.g. db is empty) + (msgType === LMDBMessageType.START_CURSOR && + typeof (response as LMDBResponseBody[LMDBMessageType.START_CURSOR]).cursor !== 'number') + ) { + this.availableCursors.release(); + } + } + } + + public async estimateSize(): Promise { + const resp = await this.sendMessage(LMDBMessageType.STATS, undefined); + return { + mappingSize: Number(resp.dbMapSizeBytes), + actualSize: resp.stats.reduce((s, db) => Number(db.totalUsedSize) + s, 0), + numItems: resp.stats.reduce((s, db) => Number(db.numDataItems) + s, 0), + }; + } +} + +export function execInWriteTx(store: AztecLMDBStoreV2, fn: (tx: WriteTransaction) => Promise): Promise { + const currentWrite = store.getCurrentWriteTx(); + if (currentWrite) { + return fn(currentWrite); + } else { + return store.transactionAsync(fn); + } +} + +export async function execInReadTx( + store: AztecLMDBStoreV2, + fn: (tx: ReadTransaction) => T | Promise, +): Promise { + const currentWrite = store.getCurrentWriteTx(); + if (currentWrite) { + return await fn(currentWrite); + } else { + const tx = store.getReadTx(); + try { + return await fn(tx); + } finally { + tx.close(); + } + } +} diff --git a/yarn-project/kv-store/src/lmdb-v2/utils.test.ts b/yarn-project/kv-store/src/lmdb-v2/utils.test.ts new file mode 100644 index 000000000000..9ae0aa278f22 --- /dev/null +++ b/yarn-project/kv-store/src/lmdb-v2/utils.test.ts @@ -0,0 +1,186 @@ +import { expect } from 'chai'; + +import { dedupeSortedArray, findIndexInSortedArray, insertIntoSortedArray, merge, removeAnyOf } from './utils.js'; + +const cmp = (a: number, b: number) => (a === b ? 0 : a < b ? 
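/* Usage sketch (hypothetical keys, assuming this store API): nested transactionAsync calls join the ambient WriteTransaction tracked via AsyncLocalStorage instead of queueing a second writer.
     await store.transactionAsync(async tx => {
       await tx.set(k1, v1);
       await store.transactionAsync(inner => inner.set(k2, v2)); // reuses tx
     }); // both writes land in a single BATCH commit */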
-1 : 1); + +describe('utils', () => { + it('removeDuplicatesFromSortedArray', () => { + const tests = [ + [[1], [1]], + [[1, 1], [1]], + [[1, 1, 1], [1]], + [[1, 1, 1, 1], [1]], + [ + [1, 1, 2, 3, 4], + [1, 2, 3, 4], + ], + [ + [1, 2, 2, 3, 4], + [1, 2, 3, 4], + ], + [ + [1, 2, 3, 3, 4], + [1, 2, 3, 4], + ], + [ + [1, 2, 3, 4, 4], + [1, 2, 3, 4], + ], + [ + [1, 2, 3, 4, 4, 4], + [1, 2, 3, 4], + ], + [ + [1, 2, 3, 4], + [1, 2, 3, 4], + ], + [[], []], + ]; + + for (const [arr, expected] of tests) { + dedupeSortedArray(arr, cmp); + expect(arr).to.deep.eq(expected); + } + }); + + describe('merge', () => { + it('merges', () => { + const tests = [ + [ + [1, 4, 5, 9], + [0, 1, 3, 4, 6, 6, 10], + [0, 1, 1, 3, 4, 4, 5, 6, 6, 9, 10], + ], + [[], [], []], + [[], [1, 1, 1], [1, 1, 1]], + [[], [1, 2, 3], [1, 2, 3]], + [[1, 2, 3], [], [1, 2, 3]], + [ + [1, 2, 3], + [1, 2, 3], + [1, 1, 2, 2, 3, 3], + ], + [ + [4, 5, 6], + [1, 2, 3], + [1, 2, 3, 4, 5, 6], + ], + [ + [1, 2, 3], + [4, 5, 6], + [1, 2, 3, 4, 5, 6], + ], + ]; + for (const [arr, toMerge, expected] of tests) { + merge(arr, toMerge, cmp); + expect(arr).to.deep.eq(expected); + } + }); + }); + + it('binarySearch', () => { + const tests: [number[], number, number][] = [ + [[], 1, -1], + + [[1], 1, 0], + [[1], 2, -1], + [[1], 0, -1], + + [[1, 2], 1, 0], + [[1, 2], 2, 1], + [[1, 2], 3, -1], + [[1, 2], 0, -1], + + [[1, 2, 3], 2, 1], + [[1, 2, 3], 3, 2], + [[1, 2, 3], 4, -1], + [[1, 2, 3], 0, -1], + [[1, 2, 3], 1, 0], + [[1, 2, 3], 2, 1], + [[1, 2, 3], 3, 2], + [[1, 2, 3], 4, -1], + [[1, 2, 3], 0, -1], + + [[1, 2, 3, 4], 1, 0], + [[1, 2, 3, 4], 2, 1], + [[1, 2, 3, 4], 3, 2], + [[1, 2, 3, 4], 4, 3], + [[1, 2, 3, 4], 5, -1], + [[1, 2, 3, 4], 0, -1], + ]; + for (const [arr, needle, expected] of tests) { + expect(findIndexInSortedArray(arr, needle, cmp)).to.eq(expected); + } + }); +}); + +describe('insertIntoSortedArray', () => { + it('inserts into empty array', () => { + const arr: number[] = []; + insertIntoSortedArray(arr, 1, cmp); + expect(arr).to.deep.equal([1]); + }); + + it('inserts at beginning', () => { + const arr = [2, 3, 4]; + insertIntoSortedArray(arr, 1, cmp); + expect(arr).to.deep.equal([1, 2, 3, 4]); + }); + + it('inserts at end', () => { + const arr = [1, 2, 3]; + insertIntoSortedArray(arr, 4, cmp); + expect(arr).to.deep.equal([1, 2, 3, 4]); + }); + + it('inserts in middle', () => { + const arr = [1, 3, 5]; + insertIntoSortedArray(arr, 4, cmp); + expect(arr).to.deep.equal([1, 3, 4, 5]); + }); + + it('handles duplicates', () => { + const arr = [1, 2, 2, 3]; + insertIntoSortedArray(arr, 2, cmp); + expect(arr).to.deep.equal([1, 2, 2, 2, 3]); + }); + + it('maintains order with multiple inserts', () => { + const arr: number[] = []; + [3, 1, 4, 1, 5, 9, 2, 6].forEach(n => insertIntoSortedArray(arr, n, cmp)); + expect(arr).to.deep.equal([1, 1, 2, 3, 4, 5, 6, 9]); + }); +}); + +describe('removeAnyOf', () => { + it('removes single matching value', () => { + const arr = [1, 2, 3, 4]; + removeAnyOf(arr, [2], cmp); + expect(arr).to.deep.equal([1, 3, 4]); + }); + + it('removes multiple matching values', () => { + const arr = [1, 2, 3, 4, 5]; + removeAnyOf(arr, [2, 4], cmp); + expect(arr).to.deep.equal([1, 3, 5]); + }); + + it('handles empty removal array', () => { + const arr = [1, 2, 3]; + removeAnyOf(arr, [], cmp); + expect(arr).to.deep.equal([1, 2, 3]); + }); + + it('handles no matches', () => { + const arr = [1, 3, 5]; + removeAnyOf(arr, [2, 4], cmp); + expect(arr).to.deep.equal([1, 3, 5]); + }); + + it('removes duplicates', () => { + const arr = 
[1, 2, 2, 2, 3]; + removeAnyOf(arr, [2], cmp); + expect(arr).to.deep.equal([1, 3]); + }); +}); diff --git a/yarn-project/kv-store/src/lmdb-v2/utils.ts b/yarn-project/kv-store/src/lmdb-v2/utils.ts new file mode 100644 index 000000000000..1cfa00c04156 --- /dev/null +++ b/yarn-project/kv-store/src/lmdb-v2/utils.ts @@ -0,0 +1,150 @@ +import { MAXIMUM_KEY, fromBufferKey, toBufferKey } from 'ordered-binary'; + +import { type Key } from '../interfaces/common.js'; + +type Cmp = (a: T, b: T) => -1 | 0 | 1; + +export function dedupeSortedArray(arr: T[], cmp: Cmp): void { + for (let i = 0; i < arr.length; i++) { + let j = i + 1; + for (; j < arr.length; j++) { + const res = cmp(arr[i], arr[j]); + if (res === 0) { + continue; + } else if (res < 0) { + break; + } else { + throw new Error('Array not sorted'); + } + } + + if (j - i > 1) { + arr.splice(i + 1, j - i - 1); + } + } +} + +export function insertIntoSortedArray(arr: T[], item: T, cmp: (a: T, b: T) => number): void { + let left = 0; + let right = arr.length; + + while (left < right) { + const mid = (left + right) >> 1; + const comparison = cmp(arr[mid], item); + + if (comparison < 0) { + left = mid + 1; + } else { + right = mid; + } + } + + arr.splice(left, 0, item); +} + +export function findIndexInSortedArray(values: T[], needle: N, cmp: (a: T, b: N) => number): number { + let start = 0; + let end = values.length - 1; + + while (start <= end) { + const mid = start + (((end - start) / 2) | 0); + const res = cmp(values[mid], needle); + if (res === 0) { + return mid; + } else if (res > 0) { + end = mid - 1; + } else { + start = mid + 1; + } + } + + return -1; +} + +export function findInSortedArray(values: T[], needle: N, cmp: (a: T, b: N) => number): T | undefined { + const idx = findIndexInSortedArray(values, needle, cmp); + return idx > -1 ? values[idx] : undefined; +} + +export function removeAnyOf(arr: T[], vals: N[], cmp: (a: T, b: N) => -1 | 0 | 1): void { + let writeIdx = 0; + let readIdx = 0; + let valIdx = 0; + + while (readIdx < arr.length && valIdx < vals.length) { + const comparison = cmp(arr[readIdx], vals[valIdx]); + + if (comparison < 0) { + arr[writeIdx++] = arr[readIdx++]; + } else if (comparison > 0) { + valIdx++; + } else { + readIdx++; + } + } + + while (readIdx < arr.length) { + arr[writeIdx++] = arr[readIdx++]; + } + + arr.length = writeIdx; +} + +export function removeFromSortedArray(arr: T[], val: N, cmp: (a: T, b: N) => -1 | 0 | 1) { + const idx = findIndexInSortedArray(arr, val, cmp); + if (idx > -1) { + arr.splice(idx, 1); + } +} + +export function merge(arr: T[], toInsert: T[], cmp: (a: T, b: T) => -1 | 0 | 1): void { + const result = new Array(arr.length + toInsert.length); + let i = 0, + j = 0, + k = 0; + + while (i < arr.length && j < toInsert.length) { + result[k++] = cmp(arr[i], toInsert[j]) <= 0 ? 
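/* Worked example: merge([1, 4, 5], [0, 4, 9], cmp) rewrites the first array in place to [0, 1, 4, 4, 5, 9]; duplicates survive, so callers that need set semantics follow up with dedupeSortedArray, as the write transaction's setIndex does. */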
arr[i++] : toInsert[j++]; + } + + while (i < arr.length) { + result[k++] = arr[i++]; + } + while (j < toInsert.length) { + result[k++] = toInsert[j++]; + } + + for (i = 0; i < result.length; i++) { + arr[i] = result[i]; + } + arr.length = result.length; +} + +export function keyCmp(a: [Uint8Array, Uint8Array[] | null], b: [Uint8Array, Uint8Array[] | null]): -1 | 0 | 1 { + return Buffer.compare(a[0], b[0]); +} + +export function singleKeyCmp(a: [Uint8Array, Uint8Array[] | null], b: Uint8Array): -1 | 0 | 1 { + return Buffer.compare(a[0], b); +} + +export function minKey(prefix: string) { + return toBufferKey([prefix]); +} + +export function maxKey(prefix: string) { + return toBufferKey([prefix, MAXIMUM_KEY]); +} + +export function serializeKey(prefix: string, key: Key): Buffer { + return toBufferKey([prefix, key]); +} + +export function deserializeKey<K extends Key>(prefix: string, key: Uint8Array): K | false { + const buf = Buffer.from(key); + const parsed = fromBufferKey(buf); + if (!Array.isArray(parsed) || parsed[0] !== prefix) { + return false; + } + return parsed[1] as K; +} diff --git a/yarn-project/kv-store/src/lmdb-v2/write_transaction.test.ts b/yarn-project/kv-store/src/lmdb-v2/write_transaction.test.ts new file mode 100644 index 000000000000..216e1870c37c --- /dev/null +++ b/yarn-project/kv-store/src/lmdb-v2/write_transaction.test.ts @@ -0,0 +1,329 @@ +import { toArray } from '@aztec/foundation/iterable'; + +import { expect } from 'chai'; +import { type SinonStubbedInstance, stub } from 'sinon'; + +import { type Batch, CURSOR_PAGE_SIZE, Database, type LMDBMessageChannel, LMDBMessageType } from './message.js'; + +import { WriteTransaction } from './write_transaction.js'; + +describe('WriteTransaction', () => { + let channel: SinonStubbedInstance<LMDBMessageChannel>; + let tx: WriteTransaction; + + beforeEach(() => { + channel = stub({ + sendMessage: () => {}, + } as any); + tx = new WriteTransaction(channel); + + channel.sendMessage.resolves({ ok: true }); + }); + + it('accumulates writes', async () => { + await tx.setIndex(Buffer.from('foo'), Buffer.from('1'), Buffer.from('2'), Buffer.from('3')); + await tx.removeIndex(Buffer.from('bar'), Buffer.from('1'), Buffer.from('2')); + await tx.set(Buffer.from('foo'), Buffer.from('a')); + await tx.remove(Buffer.from('baz')); + + await tx.commit(); + expect( + channel.sendMessage.calledWith(LMDBMessageType.BATCH, { + batches: new Map([ + [ + Database.INDEX, + { + removeEntries: [[Buffer.from('bar'), [Buffer.from('1'), Buffer.from('2')]]], + addEntries: [[Buffer.from('foo'), [Buffer.from('1'), Buffer.from('2'), Buffer.from('3')]]], + }, + ], + [ + Database.DATA, + { + removeEntries: [[Buffer.from('baz'), null]], + addEntries: [[Buffer.from('foo'), [Buffer.from('a')]]], + }, + ], + ]), + }), + ).to.be.true; + }); + + it('correctly manages index batch', async () => { + await tx.setIndex(Buffer.from('foo'), Buffer.from('1'), Buffer.from('2'), Buffer.from('3')); + expect(tx.indexBatch).to.deep.eq({ + removeEntries: [], + addEntries: [[Buffer.from('foo'), [Buffer.from('1'), Buffer.from('2'), Buffer.from('3')]]], + }); + + await tx.setIndex(Buffer.from('foo'), Buffer.from('4')); + expect(tx.indexBatch).to.deep.eq({ + removeEntries: [], + addEntries: [[Buffer.from('foo'), [Buffer.from('1'), Buffer.from('2'), Buffer.from('3'), Buffer.from('4')]]], + }); + + await tx.removeIndex(Buffer.from('foo'), Buffer.from('5')); + expect(tx.indexBatch).to.deep.eq({ + removeEntries: [[Buffer.from('foo'), [Buffer.from('5')]]], + addEntries: [[Buffer.from('foo'), [Buffer.from('1'), Buffer.from('2'), Buffer.from('3'), Buffer.from('4')]]], + }); + + await tx.removeIndex(Buffer.from('foo'), Buffer.from('1'), Buffer.from('2'), Buffer.from('6')); + expect(tx.indexBatch).to.deep.eq({ + removeEntries: [[Buffer.from('foo'), [Buffer.from('1'), Buffer.from('2'), Buffer.from('5'), Buffer.from('6')]]], + addEntries: [[Buffer.from('foo'), [Buffer.from('3'), Buffer.from('4')]]], + }); + + await tx.removeIndex(Buffer.from('foo')); + expect(tx.indexBatch).to.deep.eq({ + removeEntries: [[Buffer.from('foo'), null]], + addEntries: [], + }); + + await tx.removeIndex(Buffer.from('foo'), Buffer.from('2')); + expect(tx.indexBatch).to.deep.eq({ + removeEntries: [[Buffer.from('foo'), [Buffer.from('2')]]], + addEntries: [], + }); + await tx.setIndex(Buffer.from('foo'), Buffer.from('2')); + expect(tx.indexBatch).to.deep.eq({ + removeEntries: [], + addEntries: [[Buffer.from('foo'), [Buffer.from('2')]]], + }); + }); + + it('correctly manages pending data reads', async () => { + channel.sendMessage.resolves({ values: [null] }); + expect(await tx.get(Buffer.from('foo'))).to.deep.eq(undefined); + + await tx.set(Buffer.from('foo'), Buffer.from('1')); + expect(await tx.get(Buffer.from('foo'))).to.deep.eq(Buffer.from('1')); + + await tx.set(Buffer.from('foo'), Buffer.from('2')); + expect(await tx.get(Buffer.from('foo'))).to.deep.eq(Buffer.from('2')); + + await tx.remove(Buffer.from('foo')); + expect(await tx.get(Buffer.from('foo'))).to.deep.eq(undefined); + }); + + it('correctly manages pending index reads', async () => { + channel.sendMessage.resolves({ values: [[Buffer.from('1')]] }); + expect(await tx.getIndex(Buffer.from('foo'))).to.deep.eq([Buffer.from('1')]); + + await tx.setIndex(Buffer.from('foo'), Buffer.from('1')); + expect(await tx.getIndex(Buffer.from('foo'))).to.deep.eq([Buffer.from('1')]); + + await tx.setIndex(Buffer.from('foo'), Buffer.from('2')); + expect(await tx.getIndex(Buffer.from('foo'))).to.deep.eq([Buffer.from('1'), Buffer.from('2')]); + + await tx.removeIndex(Buffer.from('foo'), Buffer.from('1')); + expect(await tx.getIndex(Buffer.from('foo'))).to.deep.eq([Buffer.from('2')]); + + await tx.removeIndex(Buffer.from('foo')); + expect(await tx.getIndex(Buffer.from('foo'))).to.deep.eq([]); + }); + + it('correctly iterates over pending data', async () => { + channel.sendMessage.withArgs(LMDBMessageType.START_CURSOR).resolves({ cursor: null, entries: [] }); + channel.sendMessage.withArgs(LMDBMessageType.ADVANCE_CURSOR).rejects(new Error('Cursor empty')); + + await tx.set(Buffer.from('foo'), Buffer.from('1')); + await tx.set(Buffer.from('bar'), Buffer.from('2')); + await tx.set(Buffer.from('baz'), Buffer.from('3')); + + const entries = await toArray(tx.iterate(Buffer.from('bar'))); + expect(entries).to.deep.eq([ + [Buffer.from('bar'), Buffer.from('2')], + [Buffer.from('baz'), Buffer.from('3')], + [Buffer.from('foo'), Buffer.from('1')], + ]); + }); + + it('correctly iterates over uncommitted and committed data', async () => { + channel.sendMessage.withArgs(LMDBMessageType.START_CURSOR).resolves({ + cursor: 42, + entries: [[Buffer.from('bar'), [Buffer.from('3')]]], + }); + channel.sendMessage + .withArgs(LMDBMessageType.ADVANCE_CURSOR, { cursor: 42, count: CURSOR_PAGE_SIZE }) + .resolves({ entries: [[Buffer.from('baz'), [Buffer.from('3')]]], done: true }); + + await tx.set(Buffer.from('foo'), Buffer.from('1')); + await tx.set(Buffer.from('bar'), Buffer.from('2')); + + const entries = await toArray(tx.iterate(Buffer.from('bar'))); + expect(entries).to.deep.eq([ + [Buffer.from('bar'), Buffer.from('2')], + [Buffer.from('baz'), Buffer.from('3')], + [Buffer.from('foo'), Buffer.from('1')], + ]); + }); + + it('correctly iterates over overwritten data', async () => { + channel.sendMessage.withArgs(LMDBMessageType.START_CURSOR).resolves({ + cursor: 42, + entries: [[Buffer.from('baz'), [Buffer.from('3')]]], + }); + channel.sendMessage + .withArgs(LMDBMessageType.ADVANCE_CURSOR, { cursor: 42, count: CURSOR_PAGE_SIZE }) + .resolves({ entries: [[Buffer.from('foo'), [Buffer.from('1')]]], done: true }); + + await tx.remove(Buffer.from('foo')); + await tx.set(Buffer.from('bar'), Buffer.from('2')); + await tx.set(Buffer.from('baz'), Buffer.from('42')); + await tx.set(Buffer.from('quux'), Buffer.from('123')); + + const entries = await toArray(tx.iterate(Buffer.from('bar'))); + expect(entries).to.deep.eq([ + [Buffer.from('bar'), Buffer.from('2')], + [Buffer.from('baz'), Buffer.from('42')], + [Buffer.from('quux'), Buffer.from('123')], + ]); + }); + + it('correctly iterates until end key', async () => { + channel.sendMessage.withArgs(LMDBMessageType.START_CURSOR).resolves({ + cursor: 42, + entries: [[Buffer.from('bar'), [Buffer.from('1')]]], + }); + channel.sendMessage + .withArgs(LMDBMessageType.ADVANCE_CURSOR) + .resolves({ entries: [[Buffer.from('baz'), [Buffer.from('3')]]], done: true }); + + await tx.remove(Buffer.from('foo')); + await tx.set(Buffer.from('bar'), Buffer.from('2')); + await tx.set(Buffer.from('baz'), Buffer.from('42')); + await tx.set(Buffer.from('quux'), Buffer.from('123')); + + const entries = await toArray(tx.iterate(Buffer.from('bar'), Buffer.from('foo'))); + expect(entries).to.deep.eq([ + [Buffer.from('bar'), Buffer.from('2')], + [Buffer.from('baz'), Buffer.from('42')], + ]); + }); + + it('correctly iterates in reverse', async () => { + channel.sendMessage.withArgs(LMDBMessageType.START_CURSOR).resolves({ + cursor: null, + entries: [[Buffer.from('baz'), [Buffer.from('3')]]], + }); + + await tx.remove(Buffer.from('foo')); + await tx.set(Buffer.from('bar'), Buffer.from('2')); + await tx.set(Buffer.from('baz'), Buffer.from('42')); + await tx.set(Buffer.from('quux'), Buffer.from('123')); + + const entries = await toArray(tx.iterate(Buffer.from('quux'), undefined, true)); + expect(entries).to.deep.eq([ + [Buffer.from('quux'), Buffer.from('123')], + [Buffer.from('baz'), Buffer.from('42')], + [Buffer.from('bar'), Buffer.from('2')], + ]); + }); + + it('correctly iterates in reverse with end key', async () => { + channel.sendMessage.withArgs(LMDBMessageType.START_CURSOR).resolves({ + cursor: 42, + entries: [[Buffer.from('baz'), [Buffer.from('3')]]], + }); + channel.sendMessage + .withArgs(LMDBMessageType.ADVANCE_CURSOR) + .resolves({ entries: [[Buffer.from('bar'), [Buffer.from('3')]]], done: true }); + + await tx.remove(Buffer.from('foo')); + await tx.set(Buffer.from('bar'), Buffer.from('2')); + await tx.set(Buffer.from('baz'), Buffer.from('42')); + await tx.set(Buffer.from('quux'), Buffer.from('123')); + + const entries = await toArray(tx.iterate(Buffer.from('quux'), Buffer.from('baz'), true)); + expect(entries).to.deep.eq([[Buffer.from('quux'), Buffer.from('123')]]); + }); + + it('correctly iterates over pending index data', async () => { + channel.sendMessage.withArgs(LMDBMessageType.START_CURSOR).resolves({ + cursor: 42, + entries: [[Buffer.from('baz'), [Buffer.from('3'), Buffer.from('6')]]], + }); + channel.sendMessage.withArgs(LMDBMessageType.ADVANCE_CURSOR).resolves({ + entries: [[Buffer.from('foo'), [Buffer.from('2'), Buffer.from('4'), Buffer.from('8')]]], + done: true, + }); + + await tx.setIndex(Buffer.from('foo'), Buffer.from('1')); + await tx.removeIndex(Buffer.from('foo'), Buffer.from('8')); + await tx.setIndex(Buffer.from('bar'), Buffer.from('2'), Buffer.from('3')); + await tx.setIndex(Buffer.from('baz'), Buffer.from('42')); + + const entries = await toArray(tx.iterateIndex(Buffer.from('bar'))); + expect(entries).to.deep.eq([ + [Buffer.from('bar'), [Buffer.from('2'), Buffer.from('3')]], + [Buffer.from('baz'), [Buffer.from('3'), Buffer.from('42'), Buffer.from('6')]], + [Buffer.from('foo'), [Buffer.from('1'), Buffer.from('2'), Buffer.from('4')]], + ]); + }); + + it('correctly iterates over pending index data up to end key', async () => { + channel.sendMessage.withArgs(LMDBMessageType.START_CURSOR).resolves({ cursor: null, entries: [], done: true }); + channel.sendMessage.withArgs(LMDBMessageType.ADVANCE_CURSOR).rejects(new Error('Should not be called')); + + await tx.setIndex(Buffer.from('foo'), Buffer.from('1')); + await tx.removeIndex(Buffer.from('foo'), Buffer.from('8')); + await tx.setIndex(Buffer.from('bar'), Buffer.from('2'), Buffer.from('3')); + await tx.setIndex(Buffer.from('baz'), Buffer.from('42')); + + const entries = await toArray(tx.iterateIndex(Buffer.from('bar'), Buffer.from('baz'))); + expect(entries).to.deep.eq([[Buffer.from('bar'), [Buffer.from('2'), Buffer.from('3')]]]); + }); + + it('correctly iterates over pending index data in reverse', async () => { + channel.sendMessage.withArgs(LMDBMessageType.START_CURSOR).resolves({ + cursor: 42, + entries: [[Buffer.from('foo'), [Buffer.from('2'), Buffer.from('4'), Buffer.from('8')]]], + }); + channel.sendMessage.withArgs(LMDBMessageType.ADVANCE_CURSOR).resolves({ + entries: [[Buffer.from('baz'), [Buffer.from('3'), Buffer.from('6')]]], + done: true, + }); + + await tx.setIndex(Buffer.from('foo'), Buffer.from('1')); + await tx.removeIndex(Buffer.from('foo'), Buffer.from('8')); + await tx.setIndex(Buffer.from('bar'), Buffer.from('2'), Buffer.from('3')); + await tx.setIndex(Buffer.from('baz'), Buffer.from('42')); + await tx.setIndex(Buffer.from('quux'), Buffer.from('1123')); + + const entries = await toArray(tx.iterateIndex(Buffer.from('foo'), undefined, true)); + expect(entries).to.deep.eq([ + [Buffer.from('foo'), [Buffer.from('1'), Buffer.from('2'), Buffer.from('4')]], + [Buffer.from('baz'), [Buffer.from('3'), Buffer.from('42'), Buffer.from('6')]], + [Buffer.from('bar'), [Buffer.from('2'), Buffer.from('3')]], + ]); + }); + + it('correctly iterates over pending index data in reverse up to given end key', async () => { + channel.sendMessage.withArgs(LMDBMessageType.START_CURSOR).resolves({ + cursor: 42, + entries: [[Buffer.from('foo'), [Buffer.from('2'), Buffer.from('4'), Buffer.from('8')]]], + }); + channel.sendMessage.withArgs(LMDBMessageType.ADVANCE_CURSOR).resolves({ + entries: [[Buffer.from('baz'), [Buffer.from('3'), Buffer.from('6')]]], + done: true, + }); + + await tx.setIndex(Buffer.from('foo'), Buffer.from('1')); + await tx.removeIndex(Buffer.from('foo'), Buffer.from('8')); + await tx.setIndex(Buffer.from('bar'), Buffer.from('2'), Buffer.from('3')); + await tx.setIndex(Buffer.from('baz'), Buffer.from('42')); + await tx.setIndex(Buffer.from('quux'), Buffer.from('1123')); + + const entries = await toArray(tx.iterateIndex(Buffer.from('foo'), Buffer.from('bar'), true)); + expect(entries).to.deep.eq([ + [Buffer.from('foo'), [Buffer.from('1'), Buffer.from('2'), Buffer.from('4')]], + [Buffer.from('baz'), [Buffer.from('3'), Buffer.from('42'), Buffer.from('6')]], + ]); + }); + + it('refuses to commit if 
closed', async () => { + await tx.set(Buffer.from('foo'), Buffer.from('1')); + tx.close(); + await expect(tx.commit()).eventually.to.be.rejectedWith(Error, 'Transaction is closed'); + }); +}); diff --git a/yarn-project/kv-store/src/lmdb-v2/write_transaction.ts b/yarn-project/kv-store/src/lmdb-v2/write_transaction.ts new file mode 100644 index 000000000000..dbe34326bd0f --- /dev/null +++ b/yarn-project/kv-store/src/lmdb-v2/write_transaction.ts @@ -0,0 +1,314 @@ +import { type Batch, Database, LMDBMessageType } from './message.js'; +import { ReadTransaction } from './read_transaction.js'; +import { + dedupeSortedArray, + findInSortedArray, + findIndexInSortedArray, + insertIntoSortedArray, + keyCmp, + merge, + removeAnyOf, + removeFromSortedArray, + singleKeyCmp, +} from './utils.js'; + +export class WriteTransaction extends ReadTransaction { + // exposed for tests + public readonly dataBatch: Batch = { + addEntries: [], + removeEntries: [], + }; + public readonly indexBatch: Batch = { + addEntries: [], + removeEntries: [], + }; + + set(key: Uint8Array, value: Uint8Array): Promise { + this.assertIsOpen(); + + const addEntry = findInSortedArray(this.dataBatch.addEntries, key, singleKeyCmp); + if (!addEntry) { + insertIntoSortedArray(this.dataBatch.addEntries, [key, [value]], keyCmp); + } else { + addEntry[1] = [value]; + } + + const removeEntryIndex = findIndexInSortedArray(this.dataBatch.removeEntries, key, singleKeyCmp); + if (removeEntryIndex > -1) { + this.dataBatch.removeEntries.splice(removeEntryIndex, 1); + } + + return Promise.resolve(); + } + + remove(key: Uint8Array): Promise { + const removeEntryIndex = findIndexInSortedArray(this.dataBatch.removeEntries, key, singleKeyCmp); + if (removeEntryIndex === -1) { + this.dataBatch.removeEntries.push([key, null]); + } + + const addEntryIndex = findIndexInSortedArray(this.dataBatch.addEntries, key, singleKeyCmp); + if (addEntryIndex > -1) { + this.dataBatch.addEntries.splice(addEntryIndex, 1); + } + + return Promise.resolve(); + } + + public override async get(key: Buffer): Promise { + this.assertIsOpen(); + + const addEntry = findInSortedArray(this.dataBatch.addEntries, key, singleKeyCmp); + if (addEntry) { + return addEntry[1][0]; + } + const removeEntryIdx = findIndexInSortedArray(this.dataBatch.removeEntries, key, singleKeyCmp); + if (removeEntryIdx > -1) { + return undefined; + } + + return await super.get(key); + } + + setIndex(key: Buffer, ...values: Buffer[]): Promise { + this.assertIsOpen(); + + const addEntries = findInSortedArray(this.indexBatch.addEntries, key, singleKeyCmp); + const removeEntries = findInSortedArray(this.indexBatch.removeEntries, key, singleKeyCmp); + + if (removeEntries) { + if (removeEntries[1]) { + // check if we were deleting these values and update + removeAnyOf(removeEntries[1], values, Buffer.compare); + } + + if (!removeEntries[1] || removeEntries[1].length === 0) { + // either we were deleting the entire key previously + // or after cleaning up duplicates, we don't have anything else to delete + removeFromSortedArray(this.indexBatch.removeEntries, removeEntries, keyCmp); + } + } + + if (addEntries) { + merge(addEntries[1], values, Buffer.compare); + dedupeSortedArray(addEntries[1], Buffer.compare); + } else { + insertIntoSortedArray(this.indexBatch.addEntries, [key, values], keyCmp); + } + + return Promise.resolve(); + } + + removeIndex(key: Buffer, ...values: Buffer[]): Promise { + this.assertIsOpen(); + + const addEntries = findInSortedArray(this.indexBatch.addEntries, key, singleKeyCmp); + const 
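/* Invariant kept by setIndex/removeIndex: a value never appears for the same key in both indexBatch.addEntries and indexBatch.removeEntries, and both lists stay sorted by key so the batch can later be merged against committed data with the sorted-array helpers from utils.js. */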
removeEntries = findInSortedArray(this.indexBatch.removeEntries, key, singleKeyCmp); + + if (values.length === 0) { + // special case, we're deleting the entire key + if (addEntries) { + removeFromSortedArray(this.indexBatch.addEntries, addEntries, keyCmp); + } + + if (removeEntries) { + removeEntries[1] = null; + } else { + insertIntoSortedArray(this.indexBatch.removeEntries, [key, null], keyCmp); + } + + return Promise.resolve(); + } + + if (addEntries) { + removeAnyOf(addEntries[1], values, Buffer.compare); + if (addEntries[1].length === 0) { + removeFromSortedArray(this.indexBatch.addEntries, addEntries, keyCmp); + } + } + + if (removeEntries) { + removeEntries[1] ??= []; + merge(removeEntries[1], values, Buffer.compare); + dedupeSortedArray(removeEntries[1], Buffer.compare); + } else { + insertIntoSortedArray(this.indexBatch.removeEntries, [key, values], keyCmp); + } + + return Promise.resolve(); + } + + public override async getIndex(key: Buffer): Promise { + this.assertIsOpen(); + + const removeEntries = findInSortedArray(this.indexBatch.removeEntries, key, singleKeyCmp); + if (removeEntries && removeEntries[1] === null) { + return []; + } + + const addEntries = findInSortedArray(this.indexBatch.addEntries, key, singleKeyCmp); + const results = await super.getIndex(key); + + if (addEntries) { + merge(results, addEntries[1], Buffer.compare); + dedupeSortedArray(results, Buffer.compare); + } + + if (removeEntries && Array.isArray(removeEntries[1])) { + removeAnyOf(results, removeEntries[1], Buffer.compare); + } + + return results; + } + + public override async *iterate( + startKey: Uint8Array, + endKey?: Uint8Array | undefined, + reverse?: boolean, + limit?: number, + ): AsyncIterable<[Uint8Array, Uint8Array]> { + yield* this.#iterate( + super.iterate(startKey, endKey, reverse), + this.dataBatch, + startKey, + endKey, + reverse, + limit, + (committed, toAdd) => (toAdd.length > 0 ? 
toAdd[0] : committed), + vals => vals[0], + ); + } + + public override async *iterateIndex( + startKey: Uint8Array, + endKey?: Uint8Array | undefined, + reverse?: boolean, + limit?: number, + ): AsyncIterable<[Uint8Array, Uint8Array[]]> { + yield* this.#iterate( + super.iterateIndex(startKey, endKey, reverse), + this.indexBatch, + startKey, + endKey, + reverse, + limit, + (committed, toAdd, toRemove) => { + if (toAdd.length > 0) { + merge(committed, toAdd, Buffer.compare); + dedupeSortedArray(committed, Buffer.compare); + } + if (toRemove.length > 0) { + removeAnyOf(committed, toRemove, Buffer.compare); + } + return committed; + }, + vals => vals, + ); + } + + async *#iterate( + iterator: AsyncIterable<[Uint8Array, T]>, + batch: Batch, + startKey: Uint8Array, + endKey: Uint8Array | undefined, + reverse: boolean = false, + limit: number | undefined, + merge: (committed: T, toAdd: Uint8Array[], toRemove: Uint8Array[]) => T, + map: (vals: Uint8Array[]) => T, + ): AsyncIterable<[Uint8Array, T]> { + this.assertIsOpen(); + + // make a copy of this in case we're running in reverse + const uncommittedEntries = [...batch.addEntries]; + // used to check we're in the right order when comparing between a key and uncommittedEntries + let cmpDirection = -1; + if (reverse) { + cmpDirection = 1; + uncommittedEntries.reverse(); + } + + let uncommittedEntriesIdx = 0; + while (uncommittedEntriesIdx < uncommittedEntries.length) { + const entry = uncommittedEntries[uncommittedEntriesIdx]; + // go to the first key in our cache that would be captured by the iterator + if (Buffer.compare(entry[0], startKey) !== cmpDirection) { + break; + } + uncommittedEntriesIdx++; + } + + let count = 0; + // helper to early return if we've reached our limit + const checkLimit = typeof limit === 'number' ? () => count < limit : () => true; + for await (const [key, values] of iterator) { + // yield every key that we have cached that's captured by the iterator + while (uncommittedEntriesIdx < uncommittedEntries.length && checkLimit()) { + const entry = uncommittedEntries[uncommittedEntriesIdx]; + if (endKey && Buffer.compare(entry[0], endKey) !== cmpDirection) { + break; + } + + if (Buffer.compare(entry[0], key) === cmpDirection) { + count++; + yield [entry[0], map(entry[1])]; + } else { + break; + } + uncommittedEntriesIdx++; + } + + if (!checkLimit()) { + // we reached the imposed `limit` + break; + } + + const toRemove = findInSortedArray(batch.removeEntries, key, singleKeyCmp); + + // at this point we've either exhausted all uncommitted entries, + // we reached a key strictly greater/smaller than `key` + // or we found the key itself + // check if it's the key and use the uncommitted value + let toAdd: Uint8Array[] = []; + if ( + uncommittedEntriesIdx < uncommittedEntries.length && + Buffer.compare(uncommittedEntries[uncommittedEntriesIdx][0], key) === 0 + ) { + toAdd = uncommittedEntries[uncommittedEntriesIdx][1]; + uncommittedEntriesIdx++; + } + + if (toRemove && !toRemove[1]) { + // we were told to delete this key entirely + continue; + } else { + const mergedValues = merge(values, toAdd, toRemove?.[1] ?? 
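/* Worked example of the overlay logic in #iterate (hypothetical keys): with committed entries {a: 1, c: 3}, a pending set b=2, and a pending remove of c, iterating from 'a' yields [a, 1], flushes [b, 2] from the batch before reaching c, then skips c entirely because its remove entry is null (a whole-key delete). */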
[]); + if (mergedValues) { + count++; + yield [key, mergedValues]; + } + } + } + + // emit all the uncommitted data that would be captured by this iterator + while (uncommittedEntriesIdx < uncommittedEntries.length && checkLimit()) { + const entry = uncommittedEntries[uncommittedEntriesIdx]; + if (endKey && Buffer.compare(entry[0], endKey) !== cmpDirection) { + break; + } + count++; + yield [entry[0], map(entry[1])]; + uncommittedEntriesIdx++; + } + } + + public async commit() { + this.assertIsOpen(); + this.close(); + await this.channel.sendMessage(LMDBMessageType.BATCH, { + batches: new Map([ + [Database.DATA, this.dataBatch], + [Database.INDEX, this.indexBatch], + ]), + }); + } +} diff --git a/yarn-project/kv-store/src/lmdb/store.ts b/yarn-project/kv-store/src/lmdb/store.ts index d78030ec373c..82360fd80ae9 100644 --- a/yarn-project/kv-store/src/lmdb/store.ts +++ b/yarn-project/kv-store/src/lmdb/store.ts @@ -7,7 +7,7 @@ import { tmpdir } from 'os'; import { join } from 'path'; import { type AztecArray, type AztecAsyncArray } from '../interfaces/array.js'; -import { type Key } from '../interfaces/common.js'; +import { type Key, type StoreSize } from '../interfaces/common.js'; import { type AztecAsyncCounter, type AztecCounter } from '../interfaces/counter.js'; import { type AztecAsyncMap, @@ -216,7 +216,7 @@ export class AztecLmdbStore implements AztecKVStore, AztecAsyncKVStore { } } - estimateSize(): { mappingSize: number; actualSize: number; numItems: number } { + estimateSize(): Promise { const stats = this.#rootDb.getStats(); // The 'mapSize' is the total amount of virtual address space allocated to the DB (effectively the maximum possible size) // http://www.lmdb.tech/doc/group__mdb.html#a4bde3c8b676457342cba2fe27aed5fbd @@ -226,11 +226,11 @@ export class AztecLmdbStore implements AztecKVStore, AztecAsyncKVStore { } const dataResult = this.estimateSubDBSize(this.#data); const multiResult = this.estimateSubDBSize(this.#multiMapData); - return { + return Promise.resolve({ mappingSize: mapSize, actualSize: dataResult.actualSize + multiResult.actualSize, numItems: dataResult.numItems + multiResult.numItems, - }; + }); } private estimateSubDBSize(db: Database): { actualSize: number; numItems: number } { diff --git a/yarn-project/kv-store/tsconfig.json b/yarn-project/kv-store/tsconfig.json index bd860591ecb4..abd7877e8adf 100644 --- a/yarn-project/kv-store/tsconfig.json +++ b/yarn-project/kv-store/tsconfig.json @@ -15,6 +15,9 @@ { "path": "../foundation" }, + { + "path": "../native" + }, { "path": "../circuits.js" } diff --git a/yarn-project/native/.eslintrc.cjs b/yarn-project/native/.eslintrc.cjs new file mode 100644 index 000000000000..e659927475c0 --- /dev/null +++ b/yarn-project/native/.eslintrc.cjs @@ -0,0 +1 @@ +module.exports = require('@aztec/foundation/eslint'); diff --git a/yarn-project/native/.gitignore b/yarn-project/native/.gitignore new file mode 100644 index 000000000000..68c5d18f00dc --- /dev/null +++ b/yarn-project/native/.gitignore @@ -0,0 +1,5 @@ +node_modules/ +/test-results/ +/playwright-report/ +/blob-report/ +/playwright/.cache/ diff --git a/yarn-project/native/.mocharc.json b/yarn-project/native/.mocharc.json new file mode 100644 index 000000000000..d96c357952d0 --- /dev/null +++ b/yarn-project/native/.mocharc.json @@ -0,0 +1,7 @@ +{ + "require": "ts-node/register", + "extensions": ["ts"], + "spec": ["./src/**/!(indexeddb)/*.test.ts"], + "node-option": ["experimental-specifier-resolution=node", "loader=ts-node/esm"], + "timeout": 30000 +} diff --git 
diff --git a/yarn-project/native/README.md b/yarn-project/native/README.md
new file mode 100644
index 000000000000..33d3ddfde930
--- /dev/null
+++ b/yarn-project/native/README.md
@@ -0,0 +1,3 @@
+# Native module
+
+A package containing all the native bindings needed to run Aztec.
diff --git a/yarn-project/native/package.json b/yarn-project/native/package.json
new file mode 100644
index 000000000000..9e98c3de8b74
--- /dev/null
+++ b/yarn-project/native/package.json
@@ -0,0 +1,78 @@
+{
+ "name": "@aztec/native",
+ "version": "0.1.0",
+ "type": "module",
+ "exports": {
+ ".": "./dest/index.js"
+ },
+ "scripts": {
+ "build": "yarn clean && yarn generate && tsc -b",
+ "build:dev": "tsc -b --watch",
+ "build:cpp": "PROJECT=$(pwd); cd $(git rev-parse --show-toplevel)/barretenberg/cpp; cmake --preset ${PRESET:-clang16-pic} && cmake --build --preset ${PRESET:-clang16-pic} --target nodejs_module && cd $PROJECT && yarn generate",
+ "clean:cpp": "rm -rf $(git rev-parse --show-toplevel)/barretenberg/cpp/build-pic",
+ "clean": "rm -rf ./dest .tsbuildinfo",
+ "formatting": "run -T prettier --check ./src && run -T eslint ./src",
+ "formatting:fix": "run -T eslint --fix ./src && run -T prettier -w ./src",
+ "test": "HARDWARE_CONCURRENCY=${HARDWARE_CONCURRENCY:-16} RAYON_NUM_THREADS=${RAYON_NUM_THREADS:-4} NODE_NO_WARNINGS=1 node --experimental-vm-modules ../node_modules/.bin/jest --passWithNoTests --maxWorkers=${JEST_MAX_WORKERS:-8}",
+ "generate": "mkdir -p build && cp -v $(git rev-parse --show-toplevel)/barretenberg/cpp/build-pic/lib/nodejs_module.node build"
+ },
+ "inherits": [
+ "../package.common.json",
+ "./package.local.json"
+ ],
+ "dependencies": {
+ "@aztec/foundation": "workspace:^",
+ "bindings": "^1.5.0",
+ "msgpackr": "^1.11.2"
+ },
+ "devDependencies": {
+ "@jest/globals": "^29.5.0",
+ "@types/bindings": "^1.5.5",
+ "@types/jest": "^29.5.0",
+ "@types/node": "^18.7.23",
+ "jest": "^29.5.0",
+ "ts-node": "^10.9.1",
+ "typescript": "^5.0.4"
+ },
+ "files": [
+ "dest",
+ "src",
+ "!*.test.*"
+ ],
+ "engines": {
+ "node": ">=18"
+ },
+ "jest": {
+ "extensionsToTreatAsEsm": [
+ ".ts"
+ ],
+ "transform": {
+ "^.+\\.tsx?$": [
+ "@swc/jest",
+ {
+ "jsc": {
+ "parser": {
+ "syntax": "typescript",
+ "decorators": true
+ },
+ "transform": {
+ "decoratorVersion": "2022-03"
+ }
+ }
+ }
+ ]
+ },
+ "moduleNameMapper": {
+ "^(\\.{1,2}/.*)\\.[cm]?js$": "$1"
+ },
+ "reporters": [
+ "default"
+ ],
+ "testRegex": "./src/.*\\.test\\.(js|mjs|ts)$",
+ "rootDir": "./src",
+ "testTimeout": 30000,
+ "setupFiles": [
+ "../../foundation/src/jest/setup.mjs"
+ ]
+ }
+}
diff --git a/yarn-project/native/package.local.json b/yarn-project/native/package.local.json
new file mode 100644
index 000000000000..1f916740b46e
--- /dev/null
+++ b/yarn-project/native/package.local.json
@@ -0,0 +1,9 @@
+{
+ "scripts": {
+ "build": "yarn clean && yarn generate && tsc -b",
+ "build:dev": "tsc -b --watch",
+ "build:cpp": "PROJECT=$(pwd); cd $(git rev-parse --show-toplevel)/barretenberg/cpp; cmake --preset ${PRESET:-clang16-pic} && cmake --build --preset ${PRESET:-clang16-pic} --target nodejs_module && cd $PROJECT && yarn generate",
+ "clean:cpp": "rm -rf $(git rev-parse --show-toplevel)/barretenberg/cpp/build-pic",
+ "generate": "mkdir -p build && cp -v $(git rev-parse --show-toplevel)/barretenberg/cpp/build-pic/lib/nodejs_module.node build"
+ }
+}
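(Illustrative aside, not part of the diff: given the Jest wiring in the package.json above, a minimal consumer-side smoke test for the new package might look like the sketch below. `NativeWorldState` is the export introduced in src/native_module.ts further down this diff; the test itself is hypothetical.)

    import { describe, expect, it } from '@jest/globals';
    import { NativeWorldState } from '@aztec/native';

    describe('native bindings', () => {
      it('exposes the WorldState constructor loaded from nodejs_module.node', () => {
        expect(typeof NativeWorldState).toBe('function');
      });
    });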
diff --git a/yarn-project/native/src/index.ts b/yarn-project/native/src/index.ts
new file mode 100644
index 000000000000..64c51e1b18fc
--- /dev/null
+++ b/yarn-project/native/src/index.ts
@@ -0,0 +1,2 @@
+export * from './native_module.js';
+export { RoundtripDuration, MsgpackChannel } from './msgpack_channel.js';
diff --git a/yarn-project/native/src/msgpack_channel.ts b/yarn-project/native/src/msgpack_channel.ts
new file mode 100644
index 000000000000..05f05c1cbebd
--- /dev/null
+++ b/yarn-project/native/src/msgpack_channel.ts
@@ -0,0 +1,109 @@
+import { Fr } from '@aztec/foundation/fields';
+import { MessageHeader, TypedMessage } from '@aztec/foundation/message';
+
+import { Encoder, addExtension } from 'msgpackr';
+import { isAnyArrayBuffer } from 'util/types';
+
+export interface MessageReceiver {
+ call(msg: Buffer | Uint8Array): Promise<Buffer | Uint8Array>;
+}
+
+export type RoundtripDuration = {
+ encodingUs: number;
+ callUs: number;
+ decodingUs: number;
+ totalUs: number;
+};
+
+// small extension to pack a NodeJS Fr instance to a representation that the C++ code can understand
+// this only works for writes. Unpacking from C++ can't create Fr instances because the data is passed
+// as raw, untagged, buffers. On the NodeJS side we don't know what the buffer represents
+// Adding a tag would be a solution, but it would have to be done on both sides and it's unclear where else
+// C++ fr instances are sent/received/stored.
+addExtension({
+ Class: Fr,
+ write: fr => fr.toBuffer(),
+});
+
+type MessageBody<T extends number> = { [K in T]: object | void };
+
+export class MsgpackChannel<
+ M extends number = number,
+ Req extends MessageBody<M> = any,
+ Resp extends MessageBody<M> = any,
+> {
+ /** A long-lived msgpack encoder */
+ private encoder = new Encoder({
+ // always encode JS objects as MessagePack maps
+ // this makes it compatible with other MessagePack decoders
+ useRecords: false,
+ int64AsType: 'bigint',
+ });
+
+ private msgId = 1;
+
+ public constructor(private dest: MessageReceiver) {}
+
+ public async sendMessage<T extends M>(
+ msgType: T,
+ body: Req[T],
+ ): Promise<{ duration: RoundtripDuration; response: Resp[T] }> {
+ const duration: RoundtripDuration = {
+ callUs: 0,
+ totalUs: 0,
+ decodingUs: 0,
+ encodingUs: 0,
+ };
+
+ const start = process.hrtime.bigint();
+ const requestId = this.msgId++;
+
+ const request = new TypedMessage(msgType, new MessageHeader({ requestId }), body);
+ const encodedRequest = this.encoder.encode(request);
+ const encodingEnd = process.hrtime.bigint();
+ duration.encodingUs = Number((encodingEnd - start) / 1000n);
+
+ const encodedResponse = await this.dest.call(encodedRequest);
+ const callEnd = process.hrtime.bigint();
+ duration.callUs = Number((callEnd - encodingEnd) / 1000n);
+
+ const buf = Buffer.isBuffer(encodedResponse)
+ ? encodedResponse
+ : isAnyArrayBuffer(encodedResponse)
+ ? Buffer.from(encodedResponse)
+ : encodedResponse;
+
+ if (!Buffer.isBuffer(buf)) {
+ throw new TypeError(
+ 'Invalid encoded response: expected Buffer or ArrayBuffer, got ' +
+ (encodedResponse === null ? 'null' : typeof encodedResponse),
+ );
+ }
+
+ const decodedResponse = this.encoder.unpack(buf);
+ if (!TypedMessage.isTypedMessageLike(decodedResponse)) {
+ throw new TypeError(
+ 'Invalid response: expected TypedMessageLike, got ' +
+ (decodedResponse === null ? 'null' : typeof decodedResponse),
+ );
+ }
+
+ const response = TypedMessage.fromMessagePack(decodedResponse);
+ const decodingEnd = process.hrtime.bigint();
+ duration.decodingUs = Number((decodingEnd - callEnd) / 1000n);
+
+ if (response.header.requestId !== request.header.messageId) {
+ throw new Error(
+ 'Response ID does not match request: ' + response.header.requestId + ' != ' + request.header.messageId,
+ );
+ }
+
+ if (response.msgType !== request.msgType) {
+ throw new Error('Invalid response message type: ' + response.msgType + ' != ' + request.msgType);
+ }
+
+ duration.totalUs = Number((process.hrtime.bigint() - start) / 1000n);
+
+ return { duration, response: response.value };
+ }
+}
diff --git a/yarn-project/native/src/native_module.ts b/yarn-project/native/src/native_module.ts
new file mode 100644
index 000000000000..9a7a486e348d
--- /dev/null
+++ b/yarn-project/native/src/native_module.ts
@@ -0,0 +1,12 @@
+import bindings from 'bindings';
+
+import { type MessageReceiver } from './msgpack_channel.js';
+
+interface NativeClassCtor {
+ new (...args: unknown[]): MessageReceiver;
+}
+
+const nativeModule: Record<string, NativeClassCtor> = bindings('nodejs_module');
+
+export const NativeWorldState: NativeClassCtor = nativeModule.WorldState;
+export const NativeLMDBStore: NativeClassCtor = nativeModule.LMDBStore;
diff --git a/yarn-project/native/tsconfig.json b/yarn-project/native/tsconfig.json
new file mode 100644
index 000000000000..63f8ab3e9f75
--- /dev/null
+++ b/yarn-project/native/tsconfig.json
@@ -0,0 +1,14 @@
+{
+ "extends": "..",
+ "compilerOptions": {
+ "outDir": "dest",
+ "rootDir": "src",
+ "tsBuildInfoFile": ".tsbuildinfo"
+ },
+ "references": [
+ {
+ "path": "../foundation"
+ }
+ ],
+ "include": ["src"]
+}
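(Illustrative aside, not part of the diff: a sketch of how the pieces of @aztec/native compose once built. The message type value and empty body here are hypothetical, and the native constructors may require arguments not shown in this diff.)

    import { MsgpackChannel, NativeWorldState } from '@aztec/native';

    // wrap a native MessageReceiver in a typed msgpack channel
    const channel = new MsgpackChannel(new NativeWorldState());
    // sendMessage encodes the request, calls into C++, decodes the reply,
    // and reports where the round-trip time went
    const { duration, response } = await channel.sendMessage(1, {});
    console.log(`encode=${duration.encodingUs}us call=${duration.callUs}us decode=${duration.decodingUs}us`);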
diff --git a/yarn-project/p2p/src/bootstrap/bootstrap.ts b/yarn-project/p2p/src/bootstrap/bootstrap.ts
index 371d5789a4ab..1fda6fa5c411 100644
--- a/yarn-project/p2p/src/bootstrap/bootstrap.ts
+++ b/yarn-project/p2p/src/bootstrap/bootstrap.ts
@@ -1,6 +1,6 @@
import { type P2PBootstrapApi } from '@aztec/circuit-types/interfaces';
import { createLogger } from '@aztec/foundation/log';
-import { type AztecKVStore } from '@aztec/kv-store';
+import { type AztecAsyncKVStore } from '@aztec/kv-store';
import { OtelMetricsAdapter, type TelemetryClient } from '@aztec/telemetry-client';
import { Discv5, type Discv5EventEmitter } from '@chainsafe/discv5';
@@ -20,7 +20,7 @@ export class BootstrapNode implements P2PBootstrapApi {
private peerId?: PeerId;
constructor(
- private store: AztecKVStore,
+ private store: AztecAsyncKVStore,
private telemetry: TelemetryClient,
private logger = createLogger('p2p:bootstrap'),
) {}
diff --git a/yarn-project/p2p/src/client/factory.ts b/yarn-project/p2p/src/client/factory.ts
index ede72a17819a..c6de223d919a 100644
--- a/yarn-project/p2p/src/client/factory.ts
+++ b/yarn-project/p2p/src/client/factory.ts
@@ -8,7 +8,7 @@
import { type EpochCache } from '@aztec/epoch-cache';
import { createLogger } from '@aztec/foundation/log';
import { type AztecKVStore } from '@aztec/kv-store';
import { type DataStoreConfig } from '@aztec/kv-store/config';
-import { createStore } from '@aztec/kv-store/lmdb';
+import { createStore as createStoreV2 } from '@aztec/kv-store/lmdb-v2';
import { type TelemetryClient, getTelemetryClient } from '@aztec/telemetry-client';
import { P2PClient } from '../client/p2p_client.js';
@@ -43,8 +43,8 @@ export const createP2PClient = async (
) => {
let config = { ..._config };
const logger = createLogger('p2p');
- const store = deps.store ?? (await createStore('p2p', config, createLogger('p2p:lmdb')));
- const archive = await createStore('p2p-archive', config, createLogger('p2p-archive:lmdb'));
+ const store = await createStoreV2('p2p-v2', config, createLogger('p2p:lmdb-v2'));
+ const archive = await createStoreV2('p2p-archive', config, createLogger('p2p-archive:lmdb-v2'));
const mempools: MemPools = {
txPool: deps.txPool ?? new AztecKVTxPool(store, archive, telemetry, config.archivedTxLimit),
diff --git a/yarn-project/p2p/src/client/p2p_client.test.ts b/yarn-project/p2p/src/client/p2p_client.test.ts
index 61273ddd725a..a2690fe2ae6a 100644
--- a/yarn-project/p2p/src/client/p2p_client.test.ts
+++ b/yarn-project/p2p/src/client/p2p_client.test.ts
@@ -3,8 +3,8 @@ import { L2Block, P2PClientType, mockEpochProofQuote, mockTx } from '@aztec/circ
import { Fr } from '@aztec/circuits.js';
import { retryUntil } from '@aztec/foundation/retry';
import { sleep } from '@aztec/foundation/sleep';
-import { type AztecKVStore } from '@aztec/kv-store';
-import { openTmpStore } from '@aztec/kv-store/lmdb';
+import { type AztecAsyncKVStore } from '@aztec/kv-store';
+import { openTmpStore } from '@aztec/kv-store/lmdb-v2';
import { expect } from '@jest/globals';
import { type MockProxy, mock } from 'jest-mock-extended';
@@ -22,15 +22,15 @@ describe('In-Memory P2P Client', () => {
let mempools: MemPools;
let blockSource: MockL2BlockSource;
let p2pService: MockProxy<P2PService>;
- let kvStore: AztecKVStore;
+ let kvStore: AztecAsyncKVStore;
let client: P2PClient;
beforeEach(async () => {
txPool = mock<TxPool>();
- txPool.getAllTxs.mockReturnValue([]);
- txPool.getPendingTxHashes.mockReturnValue(Promise.resolve([]));
- txPool.getMinedTxHashes.mockReturnValue([]);
- txPool.getAllTxHashes.mockReturnValue([]);
+ txPool.getAllTxs.mockResolvedValue([]);
+ txPool.getPendingTxHashes.mockResolvedValue([]);
+ txPool.getMinedTxHashes.mockResolvedValue([]);
+ txPool.getAllTxHashes.mockResolvedValue([]);
p2pService = mock<P2PService>();
@@ -48,19 +48,18 @@ describe('In-Memory P2P Client', () => {
epochProofQuotePool,
};
- kvStore = openTmpStore();
+ kvStore = await openTmpStore('test');
client = new P2PClient(P2PClientType.Full, kvStore, blockSource, mempools, p2pService);
});
+ afterEach(async () => {
+ await kvStore.close();
+ });
+
const advanceToProvenBlock = async (getProvenBlockNumber: number, provenEpochNumber = getProvenBlockNumber) => {
blockSource.setProvenBlockNumber(getProvenBlockNumber);
blockSource.setProvenEpochNumber(provenEpochNumber);
- await retryUntil(
- () => Promise.resolve(client.getSyncedProvenBlockNum() >= getProvenBlockNumber),
- 'synced',
- 10,
- 0.1,
- );
+ await retryUntil(async () => (await client.getSyncedProvenBlockNum()) >= getProvenBlockNumber, 'synced', 10, 0.1);
};
afterEach(async () => {
@@ -106,10 +105,11 @@ describe('In-Memory P2P Client', () => {
it('restores the previous block number it was at', async () => {
await client.start();
+ const synchedBlock = await client.getSyncedLatestBlockNum();
await client.stop();
const client2 = new P2PClient(P2PClientType.Full, kvStore, blockSource, mempools, p2pService);
- expect(client2.getSyncedLatestBlockNum()).toEqual(client.getSyncedLatestBlockNum());
+ await expect(client2.getSyncedLatestBlockNum()).resolves.toEqual(synchedBlock);
});
it('deletes txs once block is proven', async () => {
@@ -253,7 +253,7 @@
const badTx = await mockTx();
badTx.data.constants.historicalHeader.globalVariables.blockNumber = new Fr(95);
- txPool.getAllTxs.mockReturnValue([goodTx, badTx]);
+ txPool.getAllTxs.mockResolvedValue([goodTx, badTx]);
blockSource.removeBlocks(10);
await sleep(150);
@@ -280,8 +280,8 @@
const badTx = await mockTx();
badTx.data.constants.historicalHeader.globalVariables.blockNumber = new Fr(95);
- txPool.getAllTxs.mockReturnValue([goodButOldTx, goodTx, badTx]);
- txPool.getMinedTxHashes.mockReturnValue([
+ txPool.getAllTxs.mockResolvedValue([goodButOldTx, goodTx, badTx]);
+ txPool.getMinedTxHashes.mockResolvedValue([
[await goodButOldTx.getTxHash(), 90],
[await goodTx.getTxHash(), 91],
]);
diff --git a/yarn-project/p2p/src/client/p2p_client.ts b/yarn-project/p2p/src/client/p2p_client.ts
index 624848c0bedd..0cb4a5b303d7 100644
--- a/yarn-project/p2p/src/client/p2p_client.ts
+++ b/yarn-project/p2p/src/client/p2p_client.ts
@@ -15,7 +15,7 @@
} from '@aztec/circuit-types';
import { INITIAL_L2_BLOCK_NUM } from '@aztec/circuits.js/constants';
import { createLogger } from '@aztec/foundation/log';
-import { type AztecKVStore, type AztecMap, type AztecSingleton } from '@aztec/kv-store';
+import { type AztecAsyncKVStore, type AztecAsyncMap, type AztecAsyncSingleton } from '@aztec/kv-store';
import {
Attributes,
type TelemetryClient,
@@ -126,7 +126,7 @@
* @param txHash - Hash of tx to return.
* @returns A single tx or undefined.
*/
- getTxByHashFromPool(txHash: TxHash): Tx | undefined;
+ getTxByHashFromPool(txHash: TxHash): Promise<Tx | undefined>;
/**
* Returns a transaction in the transaction pool by its hash, requesting it from the network if it is not found.
@@ -147,7 +147,7 @@
* @param txHash - Hash of the tx to query.
* @returns Pending or mined depending on its status, or undefined if not found.
*/
- getTxStatus(txHash: TxHash): 'pending' | 'mined' | undefined;
+ getTxStatus(txHash: TxHash): Promise<'pending' | 'mined' | undefined>;
/** Returns an iterator over pending txs on the mempool. */
iteratePendingTxs(): AsyncIterableIterator<Tx>;
@@ -194,9 +194,6 @@ export class P2PClient extends WithTracer implements P2P, P2P {
- /** Property that indicates whether the client is running. */
- private stopping = false;
-
/** The JS promise that will be running to keep the client's data in sync. Can be interrupted if the client is stopped. */
private runningPromise!: Promise<void>;
@@ -206,9 +203,9 @@
private latestBlockNumberAtStart = -1;
private provenBlockNumberAtStart = -1;
- private synchedBlockHashes: AztecMap<number, string>;
- private synchedLatestBlockNumber: AztecSingleton<number>;
- private synchedProvenBlockNumber: AztecSingleton<number>;
+ private synchedBlockHashes: AztecAsyncMap<number, string>;
+ private synchedLatestBlockNumber: AztecAsyncSingleton<number>;
+ private synchedProvenBlockNumber: AztecAsyncSingleton<number>;
private txPool: TxPool;
private attestationPool: T extends P2PClientType.Full ? AttestationPool : undefined;
@@ -231,8 +228,8 @@ export class P2PClient
* @param log - A logger.
*/
constructor(
- clientType: T,
- store: AztecKVStore,
+ _clientType: T,
+ store: AztecAsyncKVStore,
private l2BlockSource: L2BlockSource,
mempools: MemPools,
private p2pService: P2PService,
@@ -274,17 +271,17 @@
}
public getL2BlockHash(number: number): Promise<string | undefined> {
- return Promise.resolve(this.synchedBlockHashes.get(number));
+ return this.synchedBlockHashes.getAsync(number);
}
- public getL2Tips(): Promise<L2Tips> {
- const latestBlockNumber = this.getSyncedLatestBlockNum();
+ public async getL2Tips(): Promise<L2Tips> {
+ const latestBlockNumber = await this.getSyncedLatestBlockNum();
let latestBlockHash: string | undefined;
- const provenBlockNumber = this.getSyncedProvenBlockNum();
+ const provenBlockNumber = await this.getSyncedProvenBlockNum();
let provenBlockHash: string | undefined;
if (latestBlockNumber > 0) {
- latestBlockHash = this.synchedBlockHashes.get(latestBlockNumber);
+ latestBlockHash = await this.synchedBlockHashes.getAsync(latestBlockNumber);
if (typeof latestBlockHash === 'undefined') {
this.log.warn(`Block hash for latest block ${latestBlockNumber} not found`);
throw new Error();
@@ -292,7 +289,7 @@
}
if (provenBlockNumber > 0) {
- provenBlockHash = this.synchedBlockHashes.get(provenBlockNumber);
+ provenBlockHash = await this.synchedBlockHashes.getAsync(provenBlockNumber);
if (typeof provenBlockHash === 'undefined') {
this.log.warn(`Block hash for proven block ${provenBlockNumber} not found`);
throw new Error();
@@ -316,7 +313,7 @@
// TODO (alexg): I think we can prune the block hashes map here
break;
case 'chain-proven': {
- const from = this.getSyncedProvenBlockNum() + 1;
+ const from = (await this.getSyncedProvenBlockNum()) + 1;
const limit = event.blockNumber - from + 1;
await this.handleProvenL2Blocks(await this.l2BlockSource.getBlocks(from, limit));
break;
@@ -374,8 +371,8 @@
this.latestBlockNumberAtStart = await this.l2BlockSource.getBlockNumber();
this.provenBlockNumberAtStart = await this.l2BlockSource.getProvenBlockNumber();
- const syncedLatestBlock = this.getSyncedLatestBlockNum() + 1;
- const syncedProvenBlock = this.getSyncedProvenBlockNum() + 1;
+ const syncedLatestBlock = (await this.getSyncedLatestBlockNum()) + 1;
+ const syncedProvenBlock = (await this.getSyncedProvenBlockNum()) + 1;
// if there are blocks to be retrieved, go to a synching state
if (syncedLatestBlock <= this.latestBlockNumberAtStart || syncedProvenBlock <= this.provenBlockNumberAtStart) {
@@ -404,7 +401,6 @@
*/
public async stop() {
this.log.debug('Stopping p2p client...');
- this.stopping = true;
await this.p2pService.stop();
this.log.debug('Stopped p2p service');
await this.blockStream.stop();
@@ -476,13 +472,13 @@
}
public async getPendingTxCount(): Promise<number> {
- return (await this.txPool.getPendingTxHashes()).length;
+ const pendingTxs = await this.txPool.getPendingTxHashes();
+ return pendingTxs.length;
}
public async *iteratePendingTxs(): AsyncIterableIterator<Tx> {
- const txHashes = await this.txPool.getPendingTxHashes();
- for (const txHash of txHashes) {
- const tx = this.txPool.getTxByHash(txHash);
+ for (const txHash of await this.txPool.getPendingTxHashes()) {
+ const tx = await this.txPool.getTxByHash(txHash);
if (tx) {
yield tx;
}
@@ -497,13 +493,13 @@
if (filter === 'all') {
return this.txPool.getAllTxs();
} else if (filter === 'mined') {
- return this.txPool
- .getMinedTxHashes()
- .map(([txHash]) => this.txPool.getTxByHash(txHash))
- .filter((tx): tx is Tx => !!tx);
+ const minedHashes = await this.txPool.getMinedTxHashes();
+ const minedTx = await Promise.all(minedHashes.map(([txHash]) => this.txPool.getTxByHash(txHash)));
+ return minedTx.filter((tx): tx is Tx => !!tx);
} else if (filter === 'pending') {
- const txHashes = await this.txPool.getPendingTxHashes();
- return txHashes.map(txHash => this.txPool.getTxByHash(txHash)).filter((tx): tx is Tx => !!tx);
+ const pendingHashes = await this.txPool.getPendingTxHashes();
+ const pendingTxs = await Promise.all(pendingHashes.map(txHash => this.txPool.getTxByHash(txHash)));
+ return pendingTxs.filter((tx): tx is Tx => !!tx);
} else {
const _: never = filter;
throw new Error(`Unknown filter ${filter}`);
@@ -515,7 +511,7 @@
* @param txHash - Hash of the transaction to look for in the pool.
* @returns A single tx or undefined.
*/
- getTxByHashFromPool(txHash: TxHash): Tx | undefined {
+ getTxByHashFromPool(txHash: TxHash): Promise<Tx | undefined> {
return this.txPool.getTxByHash(txHash);
}
@@ -525,10 +521,10 @@
* @param txHash - Hash of the transaction to look for in the pool.
* @returns A single tx or undefined.
*/
- getTxByHash(txHash: TxHash): Promise<Tx | undefined> {
- const tx = this.txPool.getTxByHash(txHash);
+ async getTxByHash(txHash: TxHash): Promise<Tx | undefined> {
+ const tx = await this.txPool.getTxByHash(txHash);
if (tx) {
- return Promise.resolve(tx);
+ return tx;
}
return this.requestTxByHash(txHash);
}
@@ -539,7 +535,7 @@
* @returns A single tx or undefined.
*/
getArchivedTxByHash(txHash: TxHash): Promise<Tx | undefined> {
- return Promise.resolve(this.txPool.getArchivedTxByHash(txHash));
+ return this.txPool.getArchivedTxByHash(txHash);
}
/**
@@ -558,7 +554,7 @@
* @param txHash - Hash of the tx to query.
* @returns Pending or mined depending on its status, or undefined if not found.
*/
- public getTxStatus(txHash: TxHash): 'pending' | 'mined' | undefined {
+ public getTxStatus(txHash: TxHash): Promise<'pending' | 'mined' | undefined> {
return this.txPool.getTxStatus(txHash);
}
@@ -593,16 +589,16 @@
* Public function to check the latest block number that the P2P client is synced to.
* @returns Block number of latest L2 Block we've synced with.
*/
- public getSyncedLatestBlockNum() {
- return this.synchedLatestBlockNumber.get() ?? INITIAL_L2_BLOCK_NUM - 1;
+ public async getSyncedLatestBlockNum(): Promise<number> {
+ return (await this.synchedLatestBlockNumber.getAsync()) ?? INITIAL_L2_BLOCK_NUM - 1;
}
/**
* Public function to check the latest proven block number that the P2P client is synced to.
* @returns Block number of latest proven L2 Block we've synced with.
*/
- public getSyncedProvenBlockNum() {
- return this.synchedProvenBlockNumber.get() ?? INITIAL_L2_BLOCK_NUM - 1;
+ public async getSyncedProvenBlockNum(): Promise<number> {
+ return (await this.synchedProvenBlockNumber.getAsync()) ?? INITIAL_L2_BLOCK_NUM - 1;
}
/**
@@ -610,18 +606,19 @@
* @returns Information about p2p client status: state & syncedToBlockNum.
*/
public async getStatus(): Promise<P2PSyncState> {
- const blockNumber = this.getSyncedLatestBlockNum();
+ const blockNumber = await this.getSyncedLatestBlockNum();
const blockHash =
- blockNumber == 0
+ blockNumber === 0
? ''
: await this.l2BlockSource
.getBlockHeader(blockNumber)
.then(header => header?.hash())
.then(hash => hash?.toString());
- return Promise.resolve({
+
+ return {
state: this.currentState,
syncedToL2Block: { number: blockNumber, hash: blockHash },
- } as P2PSyncState);
+ } as P2PSyncState;
}
/**
@@ -717,7 +714,7 @@
*/
private async handlePruneL2Blocks(latestBlock: number): Promise<void> {
const txsToDelete: TxHash[] = [];
- for (const tx of this.txPool.getAllTxs()) {
+ for (const tx of await this.txPool.getAllTxs()) {
// every tx that's been generated against a block that has now been pruned is no longer valid
if (tx.data.constants.historicalHeader.globalVariables.blockNumber.toNumber() > latestBlock) {
txsToDelete.push(await tx.getTxHash());
}
@@ -738,7 +735,7 @@
// NOTE: we can't move _all_ txs back to pending because the tx pool could keep hold of mined txs for longer
// (see this.keepProvenTxsFor)
const txsToMoveToPending: TxHash[] = [];
- for (const [txHash, blockNumber] of this.txPool.getMinedTxHashes()) {
+ for (const [txHash, blockNumber] of await this.txPool.getMinedTxHashes()) {
if (blockNumber > latestBlock) {
txsToMoveToPending.push(txHash);
}
@@ -754,8 +751,8 @@
private async startServiceIfSynched() {
if (
this.currentState === P2PClientState.SYNCHING &&
- this.getSyncedLatestBlockNum() >= this.latestBlockNumberAtStart &&
- this.getSyncedProvenBlockNum() >= this.provenBlockNumberAtStart
+ (await this.getSyncedLatestBlockNum()) >= this.latestBlockNumberAtStart &&
+ (await this.getSyncedProvenBlockNum()) >= this.provenBlockNumberAtStart
) {
this.log.debug(`Synched to blocks at start`);
this.setCurrentState(P2PClientState.RUNNING);
@@ -775,16 +772,4 @@
this.currentState = newState;
this.log.debug(`Moved from state ${P2PClientState[oldState]} to ${P2PClientState[this.currentState]}`);
}
-
- private async publishStoredTxs() {
- if (!this.isReady()) {
- return;
- }
-
- const txs = this.txPool.getAllTxs();
- if (txs.length > 0) {
- this.log.debug(`Publishing ${txs.length} previously stored txs`);
- await Promise.all(txs.map(tx => this.p2pService.propagate(tx)));
- }
- }
}
diff --git a/yarn-project/p2p/src/mem_pools/attestation_pool/attestation_pool_test_suite.ts b/yarn-project/p2p/src/mem_pools/attestation_pool/attestation_pool_test_suite.ts
index daec7271ac82..6cdd05f977a8 100644
--- a/yarn-project/p2p/src/mem_pools/attestation_pool/attestation_pool_test_suite.ts
+++ b/yarn-project/p2p/src/mem_pools/attestation_pool/attestation_pool_test_suite.ts
@@ -200,18 +200,6 @@ export function describeAttestationPool(getAttestationPool: () => AttestationPoo
compareAttestations(retreivedAttestationsAfterDeleteForOtherProposal, attestations2);
});
- it('Should blanket delete attestations per slot and proposal (does not perform db ops if there are no attestations)', async () => {
- const slotNumber = 420;
- const proposalId = 'proposalId';
-
- const retreivedAttestations = await ap.getAttestationsForSlot(BigInt(slotNumber), proposalId);
- expect(retreivedAttestations.length).toBe(0);
-
- await ap.deleteAttestationsForSlotAndProposal(BigInt(slotNumber), proposalId);
-
- expect(metricsMock.recordRemovedObjects).toHaveBeenCalledTimes(0);
- });
-
it('Should delete attestations older than a given slot', async () => {
const slotNumbers = [1, 2, 3, 69, 72, 74, 88, 420];
const attestations = (
diff --git a/yarn-project/p2p/src/mem_pools/attestation_pool/kv_attestation_pool.test.ts b/yarn-project/p2p/src/mem_pools/attestation_pool/kv_attestation_pool.test.ts
index 2264715fa328..94cf14c2cc3c 100644
--- a/yarn-project/p2p/src/mem_pools/attestation_pool/kv_attestation_pool.test.ts
+++ b/yarn-project/p2p/src/mem_pools/attestation_pool/kv_attestation_pool.test.ts
@@ -1,17 +1,19 @@
-import { type AztecKVStore } from '@aztec/kv-store';
-import { openTmpStore } from '@aztec/kv-store/lmdb';
+import { type AztecAsyncKVStore } from '@aztec/kv-store';
+import { openTmpStore } from '@aztec/kv-store/lmdb-v2';
import { describeAttestationPool } from './attestation_pool_test_suite.js';
import { KvAttestationPool } from './kv_attestation_pool.js';
describe('KV Attestation Pool', () => {
let kvAttestationPool: KvAttestationPool;
- let store: AztecKVStore;
+ let store: AztecAsyncKVStore;
- beforeEach(() => {
- store = openTmpStore();
+ beforeEach(async () => {
+ store = await openTmpStore('test');
kvAttestationPool = new KvAttestationPool(store);
});
+ afterEach(() => store.close());
+
describeAttestationPool(() => kvAttestationPool);
});
diff --git a/yarn-project/p2p/src/mem_pools/attestation_pool/kv_attestation_pool.ts b/yarn-project/p2p/src/mem_pools/attestation_pool/kv_attestation_pool.ts
index 051f407ab58c..c21671929441 100644
--- a/yarn-project/p2p/src/mem_pools/attestation_pool/kv_attestation_pool.ts
+++ b/yarn-project/p2p/src/mem_pools/attestation_pool/kv_attestation_pool.ts
@@ -1,7 +1,8 @@
import { BlockAttestation } from '@aztec/circuit-types';
import { Fr } from '@aztec/foundation/fields';
+import { toArray } from '@aztec/foundation/iterable';
import { createLogger } from '@aztec/foundation/log';
-import { type AztecKVStore, type AztecMapWithSize, type AztecMultiMap } from '@aztec/kv-store';
+import { type AztecAsyncKVStore, type AztecAsyncMap, type AztecAsyncMultiMap } from '@aztec/kv-store';
import { type TelemetryClient, getTelemetryClient } from '@aztec/telemetry-client';
import { PoolInstrumentation, PoolName } from '../instrumentation.js';
@@ -10,146 +11,149 @@ import { type AttestationPool } from './attestation_pool.js';
export class KvAttestationPool implements AttestationPool {
private metrics: PoolInstrumentation;
- // Index of all proposal ids in a slot
- private attestations: AztecMultiMap<string, string>;
+ private attestations: AztecAsyncMap<string, Buffer>;
+ private proposalsForSlot: AztecAsyncMultiMap<string, string>;
+ private attestationsForProposal: AztecAsyncMultiMap<string, string>;
constructor(
- private store: AztecKVStore,
+ private store: AztecAsyncKVStore,
telemetry: TelemetryClient = getTelemetryClient(),
private log = createLogger('aztec:attestation_pool'),
) {
- this.attestations = store.openMultiMap('attestations');
+ this.attestations = store.openMap('attestations');
+ this.proposalsForSlot = store.openMultiMap('proposals_for_slot');
+ this.attestationsForProposal = store.openMultiMap('attestations_for_proposal');
+
this.metrics = new PoolInstrumentation(telemetry, PoolName.ATTESTATION_POOL);
}
- private getProposalMapKey(slot: string, proposalId: string): string {
- return `proposal-${slot}-${proposalId}`;
+ private getProposalKey(slot: number | bigint | Fr | string, proposalId: Fr | string | Buffer): string {
+ const slotStr = typeof slot === 'string' ? slot : new Fr(slot).toString();
+ const proposalIdStr =
+ typeof proposalId === 'string'
+ ? proposalId
+ : Buffer.isBuffer(proposalId)
+ ? Fr.fromBuffer(proposalId).toString()
+ : proposalId.toString();
+
+ return `${slotStr}-${proposalIdStr}`;
+ }
- /**
- * Get the proposal map for a given slot and proposalId
- *
- * Essentially a nested mapping of address -> attestation
- *
- * @param slot - The slot to get the proposal map for
- * @param proposalId - The proposalId to get the map for
- * @returns The proposal map
- */
- private getProposalMap(slot: string, proposalId: string): AztecMapWithSize<string, Buffer> {
- const mapKey = this.getProposalMapKey(slot, proposalId);
- return this.store.openMapWithSize(mapKey);
+ private getAttestationKey(slot: number | bigint | Fr | string, proposalId: Fr | string, address: string): string {
+ return `${this.getProposalKey(slot, proposalId)}-${address}`;
}
public async addAttestations(attestations: BlockAttestation[]): Promise<void> {
- for (const attestation of attestations) {
- const slotNumber = attestation.payload.header.globalVariables.slotNumber;
- const proposalId = attestation.archive.toString();
- const address = (await attestation.getSender()).toString();
+ await this.store.transactionAsync(async () => {
+ for (const attestation of attestations) {
+ const slotNumber = attestation.payload.header.globalVariables.slotNumber;
+ const proposalId = attestation.archive;
+ const address = (await attestation.getSender()).toString();
- // Index the proposalId in the slot map
- await this.attestations.set(slotNumber.toString(), proposalId);
+ await this.attestations.set(this.getAttestationKey(slotNumber, proposalId, address), attestation.toBuffer());
- // Store the actual attestation in the proposal map
- const proposalMap = this.getProposalMap(slotNumber.toString(), proposalId);
- await proposalMap.set(address, attestation.toBuffer());
+ await this.proposalsForSlot.set(slotNumber.toString(), proposalId.toString());
+ await this.attestationsForProposal.set(
+ this.getProposalKey(slotNumber, proposalId),
+ this.getAttestationKey(slotNumber, proposalId, address),
+ );
- this.log.verbose(`Added attestation for slot ${slotNumber.toNumber()} from ${address}`, {
- slotNumber: slotNumber.toNumber(),
- });
- }
+ this.log.verbose(`Added attestation for slot ${slotNumber} from ${address}`);
+ }
+ });
this.metrics.recordAddedObjects(attestations.length);
}
- public getAttestationsForSlot(slot: bigint, proposalId: string): Promise<BlockAttestation[]> {
- const slotNumber = new Fr(slot).toString();
- const proposalMap = this.getProposalMap(slotNumber, proposalId);
- const attestations = proposalMap.values();
- const attestationsArray = Array.from(attestations).map(attestation => BlockAttestation.fromBuffer(attestation));
- return Promise.resolve(attestationsArray);
- }
+ public async getAttestationsForSlot(slot: bigint, proposalId: string): Promise<BlockAttestation[]> {
+ const attestationIds = await toArray(
+ this.attestationsForProposal.getValuesAsync(this.getProposalKey(slot, proposalId)),
+ );
+ const attestations: BlockAttestation[] = [];
- public async deleteAttestationsOlderThan(oldestSlot: bigint): Promise<void> {
- const olderThan = [];
+ // alternatively iterate this.attestations starting from slot-proposal-EthAddress.zero
+ for (const id of attestationIds) {
+ const buf = await this.attestations.getAsync(id);
- const slots = this.attestations.keys();
- for (const slot of slots) {
- if (BigInt(slot) < oldestSlot) {
- olderThan.push(slot);
+ if (!buf) {
+ // this should not happen unless we lost writes
+ throw new Error('Attestation not found ' + id);
}
+
+ const attestation = BlockAttestation.fromBuffer(buf);
+ attestations.push(attestation);
}
- await Promise.all(olderThan.map(oldSlot => this.deleteAttestationsForSlot(BigInt(oldSlot))));
- return Promise.resolve();
+ return attestations;
}
+ public async deleteAttestationsOlderThan(oldestSlot: bigint): Promise<void> {
+ const olderThan = await toArray(this.proposalsForSlot.keysAsync({ end: new Fr(oldestSlot).toString() }));
+ for (const oldSlot of olderThan) {
+ await this.deleteAttestationsForSlot(BigInt(oldSlot));
+ }
+ }
- public async deleteAttestationsForSlot(slot: bigint): Promise<void> {
- const deletionPromises = [];
-
- const slotString = new Fr(slot).toString();
+ public async deleteAttestationsForSlot(slot: bigint): Promise<void> {
+ const slotFr = new Fr(slot);
let numberOfAttestations = 0;
- const proposalIds = this.attestations.getValues(slotString);
-
- if (proposalIds) {
+ await this.store.transactionAsync(async () => {
+ const proposalIds = await toArray(this.proposalsForSlot.getValuesAsync(slotFr.toString()));
for (const proposalId of proposalIds) {
- const proposalMap = this.getProposalMap(slotString, proposalId);
- numberOfAttestations += proposalMap.size();
- deletionPromises.push(proposalMap.clear());
- }
- }
+ const attestations = await toArray(
+ this.attestationsForProposal.getValuesAsync(this.getProposalKey(slotFr, proposalId)),
+ );
- await Promise.all(deletionPromises);
+ numberOfAttestations += attestations.length;
+ for (const attestation of attestations) {
+ await this.attestations.delete(attestation);
+ }
+
+ await this.attestationsForProposal.delete(this.getProposalKey(slotFr, proposalId));
+ }
+ });
this.log.verbose(`Removed ${numberOfAttestations} attestations for slot ${slot}`);
this.metrics.recordRemovedObjects(numberOfAttestations);
- return Promise.resolve();
}
public async deleteAttestationsForSlotAndProposal(slot: bigint, proposalId: string): Promise<void> {
- const deletionPromises = [];
-
- const slotString = new Fr(slot).toString();
- const exists = this.attestations.get(slotString);
-
- if (exists) {
- // Remove the proposalId from the slot index
- deletionPromises.push(this.attestations.deleteValue(slotString, proposalId));
-
- // Delete all attestations for the proposalId
- const proposalMap = this.getProposalMap(slotString, proposalId);
- const numberOfAttestations = proposalMap.size();
- deletionPromises.push(proposalMap.clear());
+ let numberOfAttestations = 0;
+ await this.store.transactionAsync(async () => {
+ const slotString = new Fr(slot).toString();
+ const attestations = await toArray(
+ this.attestationsForProposal.getValuesAsync(this.getProposalKey(slot, proposalId)),
+ );
+
+ numberOfAttestations += attestations.length;
+ for (const attestation of attestations) {
+ await this.attestations.delete(attestation);
}
- this.log.verbose(`Removed ${numberOfAttestations} attestations for slot ${slot} and proposal ${proposalId}`);
- this.metrics.recordRemovedObjects(numberOfAttestations);
- }
+ await this.proposalsForSlot.deleteValue(slotString, proposalId);
+ await this.attestationsForProposal.delete(this.getProposalKey(slotString, proposalId));
});
- await Promise.all(deletionPromises);
- return Promise.resolve();
+ this.log.verbose(`Removed ${numberOfAttestations} attestations for slot ${slot} and proposal ${proposalId}`);
+ this.metrics.recordRemovedObjects(numberOfAttestations);
}
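(Illustrative aside, not part of the diff: the composite keys these delete methods walk all derive from getProposalKey/getAttestationKey above, so every index entry can be rebuilt from the attestation itself. For a slot 42, proposal id P and sender address A, the three stores hold roughly:)

    // proposalsForSlot:        key `${new Fr(42)}`           -> value `${P}`
    // attestationsForProposal: key `${new Fr(42)}-${P}`      -> value `${new Fr(42)}-${P}-${A}`
    // attestations:            key `${new Fr(42)}-${P}-${A}` -> value attestation.toBuffer()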
public async deleteAttestations(attestations: BlockAttestation[]): Promise<void> {
- const deletionPromises = [];
+ await this.store.transactionAsync(async () => {
+ for (const attestation of attestations) {
+ const slotNumber = attestation.payload.header.globalVariables.slotNumber;
+ const proposalId = attestation.archive;
+ const address = (await attestation.getSender()).toString();
- for (const attestation of attestations) {
- const slotNumber = attestation.payload.header.globalVariables.slotNumber.toString();
- const proposalId = attestation.archive.toString();
- const proposalMap = this.getProposalMap(slotNumber, proposalId);
+ await this.attestations.delete(this.getAttestationKey(slotNumber, proposalId, address));
+ await this.attestationsForProposal.deleteValue(
+ this.getProposalKey(slotNumber, proposalId),
+ this.getAttestationKey(slotNumber, proposalId, address),
+ );
- if (proposalMap) {
- const address = (await attestation.getSender()).toString();
- deletionPromises.push(proposalMap.delete(address));
this.log.debug(`Deleted attestation for slot ${slotNumber} from ${address}`);
}
-
- if (proposalMap.size() === 0) {
- deletionPromises.push(this.attestations.deleteValue(slotNumber, proposalId));
- }
- }
-
- await Promise.all(deletionPromises);
-
+ });
this.metrics.recordRemovedObjects(attestations.length);
- return Promise.resolve();
}
}
diff --git a/yarn-project/p2p/src/mem_pools/tx_pool/aztec_kv_tx_pool.test.ts b/yarn-project/p2p/src/mem_pools/tx_pool/aztec_kv_tx_pool.test.ts
index b76166d1c12d..cbcc397bcd24 100644
--- a/yarn-project/p2p/src/mem_pools/tx_pool/aztec_kv_tx_pool.test.ts
+++ b/yarn-project/p2p/src/mem_pools/tx_pool/aztec_kv_tx_pool.test.ts
@@ -1,20 +1,20 @@
import { mockTx } from '@aztec/circuit-types';
-import { openTmpStore } from '@aztec/kv-store/lmdb';
+import { openTmpStore } from '@aztec/kv-store/lmdb-v2';
import { AztecKVTxPool } from './aztec_kv_tx_pool.js';
import { describeTxPool } from './tx_pool_test_suite.js';
describe('KV TX pool', () => {
let txPool: AztecKVTxPool;
- beforeEach(() => {
- txPool = new AztecKVTxPool(openTmpStore(), openTmpStore());
+ beforeEach(async () => {
+ txPool = new AztecKVTxPool(await openTmpStore('p2p'), await openTmpStore('archive'));
});
describeTxPool(() => txPool);
it('Returns archived txs and purges archived txs once the archived tx limit is reached', async () => {
// set the archived tx limit to 2
- txPool = new AztecKVTxPool(openTmpStore(), openTmpStore(), undefined, 2);
+ txPool = new AztecKVTxPool(await openTmpStore('p2p'), await openTmpStore('archive'), undefined, 2);
const tx1 = await mockTx(1);
const tx2 = await mockTx(2);
@@ -25,21 +25,21 @@
// delete two txs and assert that they are properly archived
await txPool.deleteTxs([await tx1.getTxHash(), await tx2.getTxHash()]);
- expect(txPool.getArchivedTxByHash(await tx1.getTxHash())).toEqual(tx1);
- expect(txPool.getArchivedTxByHash(await tx2.getTxHash())).toEqual(tx2);
+ await expect(txPool.getArchivedTxByHash(await tx1.getTxHash())).resolves.toEqual(tx1);
+ await expect(txPool.getArchivedTxByHash(await tx2.getTxHash())).resolves.toEqual(tx2);
// delete a single tx and assert that the first tx is purged and the new tx is archived
await txPool.deleteTxs([await tx3.getTxHash()]);
- expect(txPool.getArchivedTxByHash(await tx1.getTxHash())).toBeUndefined();
- expect(txPool.getArchivedTxByHash(await tx2.getTxHash())).toEqual(tx2);
- expect(txPool.getArchivedTxByHash(await tx3.getTxHash())).toEqual(tx3);
+ await expect(txPool.getArchivedTxByHash(await tx1.getTxHash())).resolves.toBeUndefined();
+ await expect(txPool.getArchivedTxByHash(await tx2.getTxHash())).resolves.toEqual(tx2);
+ await expect(txPool.getArchivedTxByHash(await tx3.getTxHash())).resolves.toEqual(tx3);
// delete multiple txs and assert that the old txs are purged and the new txs are archived
await txPool.deleteTxs([await tx4.getTxHash(), await tx5.getTxHash()]);
- expect(txPool.getArchivedTxByHash(await tx1.getTxHash())).toBeUndefined();
- expect(txPool.getArchivedTxByHash(await tx2.getTxHash())).toBeUndefined();
- expect(txPool.getArchivedTxByHash(await tx3.getTxHash())).toBeUndefined();
- expect(txPool.getArchivedTxByHash(await tx4.getTxHash())).toEqual(tx4);
- expect(txPool.getArchivedTxByHash(await tx5.getTxHash())).toEqual(tx5);
+ await expect(txPool.getArchivedTxByHash(await tx1.getTxHash())).resolves.toBeUndefined();
+ await expect(txPool.getArchivedTxByHash(await tx2.getTxHash())).resolves.toBeUndefined();
+ await expect(txPool.getArchivedTxByHash(await tx3.getTxHash())).resolves.toBeUndefined();
+ await expect(txPool.getArchivedTxByHash(await tx4.getTxHash())).resolves.toEqual(tx4);
+ await expect(txPool.getArchivedTxByHash(await tx5.getTxHash())).resolves.toEqual(tx5);
});
});
diff --git a/yarn-project/p2p/src/mem_pools/tx_pool/aztec_kv_tx_pool.ts b/yarn-project/p2p/src/mem_pools/tx_pool/aztec_kv_tx_pool.ts
index 61f15c16e4c1..ed42df5086b7 100644
--- a/yarn-project/p2p/src/mem_pools/tx_pool/aztec_kv_tx_pool.ts
+++ b/yarn-project/p2p/src/mem_pools/tx_pool/aztec_kv_tx_pool.ts
@@ -1,8 +1,9 @@
import { Tx, TxHash } from '@aztec/circuit-types';
import { type TxAddedToPoolStats } from '@aztec/circuit-types/stats';
import { ClientIvcProof } from '@aztec/circuits.js';
+import { toArray } from '@aztec/foundation/iterable';
import { type Logger, createLogger } from '@aztec/foundation/log';
-import { type AztecKVStore, type AztecMap, type AztecMultiMap } from '@aztec/kv-store';
+import type { AztecAsyncKVStore, AztecAsyncMap, AztecAsyncMultiMap } from '@aztec/kv-store';
import { type TelemetryClient, getTelemetryClient } from '@aztec/telemetry-client';
import { PoolInstrumentation, PoolName } from '../instrumentation.js';
@@ -13,25 +14,25 @@ import { type TxPool } from './tx_pool.js';
/**
* KV implementation of the Transaction Pool.
*/
export class AztecKVTxPool implements TxPool {
- #store: AztecKVStore;
+ #store: AztecAsyncKVStore;
/** Our tx pool, stored as a Map, with K: tx hash and V: the transaction. */
- #txs: AztecMap<string, Buffer>;
+ #txs: AztecAsyncMap<string, Buffer>;
/** Index from tx hash to the block number in which they were mined, filtered by mined txs. */
- #minedTxHashToBlock: AztecMap<string, number>;
+ #minedTxHashToBlock: AztecAsyncMap<string, number>;
/** Index from tx priority (stored as hex) to its tx hash, filtered by pending txs. */
- #pendingTxPriorityToHash: AztecMultiMap<string, string>;
+ #pendingTxPriorityToHash: AztecAsyncMultiMap<string, string>;
/** KV store for archived txs. */
- #archive: AztecKVStore;
+ #archive: AztecAsyncKVStore;
/** Archived txs map for future lookup. */
- #archivedTxs: AztecMap<string, Buffer>;
+ #archivedTxs: AztecAsyncMap<string, Buffer>;
/** Indexes of the archived txs by insertion order. */
- #archivedTxIndices: AztecMap<number, string>;
+ #archivedTxIndices: AztecAsyncMap<number, string>;
/** Number of txs to archive. */
#archivedTxLimit: number;
@@ -49,8 +50,8 @@
* @param log - A logger.
*/
constructor(
- store: AztecKVStore,
- archive: AztecKVStore,
+ store: AztecAsyncKVStore,
+ archive: AztecAsyncKVStore,
telemetry: TelemetryClient = getTelemetryClient(),
archivedTxLimit: number = 0,
log = createLogger('p2p:tx_pool'),
@@ -75,16 +76,16 @@
}
let deletedPending = 0;
- return this.#store.transaction(() => {
+ return this.#store.transactionAsync(async () => {
for (const hash of txHashes) {
const key = hash.toString();
- void this.#minedTxHashToBlock.set(key, blockNumber);
+ await this.#minedTxHashToBlock.set(key, blockNumber);
- const tx = this.getTxByHash(hash);
+ const tx = await this.getTxByHash(hash);
if (tx) {
deletedPending++;
const fee = getPendingTxPriority(tx);
- void this.#pendingTxPriorityToHash.deleteValue(fee, key);
+ await this.#pendingTxPriorityToHash.deleteValue(fee, key);
}
}
this.#metrics.recordAddedObjects(txHashes.length, 'mined');
@@ -98,14 +99,14 @@
}
let markedAsPending = 0;
- return this.#store.transaction(() => {
+ return this.#store.transactionAsync(async () => {
for (const hash of txHashes) {
const key = hash.toString();
- void this.#minedTxHashToBlock.delete(key);
+ await this.#minedTxHashToBlock.delete(key);
- const tx = this.getTxByHash(hash);
+ const tx = await this.getTxByHash(hash);
if (tx) {
- void this.#pendingTxPriorityToHash.set(getPendingTxPriority(tx), key);
+ await this.#pendingTxPriorityToHash.set(getPendingTxPriority(tx), key);
markedAsPending++;
}
}
@@ -115,24 +116,23 @@
});
}
- public getPendingTxHashes(): Promise<TxHash[]> {
- return Promise.resolve(
- Array.from(this.#pendingTxPriorityToHash.values({ reverse: true })).map(x => TxHash.fromString(x)),
- );
+ public async getPendingTxHashes(): Promise<TxHash[]> {
+ const vals = await toArray(this.#pendingTxPriorityToHash.valuesAsync({ reverse: true }));
+ return vals.map(x => TxHash.fromString(x));
}
- public getMinedTxHashes(): [TxHash, number][] {
- return Array.from(this.#minedTxHashToBlock.entries()).map(([txHash, blockNumber]) => [
- TxHash.fromString(txHash),
- blockNumber,
- ]);
+ public async getMinedTxHashes(): Promise<[TxHash, number][]> {
+ const vals = await toArray(this.#minedTxHashToBlock.entriesAsync());
+ return vals.map(([txHash, blockNumber]) => [TxHash.fromString(txHash), blockNumber]);
}
- public getTxStatus(txHash: TxHash): 'pending' | 'mined' | undefined {
+ public async getTxStatus(txHash: TxHash): Promise<'pending' | 'mined' | undefined> {
const key = txHash.toString();
- if (this.#minedTxHashToBlock.has(key)) {
+ const [isMined, isKnown] = await Promise.all([this.#minedTxHashToBlock.hasAsync(key), this.#txs.hasAsync(key)]);
+
+ if (isMined) {
return 'mined';
- } else if (this.#txs.has(key)) {
+ } else if (isKnown) {
return 'pending';
} else {
return undefined;
@@ -144,8 +144,8 @@
* @param txHash - The generated tx hash.
* @returns The transaction, if found, 'undefined' otherwise.
*/
- public getTxByHash(txHash: TxHash): Tx | undefined {
- const buffer = this.#txs.get(txHash.toString());
+ public async getTxByHash(txHash: TxHash): Promise<Tx | undefined> {
+ const buffer = await this.#txs.getAsync(txHash.toString());
if (buffer) {
const tx = Tx.fromBuffer(buffer);
tx.setTxHash(txHash);
@@ -159,8 +159,8 @@
* @param txHash - The tx hash.
* @returns The transaction metadata, if found, 'undefined' otherwise.
*/
- public getArchivedTxByHash(txHash: TxHash): Tx | undefined {
- const buffer = this.#archivedTxs.get(txHash.toString());
+ public async getArchivedTxByHash(txHash: TxHash): Promise<Tx | undefined> {
+ const buffer = await this.#archivedTxs.getAsync(txHash.toString());
if (buffer) {
const tx = Tx.fromBuffer(buffer);
tx.setTxHash(txHash);
@@ -178,25 +178,27 @@
const hashesAndStats = await Promise.all(
txs.map(async tx => ({ txHash: await tx.getTxHash(), txStats: await tx.getStats() })),
);
- return this.#store.transaction(() => {
+ await this.#store.transactionAsync(async () => {
let pendingCount = 0;
- txs.forEach((tx, i) => {
- const { txHash, txStats } = hashesAndStats[i];
- this.#log.verbose(`Adding tx ${txHash.toString()} to pool`, {
- eventName: 'tx-added-to-pool',
- ...txStats,
- } satisfies TxAddedToPoolStats);
-
- const key = txHash.toString();
- void this.#txs.set(key, tx.toBuffer());
-
- if (!this.#minedTxHashToBlock.has(key)) {
- pendingCount++;
- // REFACTOR: Use an lmdb conditional write to avoid race conditions with this write tx
- void this.#pendingTxPriorityToHash.set(getPendingTxPriority(tx), key);
- this.#metrics.recordSize(tx);
- }
- });
+ await Promise.all(
+ txs.map(async (tx, i) => {
+ const { txHash, txStats } = hashesAndStats[i];
+ this.#log.verbose(`Adding tx ${txHash.toString()} to pool`, {
+ eventName: 'tx-added-to-pool',
+ ...txStats,
+ } satisfies TxAddedToPoolStats);
+
+ const key = txHash.toString();
+ await this.#txs.set(key, tx.toBuffer());
+
+ if (!(await this.#minedTxHashToBlock.hasAsync(key))) {
+ pendingCount++;
+ // REFACTOR: Use an lmdb conditional write to avoid race conditions with this write tx
+ await this.#pendingTxPriorityToHash.set(getPendingTxPriority(tx), key);
+ this.#metrics.recordSize(tx);
+ }
+ }),
+ );
this.#metrics.recordAddedObjects(pendingCount, 'pending');
});
@@ -212,16 +214,16 @@
let minedDeleted = 0;
const deletedTxs: Tx[] = [];
- const poolDbTx = this.#store.transaction(() => {
+ const poolDbTx = this.#store.transactionAsync(async () => {
for (const hash of txHashes) {
const key = hash.toString();
- const tx = this.getTxByHash(hash);
+ const tx = await this.getTxByHash(hash);
if (tx) {
const fee = getPendingTxPriority(tx);
- void this.#pendingTxPriorityToHash.deleteValue(fee, key);
+ await this.#pendingTxPriorityToHash.deleteValue(fee, key);
- const isMined = this.#minedTxHashToBlock.has(key);
+ const isMined = await this.#minedTxHashToBlock.hasAsync(key);
if (isMined) {
minedDeleted++;
} else {
@@ -232,8 +234,8 @@
deletedTxs.push(tx);
}
- void this.#txs.delete(key);
- void this.#minedTxHashToBlock.delete(key);
+ await this.#txs.delete(key);
+ await this.#minedTxHashToBlock.delete(key);
}
}
@@ -248,8 +250,9 @@
* Gets all the transactions stored in the pool.
* @returns Array of tx objects in the order they were added to the pool.
*/
- public getAllTxs(): Tx[] {
- return Array.from(this.#txs.entries()).map(([hash, buffer]) => {
+ public async getAllTxs(): Promise<Tx[]> {
+ const vals = await toArray(this.#txs.entriesAsync());
+ return vals.map(([hash, buffer]) => {
const tx = Tx.fromBuffer(buffer);
tx.setTxHash(TxHash.fromString(hash));
return tx;
@@ -260,8 +263,9 @@
* Gets the hashes of all transactions currently in the tx pool.
* @returns An array of transaction hashes found in the tx pool.
*/
- public getAllTxHashes(): TxHash[] {
- return Array.from(this.#txs.keys()).map(x => TxHash.fromString(x));
+ public async getAllTxHashes(): Promise<TxHash[]> {
+ const vals = await toArray(this.#txs.keysAsync());
+ return vals.map(x => TxHash.fromString(x));
}
/**
@@ -271,17 +275,19 @@
*/
private async archiveTxs(txs: Tx[]): Promise<void> {
const txHashes = await Promise.all(txs.map(tx => tx.getTxHash()));
- return this.#archive.transaction(() => {
+ await this.#archive.transactionAsync(async () => {
// calculate the head and tail indices of the archived txs by insertion order.
- let headIdx = (this.#archivedTxIndices.entries({ limit: 1, reverse: true }).next().value?.[0] ?? -1) + 1;
- let tailIdx = this.#archivedTxIndices.entries({ limit: 1 }).next().value?.[0] ?? 0;
+ let headIdx =
+ ((await this.#archivedTxIndices.entriesAsync({ limit: 1, reverse: true }).next()).value?.[0] ?? -1) + 1;
+ let tailIdx = (await this.#archivedTxIndices.entriesAsync({ limit: 1 }).next()).value?.[0] ?? 0;
- txs.forEach((tx, i) => {
+ for (let i = 0; i < txs.length; i++) {
+ const tx = txs[i];
while (headIdx - tailIdx >= this.#archivedTxLimit) {
- const txHash = this.#archivedTxIndices.get(tailIdx);
+ const txHash = await this.#archivedTxIndices.getAsync(tailIdx);
if (txHash) {
- void this.#archivedTxs.delete(txHash);
- void this.#archivedTxIndices.delete(tailIdx);
+ await this.#archivedTxs.delete(txHash);
+ await this.#archivedTxIndices.delete(tailIdx);
}
tailIdx++;
}
@@ -294,10 +300,10 @@
tx.publicTeardownFunctionCall,
);
const txHash = txHashes[i].toString();
- void this.#archivedTxs.set(txHash, archivedTx.toBuffer());
- void this.#archivedTxIndices.set(headIdx, txHash);
+ await this.#archivedTxs.set(txHash, archivedTx.toBuffer());
+ await this.#archivedTxIndices.set(headIdx, txHash);
headIdx++;
- });
+ }
});
}
}
diff --git a/yarn-project/p2p/src/mem_pools/tx_pool/memory_tx_pool.ts b/yarn-project/p2p/src/mem_pools/tx_pool/memory_tx_pool.ts
index 32670ea58df4..5930f32a7c04 100644
--- a/yarn-project/p2p/src/mem_pools/tx_pool/memory_tx_pool.ts
+++ b/yarn-project/p2p/src/mem_pools/tx_pool/memory_tx_pool.ts
@@ -69,26 +69,28 @@
}
public async getPendingTxHashes(): Promise<TxHash[]> {
- const txs = this.getAllTxs().sort(
+ const txs = (await this.getAllTxs()).sort(
(tx1, tx2) => -getPendingTxPriority(tx1).localeCompare(getPendingTxPriority(tx2)),
);
const txHashes = await Promise.all(txs.map(tx => tx.getTxHash()));
return txHashes.filter(txHash => this.pendingTxs.has(txHash.toBigInt()));
}
- public getMinedTxHashes(): [TxHash, number][] {
- return Array.from(this.minedTxs.entries()).map(([txHash, blockNumber]) => [TxHash.fromBigInt(txHash), blockNumber]);
+ public getMinedTxHashes(): Promise<[TxHash, number][]> {
+ return Promise.resolve(
+ Array.from(this.minedTxs.entries()).map(([txHash, blockNumber]) => [TxHash.fromBigInt(txHash), blockNumber]),
+ );
}
- public getTxStatus(txHash: TxHash): 'pending' | 'mined' | undefined {
+ public getTxStatus(txHash: TxHash): Promise<'pending' | 'mined' | undefined> {
const key = txHash.toBigInt();
if (this.pendingTxs.has(key)) {
- return 'pending';
+ return Promise.resolve('pending');
}
if (this.minedTxs.has(key)) {
- return 'mined';
+ return Promise.resolve('mined');
}
- return undefined;
+ return Promise.resolve(undefined);
}
/**
@@ -96,13 +98,13 @@
* @param txHash - The generated tx hash.
* @returns The transaction, if found, 'undefined' otherwise.
*/
- public getTxByHash(txHash: TxHash): Tx | undefined {
+ public getTxByHash(txHash: TxHash): Promise<Tx | undefined> {
const result = this.txs.get(txHash.toBigInt());
- return result === undefined ? undefined : Tx.clone(result);
+ return Promise.resolve(result === undefined ? undefined : Tx.clone(result));
}
- public getArchivedTxByHash(): Tx | undefined {
- return undefined;
+ public getArchivedTxByHash(): Promise<Tx | undefined> {
+ return Promise.resolve(undefined);
}
/**
@@ -158,15 +160,15 @@
* Gets all the transactions stored in the pool.
* @returns Array of tx objects in the order they were added to the pool.
*/
- public getAllTxs(): Tx[] {
- return Array.from(this.txs.values()).map(x => Tx.clone(x));
+ public getAllTxs(): Promise<Tx[]> {
+ return Promise.resolve(Array.from(this.txs.values()).map(x => Tx.clone(x)));
}
/**
* Gets the hashes of all transactions currently in the tx pool.
* @returns An array of transaction hashes found in the tx pool.
*/
- public getAllTxHashes(): TxHash[] {
- return Array.from(this.txs.keys()).map(x => TxHash.fromBigInt(x));
+ public getAllTxHashes(): Promise<TxHash[]> {
+ return Promise.resolve(Array.from(this.txs.keys()).map(x => TxHash.fromBigInt(x)));
}
}
diff --git a/yarn-project/p2p/src/mem_pools/tx_pool/tx_pool.ts b/yarn-project/p2p/src/mem_pools/tx_pool/tx_pool.ts
index 9774fda33222..3fcbfa2ff6d8 100644
--- a/yarn-project/p2p/src/mem_pools/tx_pool/tx_pool.ts
+++ b/yarn-project/p2p/src/mem_pools/tx_pool/tx_pool.ts
@@ -15,14 +15,14 @@
* @param txHash - The hash of the transaction, used as an ID.
* @returns The transaction, if found, 'undefined' otherwise.
*/
- getTxByHash(txHash: TxHash): Tx | undefined;
+ getTxByHash(txHash: TxHash): Promise<Tx | undefined>;
/**
* Checks if an archived transaction exists in the pool and returns it.
* @param txHash - The hash of the transaction, used as an ID.
* @returns The transaction, if found, 'undefined' otherwise.
*/
- getArchivedTxByHash(txHash: TxHash): Tx | undefined;
+ getArchivedTxByHash(txHash: TxHash): Promise<Tx | undefined>;
/**
* Marks the set of txs as mined, as opposed to pending.
@@ -47,13 +47,13 @@
* Gets all transactions currently in the tx pool.
* @returns An array of transaction objects found in the tx pool.
*/
- getAllTxs(): Tx[];
+ getAllTxs(): Promise<Tx[]>;
/**
* Gets the hashes of all transactions currently in the tx pool.
* @returns An array of transaction hashes found in the tx pool.
*/
- getAllTxHashes(): TxHash[];
+ getAllTxHashes(): Promise<TxHash[]>;
/**
* Gets the hashes of pending transactions currently in the tx pool sorted by priority (see getPendingTxPriority).
@@ -65,12 +65,12 @@
* Gets the hashes of mined transactions currently in the tx pool.
* @returns An array of mined transaction hashes found in the tx pool.
*/
- getMinedTxHashes(): [tx: TxHash, blockNumber: number][];
+ getMinedTxHashes(): Promise<[tx: TxHash, blockNumber: number][]>;
/**
* Returns whether the given tx hash is flagged as pending or mined.
* @param txHash - Hash of the tx to query.
* @returns Pending or mined depending on its status, or undefined if not found.
*/ - getTxStatus(txHash: TxHash): 'pending' | 'mined' | undefined; + getTxStatus(txHash: TxHash): Promise<'pending' | 'mined' | undefined>; } diff --git a/yarn-project/p2p/src/mem_pools/tx_pool/tx_pool_test_suite.ts b/yarn-project/p2p/src/mem_pools/tx_pool/tx_pool_test_suite.ts index 93ead92156c7..1e4129f8d499 100644 --- a/yarn-project/p2p/src/mem_pools/tx_pool/tx_pool_test_suite.ts +++ b/yarn-project/p2p/src/mem_pools/tx_pool/tx_pool_test_suite.ts @@ -19,9 +19,9 @@ export function describeTxPool(getTxPool: () => TxPool) { const tx1 = await mockTx(); await pool.addTxs([tx1]); - const poolTx = pool.getTxByHash(await tx1.getTxHash()); + const poolTx = await pool.getTxByHash(await tx1.getTxHash()); expect(await poolTx!.getTxHash()).toEqual(await tx1.getTxHash()); - expect(pool.getTxStatus(await tx1.getTxHash())).toEqual('pending'); + await expect(pool.getTxStatus(await tx1.getTxHash())).resolves.toEqual('pending'); await expect(pool.getPendingTxHashes()).resolves.toEqual([await tx1.getTxHash()]); }); @@ -31,8 +31,8 @@ export function describeTxPool(getTxPool: () => TxPool) { await pool.addTxs([tx1]); await pool.deleteTxs([await tx1.getTxHash()]); - expect(pool.getTxByHash(await tx1.getTxHash())).toBeFalsy(); - expect(pool.getTxStatus(await tx1.getTxHash())).toBeUndefined(); + await expect(pool.getTxByHash(await tx1.getTxHash())).resolves.toBeFalsy(); + await expect(pool.getTxStatus(await tx1.getTxHash())).resolves.toBeUndefined(); }); it('Marks txs as mined', async () => { @@ -42,9 +42,9 @@ export function describeTxPool(getTxPool: () => TxPool) { await pool.addTxs([tx1, tx2]); await pool.markAsMined([await tx1.getTxHash()], 1); - expect(pool.getTxByHash(await tx1.getTxHash())).toEqual(tx1); - expect(pool.getTxStatus(await tx1.getTxHash())).toEqual('mined'); - expect(pool.getMinedTxHashes()).toEqual([[await tx1.getTxHash(), 1]]); + await expect(pool.getTxByHash(await tx1.getTxHash())).resolves.toEqual(tx1); + await expect(pool.getTxStatus(await tx1.getTxHash())).resolves.toEqual('mined'); + await expect(pool.getMinedTxHashes()).resolves.toEqual([[await tx1.getTxHash(), 1]]); await expect(pool.getPendingTxHashes()).resolves.toEqual([await tx2.getTxHash()]); }); @@ -56,7 +56,7 @@ export function describeTxPool(getTxPool: () => TxPool) { await pool.markAsMined([await tx1.getTxHash()], 1); await pool.markMinedAsPending([await tx1.getTxHash()]); - expect(pool.getMinedTxHashes()).toEqual([]); + await expect(pool.getMinedTxHashes()).resolves.toEqual([]); const pending = await pool.getPendingTxHashes(); expect(pending).toHaveLength(2); expect(pending).toEqual(expect.arrayContaining([await tx1.getTxHash(), await tx2.getTxHash()])); @@ -70,8 +70,8 @@ export function describeTxPool(getTxPool: () => TxPool) { await pool.addTxs([tx1]); // this peer knows that tx2 was mined, but it does not have the tx object await pool.markAsMined([await tx1.getTxHash(), someTxHashThatThisPeerDidNotSee], 1); - expect(new Set(pool.getMinedTxHashes())).toEqual( - new Set([ + expect(await pool.getMinedTxHashes()).toEqual( + expect.arrayContaining([ [await tx1.getTxHash(), 1], [someTxHashThatThisPeerDidNotSee, 1], ]), @@ -79,7 +79,7 @@ export function describeTxPool(getTxPool: () => TxPool) { // reorg: both txs should now become available again await pool.markMinedAsPending([await tx1.getTxHash(), someTxHashThatThisPeerDidNotSee]); - expect(pool.getMinedTxHashes()).toEqual([]); + await expect(pool.getMinedTxHashes()).resolves.toEqual([]); await expect(pool.getPendingTxHashes()).resolves.toEqual([await tx1.getTxHash()]); // 
tx2 is not in the pool }); @@ -90,7 +90,7 @@ export function describeTxPool(getTxPool: () => TxPool) { await pool.addTxs([tx1, tx2, tx3]); - const poolTxs = pool.getAllTxs(); + const poolTxs = await pool.getAllTxs(); expect(poolTxs).toHaveLength(3); expect(poolTxs).toEqual(expect.arrayContaining([tx1, tx2, tx3])); }); @@ -102,7 +102,7 @@ export function describeTxPool(getTxPool: () => TxPool) { await pool.addTxs([tx1, tx2, tx3]); - const poolTxHashes = pool.getAllTxHashes(); + const poolTxHashes = await pool.getAllTxHashes(); expect(poolTxHashes).toHaveLength(3); expect(poolTxHashes).toEqual( expect.arrayContaining([await tx1.getTxHash(), await tx2.getTxHash(), await tx3.getTxHash()]), diff --git a/yarn-project/p2p/src/services/data_store.test.ts b/yarn-project/p2p/src/services/data_store.test.ts index 9784dc4d6e7a..71d260759418 100644 --- a/yarn-project/p2p/src/services/data_store.test.ts +++ b/yarn-project/p2p/src/services/data_store.test.ts @@ -1,6 +1,8 @@ import { randomBytes } from '@aztec/foundation/crypto'; import { all } from '@aztec/foundation/iterable'; +import { type AztecAsyncKVStore } from '@aztec/kv-store'; import { AztecLmdbStore } from '@aztec/kv-store/lmdb'; +import { openTmpStore } from '@aztec/kv-store/lmdb-v2'; import { type Datastore, @@ -21,22 +23,22 @@ const CLEANUP_TIMEOUT = 120_000; describe('AztecDatastore with AztecLmdbStore', () => { let datastore: AztecDatastore; - let aztecStore: AztecLmdbStore; + let aztecStore: AztecAsyncKVStore; - beforeEach(() => { - aztecStore = AztecLmdbStore.open(); + beforeEach(async () => { + aztecStore = await openTmpStore('test'); datastore = new AztecDatastore(aztecStore); }); afterEach(async () => { - await aztecStore.delete(); + await aztecStore.close(); }); it('should store and retrieve an item', async () => { const key = new Key('testKey'); const value = new Uint8Array([1, 2, 3]); await datastore.put(key, value); - const retrieved = datastore.get(key); + const retrieved = await datastore.get(key); expect(retrieved).toEqual(value); }); @@ -46,11 +48,7 @@ describe('AztecDatastore with AztecLmdbStore', () => { await datastore.put(key, new Uint8Array([1, 2, 3])); await datastore.delete(key); - try { - datastore.get(key); - } catch (err) { - expect(err).toHaveProperty('code', 'ERR_NOT_FOUND'); - } + await expect(datastore.get(key)).rejects.toHaveProperty('code', 'ERR_NOT_FOUND'); }); it('batch operations commit correctly', async () => { @@ -65,13 +63,10 @@ describe('AztecDatastore with AztecLmdbStore', () => { batch.delete(key1); await batch.commit(); - try { - datastore.get(key1); // key1 should be deleted - } catch (err) { - expect(err).toHaveProperty('code', 'ERR_NOT_FOUND'); - } - const retrieved2 = datastore.get(key2); + // key1 should be deleted + await expect(datastore.get(key1)).rejects.toHaveProperty('code', 'ERR_NOT_FOUND'); + const retrieved2 = await datastore.get(key2); expect(retrieved2.toString()).toEqual(value2.toString()); // key2 should exist }); @@ -123,7 +118,7 @@ describe('AztecDatastore with AztecLmdbStore', () => { // Check that data remains accessible even if it's no longer in the memory map for (let i = 0; i < 10; i++) { - const result = datastore.get(new Key(`key${i}`)); + const result = await datastore.get(new Key(`key${i}`)); expect(result).toEqual(new Uint8Array([i])); } }); @@ -135,7 +130,7 @@ describe('AztecDatastore with AztecLmdbStore', () => { // Check data consistency for (let i = 0; i < 20; i++) { - const value = datastore.get(new Key(`key${i}`)); + const value = await datastore.get(new 
Key(`key${i}`));
       expect(value).toEqual(new Uint8Array([i]));
     }
   });
@@ -185,7 +180,7 @@ export function interfaceDatastoreTests(test: I
       const v = uint8ArrayFromString('one');

       await store.put(k, v);
-      expect(store.get(k)).toEqual(v);
+      await expect(store.get(k)).resolves.toEqual(v);
     });

     it('parallel', async () => {
diff --git a/yarn-project/p2p/src/services/data_store.ts b/yarn-project/p2p/src/services/data_store.ts
index 32177b090774..37161d57bfac 100644
--- a/yarn-project/p2p/src/services/data_store.ts
+++ b/yarn-project/p2p/src/services/data_store.ts
@@ -1,5 +1,5 @@
 import { filter, map, sort, take } from '@aztec/foundation/iterable';
-import type { AztecKVStore, AztecMap } from '@aztec/kv-store';
+import type { AztecAsyncKVStore, AztecAsyncMap } from '@aztec/kv-store';

 import { type Batch, type Datastore, Key, type KeyQuery, type Pair, type Query } from 'interface-datastore';
 import type { AwaitIterable } from 'interface-store';
@@ -25,31 +25,31 @@ class KeyNotFoundError extends Error {
 export class AztecDatastore implements Datastore {
   #memoryDatastore: Map<string, MemoryItem>;
-  #dbDatastore: AztecMap<string, Uint8Array>;
+  #dbDatastore: AztecAsyncMap<string, Uint8Array>;

   #batchOps: BatchOp[] = [];

   private maxMemoryItems: number;

-  constructor(db: AztecKVStore, { maxMemoryItems } = { maxMemoryItems: 50 }) {
+  constructor(db: AztecAsyncKVStore, { maxMemoryItems } = { maxMemoryItems: 50 }) {
     this.#memoryDatastore = new Map();
     this.#dbDatastore = db.openMap('p2p_datastore');

     this.maxMemoryItems = maxMemoryItems;
   }

-  has(key: Key): boolean {
-    return this.#memoryDatastore.has(key.toString()) || this.#dbDatastore.has(key.toString());
+  async has(key: Key): Promise<boolean> {
+    return this.#memoryDatastore.has(key.toString()) || (await this.#dbDatastore.hasAsync(key.toString()));
   }

-  get(key: Key): Uint8Array {
+  async get(key: Key): Promise<Uint8Array> {
     const keyStr = key.toString();
     const memoryItem = this.#memoryDatastore.get(keyStr);
     if (memoryItem) {
       memoryItem.lastAccessedMs = Date.now();
       return memoryItem.data;
     }

-    const dbItem = this.#dbDatastore.get(keyStr);
+    const dbItem = await this.#dbDatastore.getAsync(keyStr);

     if (!dbItem) {
       throw new KeyNotFoundError(`Key not found`);
@@ -73,7 +73,7 @@ export class AztecDatastore implements Datastore {
     for await (const key of source) {
       yield {
         key,
-        value: this.get(key),
+        value: await this.get(key),
       };
     }
   }
@@ -202,7 +202,7 @@ export class AztecDatastore implements Datastore {
       };
     }

-    for (const [key, value] of this.#dbDatastore.entries()) {
+    for await (const [key, value] of this.#dbDatastore.entriesAsync()) {
       if (!this.#memoryDatastore.has(key)) {
         yield {
           key: new Key(key),
diff --git a/yarn-project/p2p/src/services/discv5/discv5_service.test.ts b/yarn-project/p2p/src/services/discv5/discv5_service.test.ts
index 0ab9005b8bb7..b8540c732f3d 100644
--- a/yarn-project/p2p/src/services/discv5/discv5_service.test.ts
+++ b/yarn-project/p2p/src/services/discv5/discv5_service.test.ts
@@ -1,6 +1,6 @@
 import { sleep } from '@aztec/foundation/sleep';
-import { type AztecKVStore } from '@aztec/kv-store';
-import { openTmpStore } from '@aztec/kv-store/lmdb';
+import { type AztecAsyncKVStore } from '@aztec/kv-store';
+import { openTmpStore } from '@aztec/kv-store/lmdb-v2';
 import { getTelemetryClient } from '@aztec/telemetry-client';

 import { jest } from '@jest/globals';
@@ -31,7 +31,7 @@ const waitForPeers = (node: DiscV5Service, expectedCount: number): Promise<void> =>
 describe('Discv5Service', () => {
   jest.setTimeout(10_000);

-  let store: AztecKVStore;
+  let store: AztecAsyncKVStore;
   let bootNode: BootstrapNode;
   let bootNodePeerId: PeerId;
let basePort = 7890; @@ -46,7 +46,7 @@ describe('Discv5Service', () => { beforeEach(async () => { const telemetryClient = getTelemetryClient(); - store = openTmpStore(true); + store = await openTmpStore('test'); bootNode = new BootstrapNode(store, telemetryClient); await bootNode.start(baseConfig); bootNodePeerId = bootNode.getPeerId(); @@ -54,7 +54,7 @@ describe('Discv5Service', () => { afterEach(async () => { await bootNode.stop(); - await store.clear(); + await store.close(); }); it('should initialize with default values', async () => { @@ -66,6 +66,7 @@ describe('Discv5Service', () => { const peers = node.getAllPeers(); const bootnode = peers[0]; expect((await bootnode.peerId()).toString()).toEqual(bootNodePeerId.toString()); + await node.stop(); }); it('should discover & add a peer', async () => { diff --git a/yarn-project/p2p/src/services/libp2p/libp2p_service.ts b/yarn-project/p2p/src/services/libp2p/libp2p_service.ts index 27ea23e972df..f722d7c691c9 100644 --- a/yarn-project/p2p/src/services/libp2p/libp2p_service.ts +++ b/yarn-project/p2p/src/services/libp2p/libp2p_service.ts @@ -23,7 +23,7 @@ import { type EpochCache } from '@aztec/epoch-cache'; import { createLogger } from '@aztec/foundation/log'; import { SerialQueue } from '@aztec/foundation/queue'; import { RunningPromise } from '@aztec/foundation/running-promise'; -import type { AztecKVStore } from '@aztec/kv-store'; +import type { AztecAsyncKVStore } from '@aztec/kv-store'; import { Attributes, OtelMetricsAdapter, type TelemetryClient, WithTracer, trackSpan } from '@aztec/telemetry-client'; import { type ENR } from '@chainsafe/enr'; @@ -167,7 +167,7 @@ export class LibP2PService extends WithTracer implement epochCache: EpochCache, proofVerifier: ClientProtocolCircuitVerifier, worldStateSynchronizer: WorldStateSynchronizer, - store: AztecKVStore, + store: AztecAsyncKVStore, telemetry: TelemetryClient, ) { const { tcpListenAddress, tcpAnnounceAddress, minPeerCount, maxPeerCount } = config; @@ -910,7 +910,7 @@ export class LibP2PService extends WithTracer implement // Libp2p seems to hang sometimes if new peers are initiating connections. private async stopLibP2P() { const TIMEOUT_MS = 5000; // 5 seconds timeout - const timeout = new Promise((resolve, reject) => { + const timeout = new Promise((_resolve, reject) => { setTimeout(() => reject(new Error('Timeout during libp2p.stop()')), TIMEOUT_MS); }); try { diff --git a/yarn-project/p2p/src/services/reqresp/protocols/tx.ts b/yarn-project/p2p/src/services/reqresp/protocols/tx.ts index 415cf4293c65..1d04f7983068 100644 --- a/yarn-project/p2p/src/services/reqresp/protocols/tx.ts +++ b/yarn-project/p2p/src/services/reqresp/protocols/tx.ts @@ -20,10 +20,10 @@ export function reqRespTxHandler(mempools: MemPools) * @param msg - the tx request message * @returns the tx response message */ - return (_peerId: PeerId, msg: Buffer) => { + return async (_peerId: PeerId, msg: Buffer) => { const txHash = TxHash.fromBuffer(msg); - const foundTx = mempools.txPool.getTxByHash(txHash); + const foundTx = await mempools.txPool.getTxByHash(txHash); const buf = foundTx ? 
foundTx.toBuffer() : Buffer.alloc(0);
-    return Promise.resolve(buf);
+    return buf;
   };
 }
diff --git a/yarn-project/p2p/src/services/reqresp/reqresp.integration.test.ts b/yarn-project/p2p/src/services/reqresp/reqresp.integration.test.ts
index 550b15379548..334aaaecfe92 100644
--- a/yarn-project/p2p/src/services/reqresp/reqresp.integration.test.ts
+++ b/yarn-project/p2p/src/services/reqresp/reqresp.integration.test.ts
@@ -63,7 +63,7 @@ describe('Req Resp p2p client integration', () => {
     epochCache = mock<EpochCache>();

     txPool.getAllTxs.mockImplementation(() => {
-      return [] as Tx[];
+      return Promise.resolve([] as Tx[]);
     });
   });
@@ -193,7 +193,7 @@
      const tx = await mockTx();
      const txHash = await tx.getTxHash();
      // Mock the tx pool to return the tx we are looking for
-      txPool.getTxByHash.mockImplementationOnce(() => tx);
+      txPool.getTxByHash.mockImplementationOnce(() => Promise.resolve(tx));

      const requestedTx = await client1.requestTxByHash(txHash);
@@ -223,7 +223,7 @@
      const txHash = await tx.getTxHash();

      // Return the correct tx with an invalid proof -> active attack
-      txPool.getTxByHash.mockImplementationOnce(() => tx);
+      txPool.getTxByHash.mockImplementationOnce(() => Promise.resolve(tx));

      const requestedTx = await client1.requestTxByHash(txHash);
      // Even though we got a response, the proof was deemed invalid
@@ -256,7 +256,7 @@
      const tx2 = await mockTx(420);

      // Return an invalid tx
-      txPool.getTxByHash.mockImplementationOnce(() => tx2);
+      txPool.getTxByHash.mockImplementationOnce(() => Promise.resolve(tx2));

      const requestedTx = await client1.requestTxByHash(txHash);
      // Even though we got a response, the proof was deemed invalid
diff --git a/yarn-project/p2p/src/util.ts b/yarn-project/p2p/src/util.ts
index 2f88d45095a4..b9a3f7e85a84 100644
--- a/yarn-project/p2p/src/util.ts
+++ b/yarn-project/p2p/src/util.ts
@@ -1,4 +1,4 @@
-import { type AztecKVStore, type AztecSingleton } from '@aztec/kv-store';
+import { type AztecAsyncKVStore } from '@aztec/kv-store';
 import { type DataStoreConfig } from '@aztec/kv-store/config';

 import type { GossipSub } from '@chainsafe/libp2p-gossipsub';
@@ -157,14 +157,17 @@ export async function configureP2PClientAddresses(
 * 3. If not, create a new one, then persist it in the node
 *
 */
-export async function getPeerIdPrivateKey(config: { peerIdPrivateKey?: string }, store: AztecKVStore): Promise<string> {
-  const peerIdPrivateKeySingleton: AztecSingleton<string> = store.openSingleton('peerIdPrivateKey');
+export async function getPeerIdPrivateKey(
+  config: { peerIdPrivateKey?: string },
+  store: AztecAsyncKVStore,
+): Promise<string> {
+  const peerIdPrivateKeySingleton = store.openSingleton<string>('peerIdPrivateKey');
   if (config.peerIdPrivateKey) {
     await peerIdPrivateKeySingleton.set(config.peerIdPrivateKey);
     return config.peerIdPrivateKey;
   }
-  const storedPeerIdPrivateKey = peerIdPrivateKeySingleton.get();
+  const storedPeerIdPrivateKey = await peerIdPrivateKeySingleton.getAsync();
   if (storedPeerIdPrivateKey) {
     return storedPeerIdPrivateKey;
   }
diff --git a/yarn-project/package.json b/yarn-project/package.json
index 8c4b380b3bc1..fc206f3d9f1f 100644
--- a/yarn-project/package.json
+++ b/yarn-project/package.json
@@ -46,6 +46,7 @@
     "kv-store",
     "l1-artifacts",
     "merkle-tree",
+    "native",
     "ivc-integration",
     "noir-bb-bench",
     "noir-contracts.js",
diff --git a/yarn-project/prover-client/src/proving_broker/proving_broker.ts b/yarn-project/prover-client/src/proving_broker/proving_broker.ts
index 239ca9ee750d..583fa0eb8037 100644
--- a/yarn-project/prover-client/src/proving_broker/proving_broker.ts
+++ b/yarn-project/prover-client/src/proving_broker/proving_broker.ts
@@ -178,8 +178,6 @@ export class ProvingBroker implements ProvingJobProducer, ProvingJobConsumer, Tr
     this.instrumentation.monitorActiveJobs(this.countActiveJobs);

     this.started = true;
-
-    return Promise.resolve();
   }

   public async stop(): Promise<void> {
diff --git a/yarn-project/prover-client/src/proving_broker/proving_broker_database/persisted.ts b/yarn-project/prover-client/src/proving_broker/proving_broker_database/persisted.ts
index 31a89a81c962..f27224049a56 100644
--- a/yarn-project/prover-client/src/proving_broker/proving_broker_database/persisted.ts
+++ b/yarn-project/prover-client/src/proving_broker/proving_broker_database/persisted.ts
@@ -5,11 +5,10 @@ import {
   ProvingJobSettledResult,
   getEpochFromProvingJobId,
 } from '@aztec/circuit-types';
-import { toArray } from '@aztec/foundation/iterable';
 import { jsonParseWithSchema, jsonStringify } from '@aztec/foundation/json-rpc';
 import { type Logger, createLogger } from '@aztec/foundation/log';
-import { type AztecMap } from '@aztec/kv-store';
-import { AztecLmdbStore } from '@aztec/kv-store/lmdb';
+import type { AztecAsyncKVStore, AztecAsyncMap } from '@aztec/kv-store';
+import { AztecLMDBStoreV2 } from '@aztec/kv-store/lmdb-v2';
 import { Attributes, LmdbMetrics, type TelemetryClient, getTelemetryClient } from '@aztec/telemetry-client';

 import { mkdir, readdir } from 'fs/promises';
@@ -19,10 +18,10 @@ import { type ProverBrokerConfig } from '../config.js';
 import { type ProvingBrokerDatabase } from '../proving_broker_database.js';

 class SingleEpochDatabase {
-  private jobs: AztecMap<ProvingJobId, string>;
-  private jobResults: AztecMap<ProvingJobId, string>;
+  private jobs: AztecAsyncMap<ProvingJobId, string>;
+  private jobResults: AztecAsyncMap<ProvingJobId, string>;

-  constructor(public readonly store: AztecLmdbStore) {
+  constructor(public readonly store: AztecAsyncKVStore) {
     this.jobs = store.openMap('proving_jobs');
     this.jobResults = store.openMap('proving_job_results');
   }
@@ -36,9 +35,9 @@ class SingleEpochDatabase {
   }

   async *allProvingJobs(): AsyncIterableIterator<[ProvingJob, ProvingJobSettledResult | undefined]> {
-    for (const jobStr of this.jobs.values()) {
+    for await (const jobStr of this.jobs.valuesAsync()) {
       const job = await jsonParseWithSchema(jobStr, ProvingJob);
-      const resultStr = this.jobResults.get(job.id);
+      const resultStr = await this.jobResults.getAsync(job.id);
       const result = resultStr ? await jsonParseWithSchema(resultStr, ProvingJobSettledResult) : undefined;
       yield [job, result];
     }
@@ -81,8 +80,8 @@ export class KVBrokerDatabase implements ProvingBrokerDatabase {
     );
   }

-  private estimateSize() {
-    const sizes = Array.from(this.epochs.values()).map(x => x.estimateSize());
+  private async estimateSize() {
+    const sizes = await Promise.all(Array.from(this.epochs.values()).map(x => x.estimateSize()));
     return {
       mappingSize: this.config.dataStoreMapSizeKB,
       numItems: sizes.reduce((prev, curr) => prev + curr.numItems, 0),
@@ -111,7 +110,7 @@ export class KVBrokerDatabase implements ProvingBrokerDatabase {
       logger.info(
         `Loading broker database for epoch ${epochNumber} from ${fullDirectory} with map size ${config.dataStoreMapSizeKB}KB`,
       );
-      const db = AztecLmdbStore.open(fullDirectory, config.dataStoreMapSizeKB, false);
+      const db = await AztecLMDBStoreV2.new(fullDirectory, config.dataStoreMapSizeKB);
       const epochDb = new SingleEpochDatabase(db);
       epochs.set(epochNumber, epochDb);
     }
@@ -145,7 +144,7 @@ export class KVBrokerDatabase implements ProvingBrokerDatabase {
       this.logger.info(
         `Creating broker database for epoch ${job.epochNumber} at ${newEpochDirectory} with map size ${this.config.dataStoreMapSizeKB}`,
       );
-      const db = AztecLmdbStore.open(newEpochDirectory, this.config.dataStoreMapSizeKB, false);
+      const db = await AztecLMDBStoreV2.new(newEpochDirectory, this.config.dataStoreMapSizeKB);
       epochDb = new SingleEpochDatabase(db);
       this.epochs.set(job.epochNumber, epochDb);
     }
@@ -153,7 +152,7 @@
   }

   async *allProvingJobs(): AsyncIterableIterator<[ProvingJob, ProvingJobSettledResult | undefined]> {
-    const iterators = (await toArray(this.epochs.values())).map(x => x.allProvingJobs());
+    const iterators = Array.from(this.epochs.values()).map(x => x.allProvingJobs());
     for (const it of iterators) {
       yield* it;
     }
diff --git a/yarn-project/sequencer-client/src/sequencer/sequencer.test.ts b/yarn-project/sequencer-client/src/sequencer/sequencer.test.ts
index 1c1a4ae2cf23..b7b58425128a 100644
--- a/yarn-project/sequencer-client/src/sequencer/sequencer.test.ts
+++ b/yarn-project/sequencer-client/src/sequencer/sequencer.test.ts
@@ -117,8 +117,10 @@ describe('sequencer', () => {
   };

   const mockPendingTxs = (txs: Tx[]) => {
-    p2p.getPendingTxCount.mockReturnValue(Promise.resolve(txs.length));
-    p2p.iteratePendingTxs.mockReturnValue(mockTxIterator(Promise.resolve(txs)));
+    p2p.getPendingTxCount.mockResolvedValue(txs.length);
+    // make sure a new iterator is created for every invocation of iteratePendingTxs
+    // otherwise we risk iterating over the same iterator more than once (yielding no more values)
+    p2p.iteratePendingTxs.mockImplementation(() => mockTxIterator(Promise.resolve(txs)));
   };

   const makeBlock = async (txs: Tx[]) => {
diff --git a/yarn-project/sequencer-client/src/sequencer/sequencer.ts b/yarn-project/sequencer-client/src/sequencer/sequencer.ts
index da16078c4d44..0b7a475ed86a 100644
--- a/yarn-project/sequencer-client/src/sequencer/sequencer.ts
+++ b/yarn-project/sequencer-client/src/sequencer/sequencer.ts
@@ -405,7 +405,7 @@ export class Sequencer {
    * @param opts - Whether to just validate the block as a validator, as opposed to building it as a proposal
    */
   protected async buildBlock(
-    pendingTxs: Iterable<Tx> | AsyncIterableIterator<Tx>,
+    pendingTxs: Iterable<Tx> | AsyncIterable<Tx>,
     newGlobalVariables: GlobalVariables,
     opts: { validateOnly?: boolean } = {},
   ) {
@@ -549,7 +549,7 @@
     [Attributes.BLOCK_NUMBER]: proposalHeader.globalVariables.blockNumber.toNumber(),
   }))
   private async buildBlockAndEnqueuePublish(
-    pendingTxs: AsyncIterableIterator<Tx>,
+    pendingTxs: Iterable<Tx> | AsyncIterable<Tx>,
     proposalHeader: BlockHeader,
   ): Promise<void> {
     await this.publisher.validateBlockForSubmission(proposalHeader);
diff --git a/yarn-project/sequencer-client/tsconfig.json b/yarn-project/sequencer-client/tsconfig.json
index a5653fa17fd1..02644b0fbec6 100644
--- a/yarn-project/sequencer-client/tsconfig.json
+++ b/yarn-project/sequencer-client/tsconfig.json
@@ -21,6 +21,9 @@
     {
       "path": "../circuits.js"
     },
+    {
+      "path": "../epoch-cache"
+    },
     {
       "path": "../ethereum"
     },
diff --git a/yarn-project/simulator/src/public/public_processor.ts b/yarn-project/simulator/src/public/public_processor.ts
index 4db45b532e61..0bd46e4a1043 100644
--- a/yarn-project/simulator/src/public/public_processor.ts
+++ b/yarn-project/simulator/src/public/public_processor.ts
@@ -137,7 +137,7 @@ export class PublicProcessor implements Traceable {
    * @returns The list of processed txs with their circuit simulation outputs.
    */
   public async process(
-    txs: Iterable<Tx> | AsyncIterableIterator<Tx>,
+    txs: Iterable<Tx> | AsyncIterable<Tx>,
     limits: {
       maxTransactions?: number;
       maxBlockSize?: number;
diff --git a/yarn-project/telemetry-client/src/lmdb_metrics.ts b/yarn-project/telemetry-client/src/lmdb_metrics.ts
index cdd752997437..9332e3e807c1 100644
--- a/yarn-project/telemetry-client/src/lmdb_metrics.ts
+++ b/yarn-project/telemetry-client/src/lmdb_metrics.ts
@@ -7,7 +7,7 @@ import {
   ValueType,
 } from './telemetry.js';

-export type LmdbStatsCallback = () => { mappingSize: number; numItems: number; actualSize: number };
+export type LmdbStatsCallback = () => Promise<{ mappingSize: number; numItems: number; actualSize: number }>;

 export class LmdbMetrics {
   private dbMapSize: ObservableGauge;
@@ -18,10 +18,12 @@ export class LmdbMetrics {
     this.dbMapSize = meter.createObservableGauge(DB_MAP_SIZE, {
       description: 'LMDB Map Size',
       valueType: ValueType.INT,
+      unit: 'By',
     });
     this.dbUsedSize = meter.createObservableGauge(DB_USED_SIZE, {
       description: 'LMDB Used Size',
       valueType: ValueType.INT,
+      unit: 'By',
     });
     this.dbNumItems = meter.createObservableGauge(DB_NUM_ITEMS, {
       description: 'LMDB Num Items',
@@ -31,11 +33,11 @@ export class LmdbMetrics {
     meter.addBatchObservableCallback(this.recordDBMetrics, [this.dbMapSize, this.dbUsedSize, this.dbNumItems]);
   }

-  private recordDBMetrics = (observable: BatchObservableResult) => {
+  private recordDBMetrics = async (observable: BatchObservableResult) => {
     if (!this.getStats) {
       return;
     }
-    const metrics = this.getStats();
+    const metrics = await this.getStats();
     observable.observe(this.dbMapSize, metrics.mappingSize, this.attributes);
     observable.observe(this.dbNumItems, metrics.numItems, this.attributes);
     observable.observe(this.dbUsedSize, metrics.actualSize, this.attributes);
diff --git a/yarn-project/tsconfig.json b/yarn-project/tsconfig.json
index 45e18ab81290..6b2e594e275a 100644
--- a/yarn-project/tsconfig.json
+++ b/yarn-project/tsconfig.json
@@ -38,6 +38,7 @@
     { "path": "key-store/tsconfig.json" },
     { "path": "l1-artifacts/tsconfig.json" },
     { "path": "merkle-tree/tsconfig.json" },
+    { "path": "native/tsconfig.json" },
     { "path": "noir-contracts.js/tsconfig.json" },
     { "path": "builder/tsconfig.json" },
     { "path":
"noir-protocol-circuits-types/tsconfig.json" }, diff --git a/yarn-project/validator-client/src/validator.test.ts b/yarn-project/validator-client/src/validator.test.ts index 74e59a86301d..3f6e8d21755b 100644 --- a/yarn-project/validator-client/src/validator.test.ts +++ b/yarn-project/validator-client/src/validator.test.ts @@ -89,7 +89,7 @@ describe('ValidationService', () => { const proposal = await makeBlockProposal(); // mock the p2pClient.getTxStatus to return undefined for all transactions - p2pClient.getTxStatus.mockImplementation(() => undefined); + p2pClient.getTxStatus.mockResolvedValue(undefined); // Mock the p2pClient.requestTxs to return undefined for all transactions p2pClient.requestTxs.mockImplementation(() => Promise.resolve([undefined])); @@ -102,14 +102,14 @@ describe('ValidationService', () => { const proposal = await makeBlockProposal(); // mock the p2pClient.getTxStatus to return undefined for all transactions - p2pClient.getTxStatus.mockImplementation(() => undefined); + p2pClient.getTxStatus.mockResolvedValue(undefined); epochCache.getProposerInCurrentOrNextSlot.mockImplementation(async () => ({ currentProposer: await proposal.getSender(), nextProposer: await proposal.getSender(), currentSlot: proposal.slotNumber.toBigInt(), nextSlot: proposal.slotNumber.toBigInt() + 1n, })); - epochCache.isInCommittee.mockImplementation(() => Promise.resolve(true)); + epochCache.isInCommittee.mockResolvedValue(true); const val = ValidatorClient.new(config, epochCache, p2pClient); val.registerBlockBuilder(() => { diff --git a/yarn-project/watch.sh b/yarn-project/watch.sh index 55fd43e58e6c..4d6b26992b16 100755 --- a/yarn-project/watch.sh +++ b/yarn-project/watch.sh @@ -62,8 +62,8 @@ run_generate() { } cp_barretenberg_artifacts() { - mkdir -p world-state/build - cp $BARRETENBERG_OUT_DIR/lib/world_state_napi.node world-state/build/world_state_napi.node + mkdir -p native/build + cp $BARRETENBERG_OUT_DIR/lib/nodejs_module.node native/build/nodejs_module.node } # Remove all temp files with process or run ids on exit diff --git a/yarn-project/world-state/package.json b/yarn-project/world-state/package.json index a396423edcef..102155c5af75 100644 --- a/yarn-project/world-state/package.json +++ b/yarn-project/world-state/package.json @@ -15,18 +15,15 @@ "tsconfig": "./tsconfig.json" }, "scripts": { - "build": "yarn clean && yarn generate && tsc -b", - "build:cpp": "./scripts/build.sh cpp", + "build": "yarn clean && tsc -b", "build:dev": "tsc -b --watch", - "clean": "rm -rf ./dest ./build .tsbuildinfo", + "clean": "rm -rf ./dest .tsbuildinfo", "formatting": "run -T prettier --check ./src && run -T eslint ./src", "formatting:fix": "run -T eslint --fix ./src && run -T prettier -w ./src", - "generate": "mkdir -p build && cp -v ../../barretenberg/cpp/build-pic/lib/world_state_napi.node build", "test": "HARDWARE_CONCURRENCY=${HARDWARE_CONCURRENCY:-16} RAYON_NUM_THREADS=${RAYON_NUM_THREADS:-4} NODE_NO_WARNINGS=1 node --experimental-vm-modules ../node_modules/.bin/jest --passWithNoTests --maxWorkers=${JEST_MAX_WORKERS:-8}" }, "inherits": [ - "../package.common.json", - "./package.local.json" + "../package.common.json" ], "jest": { "moduleNameMapper": { @@ -67,17 +64,15 @@ "@aztec/foundation": "workspace:^", "@aztec/kv-store": "workspace:^", "@aztec/merkle-tree": "workspace:^", + "@aztec/native": "workspace:^", "@aztec/telemetry-client": "workspace:^", "@aztec/types": "workspace:^", - "bindings": "^1.5.0", - "msgpackr": "^1.10.2", "tslib": "^2.4.0", "zod": "^3.23.8" }, "devDependencies": { 
"@aztec/archiver": "workspace:^", "@jest/globals": "^29.5.0", - "@types/bindings": "^1.5.5", "@types/jest": "^29.5.0", "@types/levelup": "^5.1.2", "@types/memdown": "^3.0.0", diff --git a/yarn-project/world-state/package.local.json b/yarn-project/world-state/package.local.json deleted file mode 100644 index 3544f4fb8491..000000000000 --- a/yarn-project/world-state/package.local.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "scripts": { - "build": "yarn clean && yarn generate && tsc -b", - "clean": "rm -rf ./dest ./build .tsbuildinfo" - } -} diff --git a/yarn-project/world-state/scripts/build.sh b/yarn-project/world-state/scripts/build.sh deleted file mode 100755 index 17d34d494bee..000000000000 --- a/yarn-project/world-state/scripts/build.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/env bash - -set -e - -cd "$(dirname "$0")/.." - -# relatiev path from the directory containing package.json -WORLD_STATE_LIB_PATH=../../barretenberg/cpp/build-pic/lib/world_state_napi.node -PRESET=${PRESET:-clang16-pic} - -build_addon() { - (cd ../../barretenberg/cpp; cmake --preset $PRESET -DCMAKE_BUILD_TYPE=RelWithAssert; cmake --build --preset $PRESET --target world_state_napi; echo $PWD; mkdir -p build/bin; cp ./build-pic/lib/world_state_napi.node ./build/bin/world_state_napi.node) -} - -cp_addon_lib() { - if [ -f $WORLD_STATE_LIB_PATH ]; then - echo "Copying $(realpath $WORLD_STATE_LIB_PATH) to build directory" - rm -rf build - mkdir build - cp $WORLD_STATE_LIB_PATH build/world_state_napi.node - else - echo "world_state_napi.node not found at $WORLD_STATE_LIB_PATH" - echo "Skipping copy to build directory" - echo "NativeWorldStateService will not work without this file" - fi -} - -build_ts() { - tsc -b . -} - -case $1 in - cpp) - build_addon - cp_addon_lib - ;; - ts) - cp_addon_lib - build_ts - ;; - *) - echo "Usage: $0 {cpp|ts}" - exit 1 - ;; -esac diff --git a/yarn-project/world-state/src/native/message.ts b/yarn-project/world-state/src/native/message.ts index a48bd189cddd..4b639f7b4b11 100644 --- a/yarn-project/world-state/src/native/message.ts +++ b/yarn-project/world-state/src/native/message.ts @@ -2,50 +2,6 @@ import { MerkleTreeId } from '@aztec/circuit-types'; import { AppendOnlyTreeSnapshot, Fr, type StateReference, type UInt32 } from '@aztec/circuits.js'; import { type Tuple } from '@aztec/foundation/serialize'; -export type MessageHeaderInit = { - /** The message ID. Optional, if not set defaults to 0 */ - messageId?: number; - /** Identifies the original request. Optional */ - requestId?: number; -}; - -export class MessageHeader { - /** An number to identify this message */ - public readonly messageId: number; - /** If this message is a response to a request, the messageId of the request */ - public readonly requestId: number; - - constructor({ messageId, requestId }: MessageHeaderInit) { - this.messageId = messageId ?? 0; - this.requestId = requestId ?? 
0; - } - - static fromMessagePack(data: object): MessageHeader { - return new MessageHeader(data as MessageHeaderInit); - } -} - -interface TypedMessageLike { - msgType: number; - header: { - messageId?: number; - requestId?: number; - }; - value: any; -} - -export class TypedMessage { - public constructor(public readonly msgType: T, public readonly header: MessageHeader, public readonly value: B) {} - - static fromMessagePack(data: TypedMessageLike): TypedMessage { - return new TypedMessage(data['msgType'] as T, MessageHeader.fromMessagePack(data['header']), data['value']); - } - - static isTypedMessageLike(obj: any): obj is TypedMessageLike { - return typeof obj === 'object' && obj !== null && 'msgType' in obj && 'header' in obj && 'value' in obj; - } -} - export enum WorldStateMessageType { GET_TREE_INFO = 100, GET_STATE_REFERENCE, diff --git a/yarn-project/world-state/src/native/native_world_state_instance.ts b/yarn-project/world-state/src/native/native_world_state_instance.ts index 4db6072a2790..2ebcd9a49b22 100644 --- a/yarn-project/world-state/src/native/native_world_state_instance.ts +++ b/yarn-project/world-state/src/native/native_world_state_instance.ts @@ -1,7 +1,6 @@ import { MerkleTreeId } from '@aztec/circuit-types'; import { ARCHIVE_HEIGHT, - Fr, GeneratorIndex, L1_TO_L2_MSG_TREE_HEIGHT, MAX_NULLIFIERS_PER_TX, @@ -11,17 +10,13 @@ import { PUBLIC_DATA_TREE_HEIGHT, } from '@aztec/circuits.js'; import { createLogger } from '@aztec/foundation/log'; +import { NativeWorldState as BaseNativeWorldState, MsgpackChannel } from '@aztec/native'; import assert from 'assert'; -import bindings from 'bindings'; -import { Decoder, Encoder, addExtension } from 'msgpackr'; import { cpus } from 'os'; -import { isAnyArrayBuffer } from 'util/types'; import { type WorldStateInstrumentation } from '../instrumentation/instrumentation.js'; import { - MessageHeader, - TypedMessage, WorldStateMessageType, type WorldStateRequest, type WorldStateRequestCategories, @@ -32,24 +27,6 @@ import { } from './message.js'; import { WorldStateOpsQueue } from './world_state_ops_queue.js'; -// small extension to pack an NodeJS Fr instance to a representation that the C++ code can understand -// this only works for writes. Unpacking from C++ can't create Fr instances because the data is passed -// as raw, untagged, buffers. On the NodeJS side we don't know what the buffer represents -// Adding a tag would be a solution, but it would have to be done on both sides and it's unclear where else -// C++ fr instances are sent/received/stored. 
-addExtension({
-  Class: Fr,
-  write: fr => fr.toBuffer(),
-});
-
-export interface NativeInstance {
-  call(msg: Buffer | Uint8Array): Promise<any>;
-}
-
-const NATIVE_LIBRARY_NAME = 'world_state_napi';
-const NATIVE_CLASS_NAME = 'WorldState';
-
-const NATIVE_MODULE = bindings(NATIVE_LIBRARY_NAME);
 const MAX_WORLD_STATE_THREADS = +(process.env.HARDWARE_CONCURRENCY || '16');

 export interface NativeWorldStateInstance {
@@ -65,29 +42,11 @@ export class NativeWorldState implements NativeWorldStateInstance {
   private open = true;

-  /** Each message needs a unique ID */
-  private nextMessageId = 0;
-
-  /** A long-lived msgpack encoder */
-  private encoder = new Encoder({
-    // always encode JS objects as MessagePack maps
-    // this makes it compatible with other MessagePack decoders
-    useRecords: false,
-    int64AsType: 'bigint',
-  });
-
-  /** A long-lived msgpack decoder */
-  private decoder = new Decoder({
-    useRecords: false,
-    int64AsType: 'bigint',
-  });
-
-  /** The actual native instance */
-  private instance: any;
-
   // We maintain a map of queue to fork
   private queues = new Map<number, WorldStateOpsQueue>();

+  private instance: MsgpackChannel;
+
   /** Creates a new native WorldState instance */
   constructor(
     dataDir: string,
@@ -99,7 +58,7 @@ export class NativeWorldState implements NativeWorldStateInstance {
     log.info(
       `Creating world state data store at directory ${dataDir} with map size ${dbMapSizeKb} KB and ${threads} threads.`,
     );
-    this.instance = new NATIVE_MODULE[NATIVE_CLASS_NAME](
+    const ws = new BaseNativeWorldState(
       dataDir,
       {
         [MerkleTreeId.NULLIFIER_TREE]: NULLIFIER_TREE_HEIGHT,
@@ -116,6 +75,7 @@ export class NativeWorldState implements NativeWorldStateInstance {
       dbMapSizeKb,
       threads,
     );
+    this.instance = new MsgpackChannel(ws);
     // Manually create the queue for the canonical fork
     this.queues.set(0, new WorldStateOpsQueue());
   }
@@ -213,118 +173,62 @@ export class NativeWorldState implements NativeWorldStateInstance {
     messageType: T,
     body: WorldStateRequest[T] & WorldStateRequestCategories,
   ): Promise<WorldStateResponse[T]> {
-    const messageId = this.nextMessageId++;
+    let logMetadata: Record<string, any> = {};
+
     if (body) {
-      let data: Record<string, any> = {};
       if ('treeId' in body) {
-        data['treeId'] = MerkleTreeId[body.treeId];
+        logMetadata['treeId'] = MerkleTreeId[body.treeId];
       }
       if ('revision' in body) {
-        data = { ...data, ...body.revision };
+        logMetadata = { ...logMetadata, ...body.revision };
       }
       if ('forkId' in body) {
-        data['forkId'] = body.forkId;
+        logMetadata['forkId'] = body.forkId;
       }
       if ('blockNumber' in body) {
-        data['blockNumber'] = body.blockNumber;
+        logMetadata['blockNumber'] = body.blockNumber;
       }
       if ('toBlockNumber' in body) {
-        data['toBlockNumber'] = body.toBlockNumber;
+        logMetadata['toBlockNumber'] = body.toBlockNumber;
       }
       if ('leafIndex' in body) {
-        data['leafIndex'] = body.leafIndex;
+        logMetadata['leafIndex'] = body.leafIndex;
       }
       if ('blockHeaderHash' in body) {
-        data['blockHeaderHash'] = '0x' + body.blockHeaderHash.toString('hex');
+        logMetadata['blockHeaderHash'] = '0x' + body.blockHeaderHash.toString('hex');
       }
       if ('leaves' in body) {
-        data['leavesCount'] = body.leaves.length;
+        logMetadata['leavesCount'] = body.leaves.length;
       }
       // sync operation
       if ('paddedNoteHashes' in body) {
-        data['notesCount'] = body.paddedNoteHashes.length;
-        data['nullifiersCount'] = body.paddedNullifiers.length;
-        data['l1ToL2MessagesCount'] = body.paddedL1ToL2Messages.length;
-        data['publicDataWritesCount'] = body.publicDataWrites.length;
+        logMetadata['notesCount'] = body.paddedNoteHashes.length;
+
logMetadata['nullifiersCount'] = body.paddedNullifiers.length; + logMetadata['l1ToL2MessagesCount'] = body.paddedL1ToL2Messages.length; + logMetadata['publicDataWritesCount'] = body.publicDataWrites.length; } - - this.log.trace(`Calling messageId=${messageId} ${WorldStateMessageType[messageType]}`, data); - } else { - this.log.trace(`Calling messageId=${messageId} ${WorldStateMessageType[messageType]}`); } - const start = process.hrtime.bigint(); - - const request = new TypedMessage(messageType, new MessageHeader({ messageId }), body); - const encodedRequest = this.encoder.encode(request); - const encodingEnd = process.hrtime.bigint(); - const encodingDuration = Number(encodingEnd - start) / 1_000_000; - - let encodedResponse: any; try { - encodedResponse = await this.instance.call(encodedRequest); + const { duration, response } = await this.instance.sendMessage(messageType, body); + this.log.trace(`Call ${WorldStateMessageType[messageType]} took (ms)`, { + duration, + ...logMetadata, + }); + + this.instrumentation.recordRoundTrip(duration.totalUs, messageType); + return response; } catch (error) { - this.log.error(`Call messageId=${messageId} ${WorldStateMessageType[messageType]} failed: ${error}`); + this.log.error(`Call ${WorldStateMessageType[messageType]} failed: ${error}`, error, logMetadata); throw error; } - - const callEnd = process.hrtime.bigint(); - - const callDuration = Number(callEnd - encodingEnd) / 1_000_000; - - const buf = Buffer.isBuffer(encodedResponse) - ? encodedResponse - : isAnyArrayBuffer(encodedResponse) - ? Buffer.from(encodedResponse) - : encodedResponse; - - if (!Buffer.isBuffer(buf)) { - throw new TypeError( - 'Invalid encoded response: expected Buffer or ArrayBuffer, got ' + - (encodedResponse === null ? 'null' : typeof encodedResponse), - ); - } - - const decodedResponse = this.decoder.unpack(buf); - if (!TypedMessage.isTypedMessageLike(decodedResponse)) { - throw new TypeError( - 'Invalid response: expected TypedMessageLike, got ' + - (decodedResponse === null ? 
'null' : typeof decodedResponse), - ); - } - - const response = TypedMessage.fromMessagePack(decodedResponse); - const decodingEnd = process.hrtime.bigint(); - const decodingDuration = Number(decodingEnd - callEnd) / 1_000_000; - const totalDuration = Number(decodingEnd - start) / 1_000_000; - this.log.trace(`Call messageId=${messageId} ${WorldStateMessageType[messageType]} took (ms)`, { - totalDuration, - encodingDuration, - callDuration, - decodingDuration, - }); - - if (response.header.requestId !== request.header.messageId) { - throw new Error( - 'Response ID does not match request: ' + response.header.requestId + ' != ' + request.header.messageId, - ); - } - - if (response.msgType !== messageType) { - throw new Error('Invalid response message type: ' + response.msgType + ' != ' + messageType); - } - - const callDurationUs = Number(callEnd - encodingEnd) / 1000; - this.instrumentation.recordRoundTrip(callDurationUs, messageType); - - return response.value; } } diff --git a/yarn-project/world-state/src/world-state-db/merkle_trees.ts b/yarn-project/world-state/src/world-state-db/merkle_trees.ts index a7ffcb2120b9..2da852c6a259 100644 --- a/yarn-project/world-state/src/world-state-db/merkle_trees.ts +++ b/yarn-project/world-state/src/world-state-db/merkle_trees.ts @@ -709,7 +709,7 @@ export class MerkleTrees implements MerkleTreeAdminDatabase { } await this.#snapshot(l2Block.number); - this.metrics.recordDbSize(this.store.estimateSize().actualSize); + this.metrics.recordDbSize((await this.store.estimateSize()).actualSize); this.metrics.recordSyncDuration('commit', timer); return buildEmptyWorldStateStatusFull(); } diff --git a/yarn-project/world-state/tsconfig.json b/yarn-project/world-state/tsconfig.json index 3a835f4686d6..db045786f0a8 100644 --- a/yarn-project/world-state/tsconfig.json +++ b/yarn-project/world-state/tsconfig.json @@ -21,6 +21,9 @@ { "path": "../merkle-tree" }, + { + "path": "../native" + }, { "path": "../telemetry-client" }, diff --git a/yarn-project/yarn.lock b/yarn-project/yarn.lock index ac0f525a7057..ebb61ee3187a 100644 --- a/yarn-project/yarn.lock +++ b/yarn-project/yarn.lock @@ -814,7 +814,7 @@ __metadata: "@noir-lang/noir_js": "file:../../noir/packages/noir_js" "@noir-lang/noirc_abi": "portal:../../noir/packages/noirc_abi" "@noir-lang/types": "portal:../../noir/packages/types" - "@playwright/test": "npm:^1.50.0" + "@playwright/test": "npm:1.49.0" "@types/jest": "npm:^29.5.0" "@types/node": "npm:^22.8.1" "@types/pako": "npm:^2.0.3" @@ -828,7 +828,7 @@ __metadata: levelup: "npm:^5.1.1" memdown: "npm:^6.1.1" pako: "npm:^2.1.0" - playwright: "npm:^1.50.0" + playwright: "npm:1.49.0" puppeteer: "npm:^22.4.1" resolve-typescript-plugin: "npm:^2.0.1" serve: "npm:^14.2.1" @@ -868,6 +868,7 @@ __metadata: "@aztec/circuits.js": "workspace:^" "@aztec/ethereum": "workspace:^" "@aztec/foundation": "workspace:^" + "@aztec/native": "workspace:^" "@jest/globals": "npm:^29.5.0" "@types/chai": "npm:^5.0.1" "@types/chai-as-promised": "npm:^8.0.1" @@ -875,6 +876,7 @@ __metadata: "@types/mocha": "npm:^10.0.10" "@types/mocha-each": "npm:^2.0.4" "@types/node": "npm:^18.7.23" + "@types/sinon": "npm:^17.0.3" "@web/dev-server-esbuild": "npm:^1.0.3" "@web/test-runner": "npm:^0.19.0" "@web/test-runner-playwright": "npm:^0.11.0" @@ -885,6 +887,9 @@ __metadata: lmdb: "npm:^3.2.0" mocha: "npm:^10.8.2" mocha-each: "npm:^2.0.1" + msgpackr: "npm:^1.11.2" + ordered-binary: "npm:^1.5.3" + sinon: "npm:^19.0.2" ts-node: "npm:^10.9.1" typescript: "npm:^5.0.4" languageName: unknown @@ -922,6 +927,23 @@ 
__metadata: languageName: unknown linkType: soft +"@aztec/native@workspace:^, @aztec/native@workspace:native": + version: 0.0.0-use.local + resolution: "@aztec/native@workspace:native" + dependencies: + "@aztec/foundation": "workspace:^" + "@jest/globals": "npm:^29.5.0" + "@types/bindings": "npm:^1.5.5" + "@types/jest": "npm:^29.5.0" + "@types/node": "npm:^18.7.23" + bindings: "npm:^1.5.0" + jest: "npm:^29.5.0" + msgpackr: "npm:^1.11.2" + ts-node: "npm:^10.9.1" + typescript: "npm:^5.0.4" + languageName: unknown + linkType: soft + "@aztec/noir-contracts.js@workspace:^, @aztec/noir-contracts.js@workspace:noir-contracts.js": version: 0.0.0-use.local resolution: "@aztec/noir-contracts.js@workspace:noir-contracts.js" @@ -1433,19 +1455,17 @@ __metadata: "@aztec/foundation": "workspace:^" "@aztec/kv-store": "workspace:^" "@aztec/merkle-tree": "workspace:^" + "@aztec/native": "workspace:^" "@aztec/telemetry-client": "workspace:^" "@aztec/types": "workspace:^" "@jest/globals": "npm:^29.5.0" - "@types/bindings": "npm:^1.5.5" "@types/jest": "npm:^29.5.0" "@types/levelup": "npm:^5.1.2" "@types/memdown": "npm:^3.0.0" "@types/node": "npm:^18.7.23" - bindings: "npm:^1.5.0" jest: "npm:^29.5.0" jest-mock-extended: "npm:^3.0.5" memdown: "npm:^6.1.1" - msgpackr: "npm:^1.10.2" ts-node: "npm:^10.9.1" tslib: "npm:^2.4.0" typescript: "npm:^5.0.4" @@ -4416,14 +4436,14 @@ __metadata: languageName: node linkType: hard -"@playwright/test@npm:^1.50.0": - version: 1.50.0 - resolution: "@playwright/test@npm:1.50.0" +"@playwright/test@npm:1.49.0": + version: 1.49.0 + resolution: "@playwright/test@npm:1.49.0" dependencies: - playwright: "npm:1.50.0" + playwright: "npm:1.49.0" bin: playwright: cli.js - checksum: 10/1fec2ed986205b57b03f24392bb01c6454c1f0a5c14204ce921afd51c3f5d61f20eddb3a18d36a02b19b3e3d731c7ff6bb7ba3c622aabc8fa3802021aef7d21b + checksum: 10/e87485ab4c02b6dc0bc20a43ea3965c949c45caa4e7f5beea4a0abd29be0a318662931e887072db0d165f8dde93709b97ea1b2c6f4c833b403aa13427d76dd22 languageName: node linkType: hard @@ -4794,7 +4814,7 @@ __metadata: languageName: node linkType: hard -"@sinonjs/commons@npm:^3.0.0": +"@sinonjs/commons@npm:^3.0.0, @sinonjs/commons@npm:^3.0.1": version: 3.0.1 resolution: "@sinonjs/commons@npm:3.0.1" dependencies: @@ -4812,6 +4832,33 @@ __metadata: languageName: node linkType: hard +"@sinonjs/fake-timers@npm:^13.0.1, @sinonjs/fake-timers@npm:^13.0.2": + version: 13.0.5 + resolution: "@sinonjs/fake-timers@npm:13.0.5" + dependencies: + "@sinonjs/commons": "npm:^3.0.1" + checksum: 10/11ee417968fc4dce1896ab332ac13f353866075a9d2a88ed1f6258f17cc4f7d93e66031b51fcddb8c203aa4d53fd980b0ae18aba06269f4682164878a992ec3f + languageName: node + linkType: hard + +"@sinonjs/samsam@npm:^8.0.1": + version: 8.0.2 + resolution: "@sinonjs/samsam@npm:8.0.2" + dependencies: + "@sinonjs/commons": "npm:^3.0.1" + lodash.get: "npm:^4.4.2" + type-detect: "npm:^4.1.0" + checksum: 10/58ca9752e8e835a09ed275f8edf8da2720fe95c0c02f6bcb90ad7f86fdceb393f35f744194b705dd94216228646ec0aedbb814e245eb869b940dcf1266b7a533 + languageName: node + linkType: hard + +"@sinonjs/text-encoding@npm:^0.7.3": + version: 0.7.3 + resolution: "@sinonjs/text-encoding@npm:0.7.3" + checksum: 10/f0cc89bae36e7ce159187dece7800b78831288f1913e9ae8cf8a878da5388232d2049740f6f4a43ec4b43b8ad1beb55f919f45eb9a577adb4a2a6eacb27b25fc + languageName: node + linkType: hard + "@swc/core-darwin-arm64@npm:1.5.5": version: 1.5.5 resolution: "@swc/core-darwin-arm64@npm:1.5.5" @@ -10104,6 +10151,13 @@ __metadata: languageName: node linkType: hard +"diff@npm:^7.0.0": + 
version: 7.0.0 + resolution: "diff@npm:7.0.0" + checksum: 10/e9b8e48d054c9c0c093c65ce8e2637af94b35f2427001607b14e5e0589e534ea3413a7f91ebe6d7c5a1494ace49cb7c7c3972f442ddd96a4767ff091999a082e + languageName: node + linkType: hard + "diffie-hellman@npm:^5.0.0": version: 5.0.3 resolution: "diffie-hellman@npm:5.0.3" @@ -14604,6 +14658,13 @@ __metadata: languageName: node linkType: hard +"just-extend@npm:^6.2.0": + version: 6.2.0 + resolution: "just-extend@npm:6.2.0" + checksum: 10/1f487b074b9e5773befdd44dc5d1b446f01f24f7d4f1f255d51c0ef7f686e8eb5f95d983b792b9ca5c8b10cd7e60a924d64103725759eddbd7f18bcb22743f92 + languageName: node + linkType: hard + "jwa@npm:^2.0.0": version: 2.0.0 resolution: "jwa@npm:2.0.0" @@ -15060,6 +15121,13 @@ __metadata: languageName: node linkType: hard +"lodash.get@npm:^4.4.2": + version: 4.4.2 + resolution: "lodash.get@npm:4.4.2" + checksum: 10/2a4925f6e89bc2c010a77a802d1ba357e17ed1ea03c2ddf6a146429f2856a216663e694a6aa3549a318cbbba3fd8b7decb392db457e6ac0b83dc745ed0a17380 + languageName: node + linkType: hard + "lodash.groupby@npm:^4.6.0": version: 4.6.0 resolution: "lodash.groupby@npm:4.6.0" @@ -15963,18 +16031,6 @@ __metadata: languageName: node linkType: hard -"msgpackr@npm:^1.10.2": - version: 1.10.2 - resolution: "msgpackr@npm:1.10.2" - dependencies: - msgpackr-extract: "npm:^3.0.2" - dependenciesMeta: - msgpackr-extract: - optional: true - checksum: 10/c422bed19f70d23b5f8945cb8e334cb9e773350b422d606794397c22260ef64a42a17284c5e14c2693203f871ecb18157dc47e2b8bd2e66d7764fcde3442a5c1 - languageName: node - linkType: hard - "msgpackr@npm:^1.11.2": version: 1.11.2 resolution: "msgpackr@npm:1.11.2" @@ -16153,6 +16209,19 @@ __metadata: languageName: node linkType: hard +"nise@npm:^6.1.1": + version: 6.1.1 + resolution: "nise@npm:6.1.1" + dependencies: + "@sinonjs/commons": "npm:^3.0.1" + "@sinonjs/fake-timers": "npm:^13.0.1" + "@sinonjs/text-encoding": "npm:^0.7.3" + just-extend: "npm:^6.2.0" + path-to-regexp: "npm:^8.1.0" + checksum: 10/2d3175587cf0a351e2c91eb643fdc59d266de39f394a3ac0bace38571749d1e7f25341d763899245139b8f0d2ee048b2d3387d75ecf94c4897e947d5fc881eea + languageName: node + linkType: hard + "no-case@npm:^2.2.0": version: 2.3.2 resolution: "no-case@npm:2.3.2" @@ -17039,6 +17108,13 @@ __metadata: languageName: node linkType: hard +"path-to-regexp@npm:^8.1.0": + version: 8.2.0 + resolution: "path-to-regexp@npm:8.2.0" + checksum: 10/23378276a172b8ba5f5fb824475d1818ca5ccee7bbdb4674701616470f23a14e536c1db11da9c9e6d82b82c556a817bbf4eee6e41b9ed20090ef9427cbb38e13 + languageName: node + linkType: hard + "path-type@npm:^4.0.0": version: 4.0.0 resolution: "path-type@npm:4.0.0" @@ -17236,27 +17312,27 @@ __metadata: languageName: node linkType: hard -"playwright-core@npm:1.50.0": - version: 1.50.0 - resolution: "playwright-core@npm:1.50.0" +"playwright-core@npm:1.49.0": + version: 1.49.0 + resolution: "playwright-core@npm:1.49.0" bin: playwright-core: cli.js - checksum: 10/0d27e52164bcc37ed5aeaa0c7efa6a0b3616cfbb01e206c26572bff8b8e5f0923a993369c826056cd7bee4b975508a1ec257b533098ee9db7bc5b75832110e4d + checksum: 10/ef9c708293adab100337ed7fd8e61660be381707fc2b84f07b5f40d1ead44feb6a8e52fef98075e594522229d15a9ad56dd1471689cfa59409bec6447c22944d languageName: node linkType: hard -"playwright@npm:1.50.0, playwright@npm:^1.22.2, playwright@npm:^1.50.0": - version: 1.50.0 - resolution: "playwright@npm:1.50.0" +"playwright@npm:1.49.0, playwright@npm:^1.22.2": + version: 1.49.0 + resolution: "playwright@npm:1.49.0" dependencies: fsevents: "npm:2.3.2" - playwright-core: "npm:1.50.0" + 
playwright-core: "npm:1.49.0" dependenciesMeta: fsevents: optional: true bin: playwright: cli.js - checksum: 10/53521f05c48ab51a37d6fa280a7c1e6486e2879f9997e877227517945faf195ce16829cf144709bba292c3023bcd07cf44a4dd965458c9adc30ea6fbe1f0f74a + checksum: 10/1fb198d09d388ec46cc2f0fc6b889a8bde8a75066ded82d35f08ba333091ebf3fc4ddf11263a86058a7078c7238ec4f23a86a9f1dc3ebd4f610c9eb07841fb32 languageName: node linkType: hard @@ -18916,6 +18992,20 @@ __metadata: languageName: node linkType: hard +"sinon@npm:^19.0.2": + version: 19.0.2 + resolution: "sinon@npm:19.0.2" + dependencies: + "@sinonjs/commons": "npm:^3.0.1" + "@sinonjs/fake-timers": "npm:^13.0.2" + "@sinonjs/samsam": "npm:^8.0.1" + diff: "npm:^7.0.0" + nise: "npm:^6.1.1" + supports-color: "npm:^7.2.0" + checksum: 10/0be47968e9352269d0bdd26cdae7ae4e67d94fa007e8417d1e66ac95ba8537214edc770aff01b0f5a6f07588a1f7d3c947fff9366d799db85d3a4c405b875460 + languageName: node + linkType: hard + "sisteransi@npm:^1.0.5": version: 1.0.5 resolution: "sisteransi@npm:1.0.5" @@ -19676,7 +19766,7 @@ __metadata: languageName: node linkType: hard -"supports-color@npm:^7.1.0": +"supports-color@npm:^7.1.0, supports-color@npm:^7.2.0": version: 7.2.0 resolution: "supports-color@npm:7.2.0" dependencies: @@ -20347,6 +20437,13 @@ __metadata: languageName: node linkType: hard +"type-detect@npm:^4.1.0": + version: 4.1.0 + resolution: "type-detect@npm:4.1.0" + checksum: 10/e363bf0352427a79301f26a7795a27718624c49c576965076624eb5495d87515030b207217845f7018093adcbe169b2d119bb9b7f1a31a92bfbb1ab9639ca8dd + languageName: node + linkType: hard + "type-fest@npm:^0.13.1": version: 0.13.1 resolution: "type-fest@npm:0.13.1"