diff --git a/.cargo/config b/.cargo/config new file mode 100644 index 00000000000..bc9809d889f --- /dev/null +++ b/.cargo/config @@ -0,0 +1,2 @@ +[alias] +xtask = "run --package massa-xtask --" \ No newline at end of file diff --git a/.github/workflows/cd.yml b/.github/workflows/cd.yml index 91e356abb79..7ea0be36fad 100644 --- a/.github/workflows/cd.yml +++ b/.github/workflows/cd.yml @@ -52,7 +52,7 @@ jobs: - uses: actions-rs/toolchain@v1 with: profile: minimal - toolchain: nightly-2023-02-27 + toolchain: nightly-2023-06-01 target: ${{ matrix.target }} override: true - uses: Swatinem/rust-cache@v2 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ae10dca4d57..7bd4d3e4198 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -31,7 +31,7 @@ jobs: - uses: actions-rs/toolchain@v1 with: profile: minimal - toolchain: nightly-2023-02-27 + toolchain: nightly-2023-06-01 components: rustfmt override: true - uses: Swatinem/rust-cache@v2 @@ -55,7 +55,7 @@ jobs: - uses: actions-rs/toolchain@v1 with: profile: minimal - toolchain: nightly-2023-02-27 + toolchain: nightly-2023-06-01 - uses: Swatinem/rust-cache@v2 with: shared-key: "check" @@ -80,7 +80,7 @@ jobs: - uses: actions-rs/toolchain@v1 with: profile: minimal - toolchain: nightly-2023-02-27 + toolchain: nightly-2023-06-01 components: clippy override: true - uses: Swatinem/rust-cache@v2 @@ -116,7 +116,7 @@ jobs: - uses: actions-rs/toolchain@v1 with: profile: minimal - toolchain: nightly-2023-02-27 + toolchain: nightly-2023-06-01 override: true - uses: Swatinem/rust-cache@v2 with: @@ -156,7 +156,7 @@ jobs: - uses: actions-rs/toolchain@v1 with: profile: minimal - toolchain: nightly-2023-02-27 + toolchain: nightly-2023-06-01 components: rustfmt override: true - uses: actions/checkout@v3 @@ -191,7 +191,7 @@ jobs: - uses: actions-rs/toolchain@v1 with: profile: minimal - toolchain: nightly-2023-02-27 + toolchain: nightly-2023-06-01 components: rustfmt override: true - uses: Swatinem/rust-cache@v2 diff --git a/Cargo.lock b/Cargo.lock index 46c5d79a66a..80be6ef4c07 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -40,9 +40,9 @@ dependencies = [ [[package]] name = "aes-gcm" -version = "0.10.1" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82e1366e0c69c9f927b1fa5ce2c7bf9eafc8f9268c0b9800729e8b267612447c" +checksum = "209b47e8954a928e1d72e86eca7000ebb6655fe1436d33eefc2201cad027e237" dependencies = [ "aead", "aes", @@ -93,6 +93,12 @@ dependencies = [ "memchr", ] +[[package]] +name = "android-tzdata" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" + [[package]] name = "android_system_properties" version = "0.1.5" @@ -138,7 +144,7 @@ checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" [[package]] name = "as-ffi-bindings" version = "0.2.5" -source = "git+https://github.com/massalabs/as-ffi-bindings.git?tag=v0.3.3#81a1cba61a20a6e5065341b00d7f751c7974e65b" +source = "git+https://github.com/massalabs/as-ffi-bindings.git?tag=v0.4.0#7767634dfc22407bd2b0fa0e4fd7432231b10dd7" dependencies = [ "anyhow", "wasmer", @@ -170,9 +176,9 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.26", - "syn 2.0.15", + "proc-macro2 1.0.59", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -181,9 
+187,9 @@ version = "0.1.68" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.26", - "syn 2.0.15", + "proc-macro2 1.0.59", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -271,9 +277,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.0" +version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" +checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d" [[package]] name = "base64ct" @@ -302,8 +308,8 @@ dependencies = [ "lazy_static", "lazycell", "peeking_take_while", - "proc-macro2 1.0.56", - "quote 1.0.26", + "proc-macro2 1.0.59", + "quote 1.0.28", "regex", "rustc-hash", "shlex", @@ -340,7 +346,7 @@ dependencies = [ "cc", "cfg-if", "constant_time_eq", - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -380,7 +386,7 @@ dependencies = [ "borsh-derive-internal", "borsh-schema-derive-internal", "proc-macro-crate 0.1.5", - "proc-macro2 1.0.56", + "proc-macro2 1.0.59", "syn 1.0.109", ] @@ -390,8 +396,8 @@ version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "afb438156919598d2c7bad7e1c0adf3d26ed3840dbc010db1a882a65583ca2fb" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.26", + "proc-macro2 1.0.59", + "quote 1.0.28", "syn 1.0.109", ] @@ -401,8 +407,8 @@ version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "634205cc43f74a1b9046ef87c4540ebda95696ec0f315024860cad7c5b0f5ccd" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.26", + "proc-macro2 1.0.59", + "quote 1.0.28", "syn 1.0.109", ] @@ -417,9 +423,9 @@ dependencies = [ [[package]] name = "bstr" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3d4260bcc2e8fc9df1eac4919a720effeb63a3f0952f5bf4944adfa18897f09" +checksum = "a246e68bb43f6cd9db24bea052a53e40405417c5fb372e3d1a8a7f770a564ef5" dependencies = [ "memchr", "serde", @@ -427,15 +433,15 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.12.1" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b1ce199063694f33ffb7dd4e0ee620741495c32833cde5aa08f02a0bf96f0c8" +checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" [[package]] name = "bytecheck" -version = "0.6.10" +version = "0.6.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13fe11640a23eb24562225322cd3e452b93a3d4091d62fab69c70542fcd17d1f" +checksum = "8b6372023ac861f6e6dc89c8344a8f398fb42aaba2b5dbc649ca0c0e9dbcb627" dependencies = [ "bytecheck_derive", "ptr_meta", @@ -444,12 +450,12 @@ dependencies = [ [[package]] name = "bytecheck_derive" -version = "0.6.10" +version = "0.6.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e31225543cb46f81a7e224762764f4a6a0f097b1db0b175f69e8065efaa42de5" +checksum = "a7ec4c6f261935ad534c0c22dbef2201b45918860eb1c574b972bd213a76af61" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.26", + "proc-macro2 1.0.59", + "quote 1.0.28", "syn 1.0.109", ] @@ -514,25 +520,22 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.24" +version = "0.4.26" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e3c5919066adf22df73762e50cffcde3a758f2a848b113b586d1f86728b673b" +checksum = "ec837a71355b28f6556dbd569b37b3f363091c0bd4b2e735674521b4c5fd9bc5" dependencies = [ + "android-tzdata", "iana-time-zone", - "js-sys", - "num-integer", "num-traits", "serde", - "time 0.1.45", - "wasm-bindgen", "winapi", ] [[package]] name = "ciborium" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0c137568cc60b904a7724001b35ce2630fd00d5d84805fbb608ab89509d788f" +checksum = "effd91f6c78e5a4ace8a5d3c0b6bfaec9e2baaef55f3efc00e45fb2e477ee926" dependencies = [ "ciborium-io", "ciborium-ll", @@ -541,15 +544,15 @@ dependencies = [ [[package]] name = "ciborium-io" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "346de753af073cc87b52b2083a506b38ac176a44cfb05497b622e27be899b369" +checksum = "cdf919175532b369853f5d5e20b26b43112613fd6fe7aee757e35f7a44642656" [[package]] name = "ciborium-ll" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "213030a2b5a4e0c0892b6652260cf6ccac84827b83a85a534e178e3906c4cf1b" +checksum = "defaa24ecc093c77630e6c15e17c51f5e187bf35ee514f4e2d67baaa96dae22b" dependencies = [ "ciborium-io", "half", @@ -632,16 +635,6 @@ dependencies = [ "cc", ] -[[package]] -name = "codespan-reporting" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e" -dependencies = [ - "termcolor", - "unicode-width", -] - [[package]] name = "config" version = "0.13.3" @@ -663,15 +656,15 @@ dependencies = [ [[package]] name = "console" -version = "0.15.5" +version = "0.15.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3d79fbe8970a77e3e34151cc13d3b3e248aa0faaecb9f6091fa07ebefe5ad60" +checksum = "c926e00cc70edefdc64d3a5ff31cc65bb97a3460097762bd23afb4d8145fccf8" dependencies = [ "encode_unicode", "lazy_static", "libc", "unicode-width", - "windows-sys 0.42.0", + "windows-sys 0.45.0", ] [[package]] @@ -720,22 +713,25 @@ dependencies = [ [[package]] name = "cranelift-bforest" -version = "0.86.1" +version = "0.91.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "529ffacce2249ac60edba2941672dfedf3d96558b415d0d8083cd007456e0f55" +checksum = "2a2ab4512dfd3a6f4be184403a195f76e81a8a9f9e6c898e19d2dc3ce20e0115" dependencies = [ "cranelift-entity", ] [[package]] name = "cranelift-codegen" -version = "0.86.1" +version = "0.91.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "427d105f617efc8cb55f8d036a7fded2e227892d8780b4985e5551f8d27c4a92" +checksum = "98b022ed2a5913a38839dfbafe6cf135342661293b08049843362df4301261dc" dependencies = [ + "arrayvec", + "bumpalo", "cranelift-bforest", "cranelift-codegen-meta", "cranelift-codegen-shared", + "cranelift-egraph", "cranelift-entity", "cranelift-isle", "gimli 0.26.2", @@ -747,30 +743,44 @@ dependencies = [ [[package]] name = "cranelift-codegen-meta" -version = "0.86.1" +version = "0.91.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "551674bed85b838d45358e3eab4f0ffaa6790c70dc08184204b9a54b41cdb7d1" +checksum = "639307b45434ad112a98f8300c0f0ab085cbefcd767efcdef9ef19d4c0756e74" dependencies = [ "cranelift-codegen-shared", ] [[package]] name = "cranelift-codegen-shared" -version = "0.86.1" +version = "0.91.1" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b3a63ae57498c3eb495360944a33571754241e15e47e3bcae6082f40fec5866" +checksum = "278e52e29c53fcf32431ef08406c295699a70306d05a0715c5b1bf50e33a9ab7" + +[[package]] +name = "cranelift-egraph" +version = "0.91.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "624b54323b06e675293939311943ba82d323bb340468ce1889be5da7932c8d73" +dependencies = [ + "cranelift-entity", + "fxhash", + "hashbrown 0.12.3", + "indexmap", + "log", + "smallvec", +] [[package]] name = "cranelift-entity" -version = "0.86.1" +version = "0.91.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11aa8aa624c72cc1c94ea3d0739fa61248260b5b14d3646f51593a88d67f3e6e" +checksum = "9a59bcbca89c3f1b70b93ab3cbba5e5e0cbf3e63dadb23c7525cb142e21a9d4c" [[package]] name = "cranelift-frontend" -version = "0.86.1" +version = "0.91.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "544ee8f4d1c9559c9aa6d46e7aaeac4a13856d620561094f35527356c7d21bd0" +checksum = "0d70abacb8cfef3dc8ff7e8836e9c1d70f7967dfdac824a4cd5e30223415aca6" dependencies = [ "cranelift-codegen", "log", @@ -780,9 +790,9 @@ dependencies = [ [[package]] name = "cranelift-isle" -version = "0.86.1" +version = "0.91.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed16b14363d929b8c37e3c557d0a7396791b383ecc302141643c054343170aad" +checksum = "393bc73c451830ff8dbb3a07f61843d6cb41a084f9996319917c0b291ed785bb" [[package]] name = "crc32fast" @@ -873,7 +883,7 @@ dependencies = [ "autocfg", "cfg-if", "crossbeam-utils", - "memoffset 0.8.0", + "memoffset", "scopeguard", ] @@ -907,16 +917,6 @@ dependencies = [ "typenum", ] -[[package]] -name = "ctor" -version = "0.1.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096" -dependencies = [ - "quote 1.0.26", - "syn 1.0.109", -] - [[package]] name = "ctr" version = "0.9.2" @@ -928,12 +928,12 @@ dependencies = [ [[package]] name = "ctrlc" -version = "3.2.5" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbcf33c2a618cbe41ee43ae6e9f2e48368cd9f9db2896f10167d8d762679f639" +checksum = "2a011bbe2c35ce9c1f143b7af6f94f29a167beb4cd1d29e6740ce836f723120e" dependencies = [ "nix 0.26.2", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -950,57 +950,37 @@ dependencies = [ ] [[package]] -name = "cxx" -version = "1.0.94" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f61f1b6389c3fe1c316bf8a4dccc90a38208354b330925bce1f74a6c4756eb93" -dependencies = [ - "cc", - "cxxbridge-flags", - "cxxbridge-macro", - "link-cplusplus", -] - -[[package]] -name = "cxx-build" -version = "1.0.94" +name = "darling" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12cee708e8962df2aeb38f594aae5d827c022b6460ac71a7a3e2c3c2aae5a07b" +checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850" dependencies = [ - "cc", - "codespan-reporting", - "once_cell", - "proc-macro2 1.0.56", - "quote 1.0.26", - "scratch", - "syn 2.0.15", + "darling_core 0.14.4", + "darling_macro 0.14.4", ] [[package]] -name = "cxxbridge-flags" -version = "1.0.94" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7944172ae7e4068c533afbb984114a56c46e9ccddda550499caa222902c7f7bb" - -[[package]] -name = "cxxbridge-macro" -version = "1.0.94" +name = 
"darling" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2345488264226bf682893e25de0769f3360aac9957980ec49361b083ddaa5bc5" +checksum = "0558d22a7b463ed0241e993f76f09f30b126687447751a8638587b864e4b3944" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.26", - "syn 2.0.15", + "darling_core 0.20.1", + "darling_macro 0.20.1", ] [[package]] -name = "darling" -version = "0.20.1" +name = "darling_core" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0558d22a7b463ed0241e993f76f09f30b126687447751a8638587b864e4b3944" +checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" dependencies = [ - "darling_core", - "darling_macro", + "fnv", + "ident_case", + "proc-macro2 1.0.59", + "quote 1.0.28", + "strsim 0.10.0", + "syn 1.0.109", ] [[package]] @@ -1011,10 +991,21 @@ checksum = "ab8bfa2e259f8ee1ce5e97824a3c55ec4404a0d772ca7fa96bf19f0752a046eb" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.56", - "quote 1.0.26", + "proc-macro2 1.0.59", + "quote 1.0.28", "strsim 0.10.0", - "syn 2.0.15", + "syn 2.0.18", +] + +[[package]] +name = "darling_macro" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" +dependencies = [ + "darling_core 0.14.4", + "quote 1.0.28", + "syn 1.0.109", ] [[package]] @@ -1023,9 +1014,9 @@ version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ - "darling_core", - "quote 1.0.26", - "syn 2.0.15", + "darling_core 0.20.1", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -1041,6 +1032,17 @@ dependencies = [ "parking_lot_core", ] +[[package]] +name = "derivative" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" +dependencies = [ + "proc-macro2 1.0.59", + "quote 1.0.28", + "syn 1.0.109", +] + [[package]] name = "dialoguer" version = "0.10.4" @@ -1053,12 +1055,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "diff" -version = "0.1.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8" - [[package]] name = "difflib" version = "0.4.0" @@ -1076,9 +1072,9 @@ dependencies = [ [[package]] name = "digest" -version = "0.10.6" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer 0.10.4", "crypto-common", @@ -1132,9 +1128,9 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.26", - "syn 2.0.15", + "proc-macro2 1.0.59", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -1159,8 +1155,8 @@ dependencies = [ "byteorder", "lazy_static", "proc-macro-error", - "proc-macro2 1.0.56", - "quote 1.0.26", + "proc-macro2 1.0.59", + "quote 1.0.28", "syn 1.0.109", ] @@ -1232,8 +1228,8 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c134c37760b27a871ba422106eedbb8247da973a09e82558bf26d619c882b159" dependencies = 
[ - "proc-macro2 1.0.56", - "quote 1.0.26", + "proc-macro2 1.0.59", + "quote 1.0.28", "syn 1.0.109", ] @@ -1244,8 +1240,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8ea75f31022cba043afe037940d73684327e915f88f62478e778c3de914cd0a" dependencies = [ "enum_delegate_lib", - "proc-macro2 1.0.56", - "quote 1.0.26", + "proc-macro2 1.0.59", + "quote 1.0.28", "syn 1.0.109", ] @@ -1255,31 +1251,31 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2e1f6c3800b304a6be0012039e2a45a322a093539c45ab818d9e6895a39c90fe" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.26", + "proc-macro2 1.0.59", + "quote 1.0.28", "rand 0.8.5", "syn 1.0.109", ] [[package]] name = "enumset" -version = "1.0.13" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59b025475ad197bd8b4a9bdce339216b6cf3bd568bf2e107c286b51613f0b3cf" +checksum = "e875f1719c16de097dee81ed675e2d9bb63096823ed3f0ca827b7dea3028bbbb" dependencies = [ "enumset_derive", ] [[package]] name = "enumset_derive" -version = "0.7.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c2852ff17a4c9a2bb2abbca3074737919cb05dc24b0a8ca9498081a7033dd6" +checksum = "e08b6c6ab82d70f08844964ba10c7babb716de2ecaeab9be5717918a5177d3af" dependencies = [ - "darling", - "proc-macro2 1.0.56", - "quote 1.0.26", - "syn 2.0.15", + "darling 0.20.1", + "proc-macro2 1.0.59", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -1385,6 +1381,15 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "form_urlencoded" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8" +dependencies = [ + "percent-encoding", +] + [[package]] name = "fragile" version = "2.0.0" @@ -1466,9 +1471,9 @@ version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.26", - "syn 2.0.15", + "proc-macro2 1.0.59", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -1645,9 +1650,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.18" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17f8a914c2987b688368b5138aa05321db91f4090cf26118185672ad588bce21" +checksum = "d357c7ae988e7d2182f7d7871d0b963962420b0678b0997ce7de72001aeab782" dependencies = [ "bytes", "fnv", @@ -1762,7 +1767,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -1879,12 +1884,11 @@ dependencies = [ [[package]] name = "iana-time-zone-haiku" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" dependencies = [ - "cxx", - "cxx-build", + "cc", ] [[package]] @@ -1893,6 +1897,16 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" +[[package]] 
+name = "idna" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + [[package]] name = "indexmap" version = "1.9.3" @@ -1924,9 +1938,9 @@ dependencies = [ [[package]] name = "io-lifetimes" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c66c74d2ae7e79a5a8f7ac924adbe38ee42a859c6539ad869eb51f0b52dc220" +checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ "hermit-abi 0.3.1", "libc", @@ -1959,9 +1973,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.61" +version = "0.3.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "445dde2150c55e483f3d8416706b97ec8e8237c307e5b7b4b8dd15e6af2a0730" +checksum = "2f37a4a5928311ac501dee68b3c7613a1037d0edb30c8e5427bd832d55d1b790" dependencies = [ "wasm-bindgen", ] @@ -2071,8 +2085,8 @@ checksum = "c6027ac0b197ce9543097d02a290f550ce1d9432bf301524b013053c0b75cc94" dependencies = [ "heck 0.4.1", "proc-macro-crate 1.3.1", - "proc-macro2 1.0.56", - "quote 1.0.26", + "proc-macro2 1.0.59", + "quote 1.0.28", "syn 1.0.109", ] @@ -2135,9 +2149,9 @@ dependencies = [ [[package]] name = "keccak" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3afef3b6eff9ce9d8ff9b3601125eec7f0c8cbac7abd14f355d053fa56c98768" +checksum = "8f6d5ed8676d904364de097082f4e7d240b571b67989ced0240f08b7f966f940" dependencies = [ "cpufeatures", ] @@ -2162,9 +2176,9 @@ checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67" [[package]] name = "libc" -version = "0.2.142" +version = "0.2.144" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a987beff54b60ffa6d51982e1aa1146bc42f19bd26be28b0586f252fccf5317" +checksum = "2b00cc1c228a6782d0f076e7b232802e0c5689d41bb5df366f2a6b6621cfdfe1" [[package]] name = "libloading" @@ -2178,9 +2192,9 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "348108ab3fba42ec82ff6e9564fc4ca0247bdccdc68dd8af9764bbc79c3c8ffb" +checksum = "f7012b1bbb0719e1097c47611d3898568c546d597c2e74d66f6087edd5233ff4" [[package]] name = "librocksdb-sys" @@ -2209,15 +2223,6 @@ dependencies = [ "vcpkg", ] -[[package]] -name = "link-cplusplus" -version = "1.0.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecd207c9c713c34f95a097a5b029ac2ce6010530c7b49d7fea24d977dede04f5" -dependencies = [ - "cc", -] - [[package]] name = "linked-hash-map" version = "0.5.6" @@ -2226,9 +2231,9 @@ checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "linux-raw-sys" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ece97ea872ece730aed82664c424eb4c8291e1ff2480247ccf7409044bc6479f" +checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "lock_api" @@ -2242,12 +2247,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.17" +version = "0.4.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" -dependencies = [ - "cfg-if", -] +checksum = 
"518ef76f2f87365916b142844c16d8fefd85039bc5699050210a7778ee1cd1de" [[package]] name = "loupe" @@ -2265,17 +2267,18 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0fbfc88337168279f2e9ae06e157cfed4efd3316e14dc96ed074d4f2e6c5952" dependencies = [ - "quote 1.0.26", + "quote 1.0.28", "syn 1.0.109", ] [[package]] -name = "lru" -version = "0.10.0" +name = "lsmtree" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03f1160296536f10c833a82dca22267d5486734230d47bf00bf435885814ba1e" +checksum = "316f66136f917f8cb04eac057855ec87dd56eb2c8e6ecd219ca8cbff4d9f2e03" dependencies = [ - "hashbrown 0.13.2", + "bytes", + "digest 0.10.7", ] [[package]] @@ -2310,7 +2313,7 @@ dependencies = [ [[package]] name = "massa-client" -version = "0.1.0" +version = "0.23.0" dependencies = [ "anyhow", "atty", @@ -2320,8 +2323,9 @@ dependencies = [ "lazy_static", "massa_api_exports", "massa_models", + "massa_proto", "massa_sdk", - "massa_signature 0.1.0", + "massa_signature", "massa_time", "massa_wallet", "paw", @@ -2338,7 +2342,7 @@ dependencies = [ [[package]] name = "massa-node" -version = "0.1.0" +version = "0.23.0" dependencies = [ "anyhow", "crossbeam-channel", @@ -2351,6 +2355,7 @@ dependencies = [ "massa_bootstrap", "massa_consensus_exports", "massa_consensus_worker", + "massa_db", "massa_executed_ops", "massa_execution_exports", "massa_execution_worker", @@ -2370,13 +2375,12 @@ dependencies = [ "massa_protocol_worker", "massa_storage", "massa_time", - "massa_versioning_worker", + "massa_versioning", "massa_wallet", "parking_lot", "paw", "peernet", "serde", - "serde_json", "structopt", "tokio", "tracing", @@ -2386,17 +2390,16 @@ dependencies = [ [[package]] name = "massa-sc-runtime" version = "0.10.0" -source = "git+https://github.com/massalabs/massa-sc-runtime?branch=main#bab919aa4a9cf1da0cf8aaf845a28dcdfc9d3631" +source = "git+https://github.com/massalabs/massa-sc-runtime?branch=main#4074cc64d32d9b1104f7c1d5c9f62844dff82ed1" dependencies = [ "anyhow", "as-ffi-bindings", - "base64 0.21.0", + "base64 0.21.2", "chrono", "displaydoc", "function_name", "glob", "loupe", - "massa_hash 0.1.0 (git+https://github.com/massalabs/massa)", "more-asserts 0.3.1", "num_enum", "parking_lot", @@ -2419,7 +2422,7 @@ dependencies = [ [[package]] name = "massa_api" -version = "0.1.0" +version = "0.23.0" dependencies = [ "async-trait", "futures", @@ -2429,15 +2432,16 @@ dependencies = [ "massa_api_exports", "massa_consensus_exports", "massa_execution_exports", - "massa_hash 0.1.0", + "massa_hash", "massa_models", "massa_pool_exports", "massa_pos_exports", "massa_protocol_exports", - "massa_serialization 0.1.0", - "massa_signature 0.1.0", + "massa_serialization", + "massa_signature", "massa_storage", "massa_time", + "massa_versioning", "massa_wallet", "parking_lot", "serde", @@ -2451,18 +2455,19 @@ dependencies = [ [[package]] name = "massa_api_exports" -version = "0.1.0" +version = "0.23.0" dependencies = [ "displaydoc", "jsonrpsee", "massa_consensus_exports", "massa_execution_exports", "massa_final_state", - "massa_hash 0.1.0", + "massa_hash", "massa_models", "massa_protocol_exports", - "massa_signature 0.1.0", + "massa_signature", "massa_time", + "massa_versioning", "massa_wallet", "paginate", "serde", @@ -2473,33 +2478,29 @@ dependencies = [ [[package]] name = "massa_async_pool" -version = "0.1.0" +version = "0.23.0" dependencies = [ - "displaydoc", - "futures", - "lazy_static", - "massa_hash 0.1.0", + "massa_db", + "massa_hash", 
"massa_ledger_exports", - "massa_logging", + "massa_ledger_worker", "massa_models", "massa_proto", - "massa_serialization 0.1.0", - "massa_signature 0.1.0", + "massa_serialization", + "massa_signature", "massa_time", "nom", "num", - "pretty_assertions", + "parking_lot", "rand 0.8.5", + "rocksdb", "serde", - "serde_json", - "serial_test 1.0.0", - "thiserror", - "tracing", + "tempfile", ] [[package]] name = "massa_bootstrap" -version = "0.1.0" +version = "0.23.0" dependencies = [ "bitvec", "crossbeam", @@ -2508,9 +2509,10 @@ dependencies = [ "lazy_static", "massa_async_pool", "massa_consensus_exports", + "massa_db", "massa_executed_ops", "massa_final_state", - "massa_hash 0.1.0", + "massa_hash", "massa_ledger_exports", "massa_ledger_worker", "massa_logging", @@ -2518,10 +2520,10 @@ dependencies = [ "massa_pos_exports", "massa_pos_worker", "massa_protocol_exports", - "massa_serialization 0.1.0", - "massa_signature 0.1.0", + "massa_serialization", + "massa_signature", "massa_time", - "massa_versioning_worker", + "massa_versioning", "mio", "mockall", "nom", @@ -2539,35 +2541,31 @@ dependencies = [ [[package]] name = "massa_cipher" -version = "0.1.0" +version = "0.23.0" dependencies = [ "aes-gcm", "displaydoc", - "massa_serialization 0.1.0", + "massa_serialization", "pbkdf2", "rand 0.8.5", - "rand_core 0.6.4", - "serde", - "serde_json", - "serde_qs", "thiserror", ] [[package]] name = "massa_consensus_exports" -version = "0.1.0" +version = "0.23.0" dependencies = [ "crossbeam-channel", "displaydoc", "jsonrpsee", "massa_execution_exports", - "massa_hash 0.1.0", + "massa_hash", "massa_models", "massa_pool_exports", "massa_pos_exports", "massa_protocol_exports", - "massa_serialization 0.1.0", - "massa_signature 0.1.0", + "massa_serialization", + "massa_signature", "massa_storage", "massa_time", "mockall", @@ -2580,47 +2578,67 @@ dependencies = [ [[package]] name = "massa_consensus_worker" -version = "0.1.0" +version = "0.23.0" dependencies = [ - "displaydoc", + "itertools", "massa_consensus_exports", - "massa_hash 0.1.0", + "massa_hash", "massa_logging", "massa_models", - "massa_signature 0.1.0", + "massa_signature", "massa_storage", "massa_time", "num", "parking_lot", - "serde", - "serde_json", + "rand 0.8.5", "tracing", ] [[package]] -name = "massa_executed_ops" +name = "massa_db" version = "0.1.0" dependencies = [ - "massa_hash 0.1.0", + "displaydoc", + "lsmtree", + "massa_hash", "massa_models", - "massa_serialization 0.1.0", + "massa_serialization", + "parking_lot", + "rocksdb", + "thiserror", +] + +[[package]] +name = "massa_executed_ops" +version = "0.23.0" +dependencies = [ + "massa_db", + "massa_hash", + "massa_ledger_worker", + "massa_models", + "massa_serialization", "nom", + "parking_lot", + "rocksdb", + "tempfile", ] [[package]] name = "massa_execution_exports" -version = "0.1.0" +version = "0.23.0" dependencies = [ "displaydoc", "massa-sc-runtime", "massa_final_state", - "massa_hash 0.1.0", + "massa_hash", "massa_ledger_exports", "massa_models", "massa_module_cache", "massa_proto", "massa_storage", "massa_time", + "massa_versioning", + "mockall", "num", "parking_lot", "tempfile", @@ -2630,27 +2648,28 @@ dependencies = [ [[package]] name = "massa_execution_worker" -version = "0.1.0" +version = "0.23.0" dependencies = [ "anyhow", "criterion", "hex-literal", "massa-sc-runtime", "massa_async_pool", + "massa_db", "massa_executed_ops", "massa_execution_exports", "massa_final_state", - "massa_hash 0.1.0", + "massa_hash", "massa_ledger_exports", "massa_ledger_worker", "massa_models", 
"massa_module_cache", "massa_pos_exports", "massa_pos_worker", - "massa_signature 0.1.0", + "massa_signature", "massa_storage", "massa_time", - "massa_versioning_worker", + "massa_versioning", "num", "parking_lot", "rand 0.8.5", @@ -2665,80 +2684,72 @@ dependencies = [ [[package]] name = "massa_factory_exports" -version = "0.1.0" +version = "0.23.0" dependencies = [ - "anyhow", "displaydoc", "massa_consensus_exports", - "massa_execution_exports", - "massa_hash 0.1.0", - "massa_ledger_exports", + "massa_hash", "massa_models", "massa_pool_exports", "massa_pos_exports", "massa_protocol_exports", - "massa_serialization 0.1.0", - "massa_signature 0.1.0", + "massa_signature", "massa_storage", "massa_time", - "nom", - "num", - "serde", - "serde_json", "thiserror", - "tracing", ] [[package]] name = "massa_factory_worker" -version = "0.1.0" +version = "0.23.0" dependencies = [ - "anyhow", "crossbeam-channel", "massa_consensus_exports", "massa_factory_exports", - "massa_hash 0.1.0", + "massa_hash", "massa_models", "massa_pool_exports", "massa_pos_exports", "massa_protocol_exports", - "massa_serialization 0.1.0", - "massa_signature 0.1.0", + "massa_signature", "massa_storage", "massa_time", + "massa_versioning", "massa_wallet", "parking_lot", - "serde", - "serde_json", - "serial_test 1.0.0", "tracing", ] [[package]] name = "massa_final_state" -version = "0.1.0" +version = "0.23.0" dependencies = [ "bs58", "displaydoc", "massa_async_pool", + "massa_db", "massa_executed_ops", - "massa_hash 0.1.0", "massa_ledger_exports", "massa_ledger_worker", "massa_models", "massa_pos_exports", + "massa_pos_worker", "massa_proto", - "massa_serialization 0.1.0", - "massa_signature 0.1.0", + "massa_serialization", + "massa_time", + "massa_versioning", "nom", + "parking_lot", + "rocksdb", "serde", + "tempfile", "thiserror", "tracing", ] [[package]] name = "massa_grpc" -version = "0.1.0" +version = "0.23.0" dependencies = [ "crossbeam", "displaydoc", @@ -2748,15 +2759,16 @@ dependencies = [ "itertools", "massa_consensus_exports", "massa_execution_exports", - "massa_hash 0.1.0", + "massa_hash", "massa_models", "massa_pool_exports", "massa_pos_exports", "massa_proto", "massa_protocol_exports", - "massa_serialization 0.1.0", + "massa_serialization", "massa_storage", "massa_time", + "massa_versioning", "massa_wallet", "serde", "thiserror", @@ -2772,12 +2784,14 @@ dependencies = [ [[package]] name = "massa_hash" -version = "0.1.0" +version = "0.23.0" dependencies = [ "blake3", "bs58", "displaydoc", - "massa_serialization 0.1.0", + "generic-array", + "lsmtree", + "massa_serialization", "nom", "serde", "serde_json", @@ -2785,32 +2799,19 @@ dependencies = [ "thiserror", ] -[[package]] -name = "massa_hash" -version = "0.1.0" -source = "git+https://github.com/massalabs/massa#761c3458f2dd39f2db2baeaeb338ddcdc74922ab" -dependencies = [ - "blake3", - "bs58", - "displaydoc", - "massa_serialization 0.1.0 (git+https://github.com/massalabs/massa)", - "nom", - "serde", - "thiserror", -] - [[package]] name = "massa_ledger_exports" -version = "0.1.0" +version = "0.23.0" dependencies = [ "displaydoc", - "massa_hash 0.1.0", + "massa_db", + "massa_hash", "massa_models", "massa_proto", - "massa_serialization 0.1.0", - "massa_signature 0.1.0", + "massa_serialization", "nom", "num_enum", + "rocksdb", "serde", "serde_json", "tempfile", @@ -2819,33 +2820,31 @@ dependencies = [ [[package]] name = "massa_ledger_worker" -version = "0.1.0" +version = "0.23.0" dependencies = [ - "massa_hash 0.1.0", + "massa_db", + "massa_hash", "massa_ledger_exports", 
"massa_models", - "massa_serialization 0.1.0", - "massa_signature 0.1.0", - "nom", + "massa_serialization", + "massa_signature", + "parking_lot", "rocksdb", "serde_json", "tempfile", - "tracing", ] [[package]] name = "massa_logging" -version = "0.1.0" +version = "0.23.0" dependencies = [ - "pretty_assertions", "serde_json", - "serial_test 1.0.0", "tracing", ] [[package]] name = "massa_models" -version = "0.1.0" +version = "0.23.0" dependencies = [ "bitvec", "bs58", @@ -2853,10 +2852,10 @@ dependencies = [ "directories", "displaydoc", "lazy_static", - "massa_hash 0.1.0", + "massa_hash", "massa_proto", - "massa_serialization 0.1.0", - "massa_signature 0.1.0", + "massa_serialization", + "massa_signature", "massa_time", "nom", "num", @@ -2866,18 +2865,19 @@ dependencies = [ "serde_with", "serial_test 1.0.0", "thiserror", + "transition", ] [[package]] name = "massa_module_cache" -version = "0.1.0" +version = "0.23.0" dependencies = [ "anyhow", "displaydoc", "massa-sc-runtime", - "massa_hash 0.1.0", + "massa_hash", "massa_models", - "massa_serialization 0.1.0", + "massa_serialization", "nom", "num_enum", "rand 0.8.5", @@ -2891,7 +2891,7 @@ dependencies = [ [[package]] name = "massa_pool_exports" -version = "0.1.0" +version = "0.23.0" dependencies = [ "crossbeam-channel", "massa_models", @@ -2904,17 +2904,18 @@ dependencies = [ [[package]] name = "massa_pool_worker" -version = "0.1.0" +version = "0.23.0" dependencies = [ "crossbeam-channel", "massa_execution_exports", - "massa_hash 0.1.0", + "massa_hash", "massa_models", "massa_pool_exports", "massa_pos_exports", - "massa_signature 0.1.0", + "massa_signature", "massa_storage", "massa_time", + "mockall", "num", "parking_lot", "tokio", @@ -2923,51 +2924,45 @@ dependencies = [ [[package]] name = "massa_pos_exports" -version = "0.1.0" +version = "0.23.0" dependencies = [ - "anyhow", "bitvec", "crossbeam-channel", "displaydoc", - "massa_hash 0.1.0", - "massa_ledger_exports", + "massa_db", + "massa_hash", "massa_models", - "massa_serialization 0.1.0", - "massa_signature 0.1.0", - "massa_time", + "massa_serialization", + "massa_signature", + "mockall", "nom", "num", "parking_lot", + "rocksdb", "serde", "serde_json", + "tempfile", "thiserror", - "tokio", "tracing", ] [[package]] name = "massa_pos_worker" -version = "0.1.0" +version = "0.23.0" dependencies = [ - "bitvec", - "massa_final_state", - "massa_hash 0.1.0", + "massa_hash", "massa_models", "massa_pos_exports", - "massa_signature 0.1.0", - "massa_time", "parking_lot", "rand 0.8.5", "rand_distr", "rand_xoshiro", - "serde", - "serde_json", "tracing", ] [[package]] name = "massa_proto" -version = "0.1.0" +version = "0.23.0" dependencies = [ "glob", "prost", @@ -2979,15 +2974,16 @@ dependencies = [ [[package]] name = "massa_protocol_exports" -version = "0.1.0" +version = "0.23.0" dependencies = [ "displaydoc", - "massa_hash 0.1.0", + "massa_hash", "massa_models", - "massa_serialization 0.1.0", - "massa_signature 0.1.0", + "massa_serialization", + "massa_signature", "massa_storage", "massa_time", + "massa_versioning", "mockall", "nom", "peernet", @@ -2999,26 +2995,27 @@ dependencies = [ [[package]] name = "massa_protocol_worker" -version = "0.1.0" +version = "0.23.0" dependencies = [ "crossbeam", - "lru", "massa_consensus_exports", - "massa_hash 0.1.0", + "massa_hash", "massa_logging", "massa_models", "massa_pool_exports", "massa_protocol_exports", - "massa_serialization 0.1.0", - "massa_signature 0.1.0", + "massa_serialization", + "massa_signature", "massa_storage", "massa_time", + "massa_versioning", 
"nom", "num_enum", "parking_lot", "peernet", "rand 0.8.5", "rayon", + "schnellru", "serde_json", "serial_test 2.0.0", "tempfile", @@ -3027,7 +3024,7 @@ dependencies = [ [[package]] name = "massa_sdk" -version = "0.1.0" +version = "0.23.0" dependencies = [ "http", "jsonrpsee", @@ -3035,23 +3032,16 @@ dependencies = [ "jsonrpsee-ws-client", "massa_api_exports", "massa_models", + "massa_proto", "massa_time", -] - -[[package]] -name = "massa_serialization" -version = "0.1.0" -dependencies = [ - "displaydoc", - "nom", "thiserror", - "unsigned-varint", + "tonic", + "tracing", ] [[package]] name = "massa_serialization" -version = "0.1.0" -source = "git+https://github.com/massalabs/massa#761c3458f2dd39f2db2baeaeb338ddcdc74922ab" +version = "0.23.0" dependencies = [ "displaydoc", "nom", @@ -3061,92 +3051,74 @@ dependencies = [ [[package]] name = "massa_signature" -version = "0.1.0" +version = "0.23.0" dependencies = [ "bs58", "displaydoc", "ed25519-dalek", - "massa_hash 0.1.0", - "massa_serialization 0.1.0", + "massa_hash", + "massa_serialization", "nom", "rand 0.7.3", "serde", "serde_json", "serial_test 1.0.0", "thiserror", -] - -[[package]] -name = "massa_signature" -version = "0.1.0" -source = "git+https://github.com/massalabs/massa#761c3458f2dd39f2db2baeaeb338ddcdc74922ab" -dependencies = [ - "bs58", - "displaydoc", - "ed25519-dalek", - "massa_hash 0.1.0 (git+https://github.com/massalabs/massa)", - "massa_serialization 0.1.0 (git+https://github.com/massalabs/massa)", - "nom", - "rand 0.7.3", - "serde", - "thiserror", + "transition", ] [[package]] name = "massa_storage" -version = "0.1.0" +version = "0.23.0" dependencies = [ "massa_factory_exports", - "massa_logging", "massa_models", - "massa_signature 0.1.0", + "massa_signature", "parking_lot", - "serde_json", - "tracing", ] [[package]] name = "massa_time" -version = "0.1.0" +version = "0.23.0" dependencies = [ "displaydoc", - "massa_serialization 0.1.0", + "massa_serialization", "nom", "serde", "thiserror", - "time 0.3.20", + "time", ] [[package]] -name = "massa_versioning_exports" -version = "0.1.0" - -[[package]] -name = "massa_versioning_worker" -version = "0.1.0" +name = "massa_versioning" +version = "0.23.0" dependencies = [ - "chrono", "machine", + "massa_db", + "massa_hash", "massa_models", - "massa_serialization 0.1.0", + "massa_proto", + "massa_serialization", + "massa_signature", "massa_time", "more-asserts 0.3.1", "nom", "num_enum", "parking_lot", + "tempfile", "thiserror", "tracing", ] [[package]] name = "massa_wallet" -version = "0.1.0" +version = "0.23.0" dependencies = [ "displaydoc", "massa_cipher", - "massa_hash 0.1.0", + "massa_hash", "massa_models", - "massa_signature 0.1.0", + "massa_signature", "serde", "serde_json", "serde_qs", @@ -3154,6 +3126,15 @@ dependencies = [ "thiserror", ] +[[package]] +name = "massa_xtask" +version = "0.23.0" +dependencies = [ + "massa_models", + "toml_edit", + "walkdir", +] + [[package]] name = "matchit" version = "0.7.0" @@ -3175,15 +3156,6 @@ dependencies = [ "libc", ] -[[package]] -name = "memoffset" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" -dependencies = [ - "autocfg", -] - [[package]] name = "memoffset" version = "0.8.0" @@ -3237,14 +3209,14 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.6" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9" 
+checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" dependencies = [ "libc", "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -3269,8 +3241,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22ce75669015c4f47b289fd4d4f56e894e4c96003ffdf3ac51313126f94c6cbb" dependencies = [ "cfg-if", - "proc-macro2 1.0.56", - "quote 1.0.26", + "proc-macro2 1.0.59", + "quote 1.0.28", "syn 1.0.109", ] @@ -3457,8 +3429,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcbff9bc912032c62bf65ef1d5aea88983b420f4f839db1e9b0c281a25c9c799" dependencies = [ "proc-macro-crate 1.3.1", - "proc-macro2 1.0.56", - "quote 1.0.26", + "proc-macro2 1.0.59", + "quote 1.0.28", "syn 1.0.109", ] @@ -3491,9 +3463,9 @@ checksum = "3a74f2cda724d43a0a63140af89836d4e7db6138ef67c9f96d3a0f0150d05000" [[package]] name = "once_cell" -version = "1.17.1" +version = "1.17.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" +checksum = "9670a07f94779e00908f3e686eab508878ebb390ba6e604d3a284c00e8d0487b" [[package]] name = "oorandom" @@ -3529,15 +3501,6 @@ version = "6.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ceedf44fb00f2d1984b0bc98102627ce622e083e49a5bacdb3e514fa4238e267" -[[package]] -name = "output_vt100" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "628223faebab4e3e40667ee0b2336d34a5b960ff60ea743ddfdbcf7770bcfb66" -dependencies = [ - "winapi", -] - [[package]] name = "overload" version = "0.1.1" @@ -3609,8 +3572,8 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f35583365be5d148e959284f42526841917b7bfa09e2d1a7ad5dde2cf0eaa39" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.26", + "proc-macro2 1.0.59", + "quote 1.0.28", "syn 1.0.109", ] @@ -3626,7 +3589,7 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "83a0692ec44e4cf1ef28ca317f14f8f07da2d95ec3fa01f86e4467b725e60917" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", "hmac", "password-hash", "sha2 0.10.6", @@ -3641,13 +3604,10 @@ checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" [[package]] name = "peernet" version = "0.1.0" -source = "git+https://github.com/massalabs/PeerNet?rev=1bb1f452bf63b78a89eb9542fb019b88d894c664#1bb1f452bf63b78a89eb9542fb019b88d894c664" +source = "git+https://github.com/massalabs/PeerNet?rev=bf8adf5#bf8adf50e2427a6692c65f0dbb53c69b504185e0" dependencies = [ "crossbeam", "enum_delegate", - "massa_hash 0.1.0 (git+https://github.com/massalabs/massa)", - "massa_serialization 0.1.0 (git+https://github.com/massalabs/massa)", - "massa_signature 0.1.0 (git+https://github.com/massalabs/massa)", "mio", "parking_lot", "quiche", @@ -3691,9 +3651,9 @@ checksum = "6c435bf1076437b851ebc8edc3a18442796b30f1728ffea6262d59bbe28b077e" dependencies = [ "pest", "pest_meta", - "proc-macro2 1.0.56", - "quote 1.0.26", - "syn 2.0.15", + "proc-macro2 1.0.59", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -3719,22 +3679,22 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.12" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" +checksum = 
"c95a7476719eab1e366eaf73d0260af3021184f18177925b07f54b30089ceead" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.12" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" +checksum = "39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.26", - "syn 1.0.109", + "proc-macro2 1.0.59", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -3831,25 +3791,13 @@ dependencies = [ "termtree", ] -[[package]] -name = "pretty_assertions" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a25e9bcb20aa780fd0bb16b72403a9064d6b3f22f026946029acb941a50af755" -dependencies = [ - "ctor", - "diff", - "output_vt100", - "yansi", -] - [[package]] name = "prettyplease" version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c8646e95016a7a6c4adea95bafa8a16baab64b583356217f2c85db4a39d9a86" dependencies = [ - "proc-macro2 1.0.56", + "proc-macro2 1.0.59", "syn 1.0.109", ] @@ -3879,8 +3827,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.56", - "quote 1.0.26", + "proc-macro2 1.0.59", + "quote 1.0.28", "syn 1.0.109", "version_check", ] @@ -3891,8 +3839,8 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.26", + "proc-macro2 1.0.59", + "quote 1.0.28", "version_check", ] @@ -3907,18 +3855,18 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.56" +version = "1.0.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b63bdb0cd06f1f4dedf69b254734f9b45af66e4a031e42a7480257d9898b435" +checksum = "6aeca18b86b413c660b781aa319e4e2648a3e6f9eadc9b47e9038e6fe9f3451b" dependencies = [ "unicode-ident", ] [[package]] name = "prost" -version = "0.11.9" +version = "0.11.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b82eaa1d779e9a4bc1c3217db8ffbeabaae1dca241bf70183242128d48681cd" +checksum = "e48e50df39172a3e7eb17e14642445da64996989bc212b583015435d39a58537" dependencies = [ "bytes", "prost-derive", @@ -3926,9 +3874,9 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.11.9" +version = "0.11.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270" +checksum = "2c828f93f5ca4826f97fedcbd3f9a536c16b12cff3dbbb4a007f932bbad95b12" dependencies = [ "bytes", "heck 0.4.1", @@ -3954,16 +3902,16 @@ checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", "itertools", - "proc-macro2 1.0.56", - "quote 1.0.26", + "proc-macro2 1.0.59", + "quote 1.0.28", "syn 1.0.109", ] [[package]] name = "prost-types" -version = "0.11.9" +version = "0.11.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "213622a1460818959ac1181aaeb2dc9c7f63df720db7d788b3e24eacd1983e13" +checksum = "379119666929a1afd7a043aa6cf96fa67a6dce9af60c88095a4686dbce4c9c88" dependencies = [ "prost", ] @@ -3983,8 +3931,8 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.26", + "proc-macro2 1.0.59", + "quote 1.0.28", "syn 1.0.109", ] @@ -4016,11 +3964,11 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.26" +version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc" +checksum = "1b9ab9c7eadfd8df19006f1cf1a4aed13540ed5cbc047010ece5826e10825488" dependencies = [ - "proc-macro2 1.0.56", + "proc-macro2 1.0.59", ] [[package]] @@ -4182,9 +4130,9 @@ dependencies = [ [[package]] name = "regalloc2" -version = "0.3.2" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d43a209257d978ef079f3d446331d0f1794f5e0fc19b306a199983857833a779" +checksum = "300d4fbfb40c1c66a78ba3ddd41c1110247cf52f97b87d0f2fc9209bd49b030c" dependencies = [ "fxhash", "log", @@ -4194,9 +4142,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.8.1" +version = "1.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af83e617f331cc6ae2da5443c602dfa5af81e517212d9d611a5b3ba1777b5370" +checksum = "81ca098a9821bd52d6b24fd8b10bd081f47d39c22778cafaa75a2857a62c6390" dependencies = [ "aho-corasick 1.0.1", "memchr", @@ -4205,9 +4153,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5996294f19bd3aae0453a862ad728f60e6600695733dd5df01da90c54363a3c" +checksum = "436b050e76ed2903236f032a59761c1eb99e1b0aead2c257922771dab1fc8c78" [[package]] name = "region" @@ -4247,10 +4195,11 @@ dependencies = [ [[package]] name = "rkyv" -version = "0.7.41" +version = "0.7.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21499ed91807f07ae081880aabb2ccc0235e9d88011867d984525e9a4c3cfa3e" +checksum = "0200c8230b013893c0b2d6213d6ec64ed2b9be2e0e016682b7224ff82cff5c58" dependencies = [ + "bitvec", "bytecheck", "hashbrown 0.12.3", "indexmap", @@ -4258,16 +4207,18 @@ dependencies = [ "rend", "rkyv_derive", "seahash", + "tinyvec", + "uuid", ] [[package]] name = "rkyv_derive" -version = "0.7.41" +version = "0.7.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac1c672430eb41556291981f45ca900a0239ad007242d1cb4b4167af842db666" +checksum = "b2e06b915b5c230a17d7a736d1e2e63ee753c256a8614ef3f5147b13a4f5541d" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.26", + "proc-macro2 1.0.59", + "quote 1.0.28", "syn 1.0.109", ] @@ -4376,7 +4327,7 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b" dependencies = [ - "base64 0.21.0", + "base64 0.21.2", ] [[package]] @@ -4424,8 +4375,8 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "107c3d5d7f370ac09efa62a78375f94d94b8a33c61d8c278b96683fb4dbf2d8d" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.26", + "proc-macro2 1.0.59", + "quote 1.0.28", "syn 1.0.109", ] @@ -4470,12 +4421,6 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" -[[package]] -name = "scratch" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1792db035ce95be60c3f8853017b3999209281c24e2ba5bc8e59bf97a0c590c1" - [[package]] name = "sct" version = "0.7.0" @@ -4494,9 +4439,9 @@ checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b" [[package]] name = "security-framework" -version = "2.8.2" +version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a332be01508d814fed64bf28f798a146d73792121129962fdf335bb3c49a4254" +checksum = "1fc758eb7bffce5b308734e9b0c1468893cae9ff70ebf13e7090be8dcbcc83a8" dependencies = [ "bitflags", "core-foundation", @@ -4507,9 +4452,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.8.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31c9bb296072e961fcbd8853511dd39c2d8be2deb1e17c6860b1d30732b323b4" +checksum = "f51d0c0d83bec45f16480d0ce0058397a69e48fcdc52d1dc8855fb68acbd31a7" dependencies = [ "core-foundation-sys", "libc", @@ -4523,9 +4468,9 @@ checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" [[package]] name = "serde" -version = "1.0.160" +version = "1.0.163" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb2f3770c8bce3bcda7e149193a069a0f4365bda1fa5cd88e03bca26afc1216c" +checksum = "2113ab51b87a539ae008b5c6c02dc020ffa39afd2d83cffcb3f4eb2722cebec2" dependencies = [ "serde_derive", ] @@ -4543,13 +4488,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.160" +version = "1.0.163" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "291a097c63d8497e00160b166a967a4a79c64f3facdd01cbd7502231688d77df" +checksum = "8c805777e3930c8883389c602315a24224bcc738b63905ef87cd1420353ea93e" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.26", - "syn 2.0.15", + "proc-macro2 1.0.59", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -4587,7 +4532,7 @@ dependencies = [ "serde", "serde_json", "serde_with_macros", - "time 0.3.20", + "time", ] [[package]] @@ -4596,10 +4541,10 @@ version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "881b6f881b17d13214e5d494c939ebab463d01264ce1811e9d4ac3a882e7695f" dependencies = [ - "darling", - "proc-macro2 1.0.56", - "quote 1.0.26", - "syn 2.0.15", + "darling 0.20.1", + "proc-macro2 1.0.59", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -4636,8 +4581,8 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "079a83df15f85d89a68d64ae1238f142f172b1fa915d0d76b26a7cba1b659a69" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.26", + "proc-macro2 1.0.59", + "quote 1.0.28", "syn 1.0.109", ] @@ -4647,9 +4592,9 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.26", - "syn 2.0.15", + "proc-macro2 1.0.59", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -4686,7 +4631,7 @@ checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -4804,9 +4749,9 @@ checksum = "9e08d8363704e6c71fc928674353e6b7c23dcea9d82d7012c8faf2a3a025f8d0" [[package]] name = "stream_limiter" -version = "1.2.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8c15323225cebaeebbf71fd83443d0cc7a7638a8d60b5f9f4108da65f755956" +checksum = 
"92b85d49eee44700a8dc9a3e2d316e8e1f7d661d3eccd606ad9d02e9969e04bc" [[package]] name = "strsim" @@ -4840,8 +4785,8 @@ checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" dependencies = [ "heck 0.3.3", "proc-macro-error", - "proc-macro2 1.0.56", - "quote 1.0.26", + "proc-macro2 1.0.59", + "quote 1.0.28", "syn 1.0.109", ] @@ -4861,8 +4806,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.56", - "quote 1.0.26", + "proc-macro2 1.0.59", + "quote 1.0.28", "rustversion", "syn 1.0.109", ] @@ -4872,15 +4817,15 @@ name = "substruct" version = "0.1.0" source = "git+https://github.com/sydhds/substruct#2fb3ae0dc9d913a0566ce6415eaa7a7ca1690fe1" dependencies = [ - "quote 1.0.26", + "quote 1.0.28", "syn 1.0.109", ] [[package]] name = "subtle" -version = "2.4.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" +checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" [[package]] name = "syn" @@ -4899,19 +4844,19 @@ version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.26", + "proc-macro2 1.0.59", + "quote 1.0.28", "unicode-ident", ] [[package]] name = "syn" -version = "2.0.15" +version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a34fcf3e8b60f57e6a14301a2e916d323af98b0ea63c599441eec8558660c822" +checksum = "32d41677bcbe24c20c52e7c70b0d8db04134c5d1066bf98662e2871ad200ea3e" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.26", + "proc-macro2 1.0.59", + "quote 1.0.28", "unicode-ident", ] @@ -4946,15 +4891,6 @@ dependencies = [ "windows-sys 0.45.0", ] -[[package]] -name = "termcolor" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" -dependencies = [ - "winapi-util", -] - [[package]] name = "termtree" version = "0.4.1" @@ -4991,16 +4927,16 @@ version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.26", - "syn 2.0.15", + "proc-macro2 1.0.59", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] name = "thread-id" -version = "4.0.0" +version = "4.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fdfe0627923f7411a43ec9ec9c39c3a9b4151be313e0922042581fb6c9b717f" +checksum = "3ee93aa2b8331c0fec9091548843f2c90019571814057da3b783f9de09349d73" dependencies = [ "libc", "redox_syscall 0.2.16", @@ -5019,20 +4955,9 @@ dependencies = [ [[package]] name = "time" -version = "0.1.45" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b797afad3f312d1c66a56d11d0316f916356d11bd158fbc6ca6389ff6bf805a" -dependencies = [ - "libc", - "wasi 0.10.0+wasi-snapshot-preview1", - "winapi", -] - -[[package]] -name = "time" -version = "0.3.20" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd0cbfecb4d19b5ea75bb31ad904eb5b9fa13f21079c3b92017ebdf4999a5890" +checksum = "8f3403384eaacbca9923fa06940178ac13e4edb725486d70e8e15881d0c836cc" dependencies = [ "itoa", 
"serde", @@ -5042,15 +4967,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd" +checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" [[package]] name = "time-macros" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd80a657e71da814b8e5d60d3374fc6d35045062245d80224748ae522dd76f36" +checksum = "372950940a5f07bf38dbe211d7283c9e6d7327df53794992d293e534c733d09b" dependencies = [ "time-core", ] @@ -5065,11 +4990,26 @@ dependencies = [ "serde_json", ] +[[package]] +name = "tinyvec" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + [[package]] name = "tokio" -version = "1.28.0" +version = "1.28.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3c786bf8134e5a3a166db9b29ab8f48134739014a3eca7bc6bfa95d673b136f" +checksum = "94d7b1cfd2aa4011f2de74c2c4c63665e27a71006b0a192dcd2710272e73dfa2" dependencies = [ "autocfg", "bytes", @@ -5100,9 +5040,9 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.26", - "syn 2.0.15", + "proc-macro2 1.0.59", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -5153,15 +5093,15 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ab8ed2edee10b50132aed5f331333428b011c99402b5a534154ed15746f9622" +checksum = "5a76a9312f5ba4c2dec6b9161fdf25d87ad8a09256ccea5a556fef03c706a10f" [[package]] name = "toml_edit" -version = "0.19.8" +version = "0.19.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "239410c8609e8125456927e6707163a3b1fdb40561e4b803bc041f466ccfdc13" +checksum = "2380d56e8670370eee6566b0bfd4265f65b3f432e8c6d85623f728d4fa31f739" dependencies = [ "indexmap", "toml_datetime", @@ -5177,7 +5117,7 @@ dependencies = [ "async-stream", "async-trait", "axum", - "base64 0.21.0", + "base64 0.21.2", "bytes", "flate2", "futures-core", @@ -5202,14 +5142,14 @@ dependencies = [ [[package]] name = "tonic-build" -version = "0.9.2" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6fdaae4c2c638bb70fe42803a26fbd6fc6ac8c72f5c59f67ecc2a2dcabf4b07" +checksum = "0f60a933bbea70c95d633c04c951197ddf084958abaa2ed502a3743bdd8d8dd7" dependencies = [ "prettyplease", - "proc-macro2 1.0.56", + "proc-macro2 1.0.59", "prost-build", - "quote 1.0.26", + "quote 1.0.28", "syn 1.0.109", ] @@ -5245,7 +5185,7 @@ version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21b00ec4842256d1fe0a46176e2ef5bc357664c66e7d91aff5a7d43d83a65f47" dependencies = [ - "base64 0.21.0", + "base64 0.21.2", "bytes", "futures-core", "http", @@ -5329,16 +5269,16 @@ version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"0f57e3ca2a01450b1a921183a9c9cbfda207fd822cef4ccb00a65402cbba7a74" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.26", - "syn 2.0.15", + "proc-macro2 1.0.59", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] name = "tracing-core" -version = "0.1.30" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" +checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" dependencies = [ "once_cell", "valuable", @@ -5369,6 +5309,26 @@ dependencies = [ "tracing-log", ] +[[package]] +name = "transition" +version = "0.1.0" +source = "git+https://github.com/massalabs/transition.git?rev=93fa3bf82f9f5ff421c78536879b7fd1b948ca75#93fa3bf82f9f5ff421c78536879b7fd1b948ca75" +dependencies = [ + "transition-macros", +] + +[[package]] +name = "transition-macros" +version = "0.1.0" +source = "git+https://github.com/massalabs/transition.git?rev=93fa3bf82f9f5ff421c78536879b7fd1b948ca75#93fa3bf82f9f5ff421c78536879b7fd1b948ca75" +dependencies = [ + "darling 0.14.4", + "proc-macro2 1.0.59", + "quote 1.0.28", + "syn 1.0.109", + "unsigned-varint", +] + [[package]] name = "try-lock" version = "0.2.4" @@ -5387,11 +5347,26 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e79c4d996edb816c91e4308506774452e55e95c3c9de07b6729e17e15a5ef81" +[[package]] +name = "unicode-bidi" +version = "0.3.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" + [[package]] name = "unicode-ident" -version = "1.0.8" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b15811caf2415fb889178633e7724bad2509101cde276048e013b9def5e51fa0" + +[[package]] +name = "unicode-normalization" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" +checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" +dependencies = [ + "tinyvec", +] [[package]] name = "unicode-segmentation" @@ -5413,9 +5388,9 @@ checksum = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc" [[package]] name = "universal-hash" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d3160b73c9a19f7e2939a2fdad446c57c1bbbbf4d919d3213ff1267a580d8b5" +checksum = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea" dependencies = [ "crypto-common", "subtle", @@ -5435,12 +5410,29 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" +[[package]] +name = "url" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", +] + [[package]] name = "utf8parse" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" +[[package]] +name = "uuid" +version = "1.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "345444e32442451b267fc254ae85a209c64be56d2890e601a0c37ff0c3c5ecd2" + [[package]] name = "valuable" version = "0.1.0" @@ -5491,12 +5483,6 @@ version = 
"0.9.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" -[[package]] -name = "wasi" -version = "0.10.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" - [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" @@ -5505,9 +5491,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.84" +version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31f8dcbc21f30d9b8f2ea926ecb58f6b91192c17e9d33594b3df58b2007ca53b" +checksum = "5bba0e8cb82ba49ff4e229459ff22a191bbe9a1cb3a341610c9c33efc27ddf73" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -5515,16 +5501,16 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.84" +version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95ce90fd5bcc06af55a641a86428ee4229e44e07033963a2290a8e241607ccb9" +checksum = "19b04bc93f9d6bdee709f6bd2118f57dd6679cf1176a1af464fca3ab0d66d8fb" dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2 1.0.56", - "quote 1.0.26", - "syn 1.0.109", + "proc-macro2 1.0.59", + "quote 1.0.28", + "syn 2.0.18", "wasm-bindgen-shared", ] @@ -5546,16 +5532,16 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c5020cfa87c7cecefef118055d44e3c1fc122c7ec25701d528ee458a0b45f38f" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.26", + "proc-macro2 1.0.59", + "quote 1.0.28", "syn 1.0.109", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.34" +version = "0.4.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f219e0d211ba40266969f6dbdd90636da12f75bee4fc9d6c23d1260dadb51454" +checksum = "2d1985d03709c53167ce907ff394f5316aa22cb4e12761295c5dc57dacb6297e" dependencies = [ "cfg-if", "js-sys", @@ -5565,53 +5551,55 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.84" +version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c21f77c0bedc37fd5dc21f897894a5ca01e7bb159884559461862ae90c0b4c5" +checksum = "14d6b024f1a526bb0234f52840389927257beb670610081360e5a03c5df9c258" dependencies = [ - "quote 1.0.26", + "quote 1.0.28", "wasm-bindgen-macro-support", ] [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.84" +version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6" +checksum = "e128beba882dd1eb6200e1dc92ae6c5dbaa4311aa7bb211ca035779e5efc39f8" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.26", - "syn 1.0.109", + "proc-macro2 1.0.59", + "quote 1.0.28", + "syn 2.0.18", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.84" +version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0046fef7e28c3804e5e38bfa31ea2a0f73905319b677e57ebe37e49358989b5d" +checksum = "ed9d5b4305409d1fc9482fee2d7f9bcbf24b3972bf59817ef757e23982242a93" [[package]] name = "wasm-encoder" -version = "0.26.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d05d0b6fcd0aeb98adf16e7975331b3c17222aa815148f5b976370ce589d80ef" +checksum = 
"18c41dbd92eaebf3612a39be316540b8377c871cb9bde6b064af962984912881" dependencies = [ "leb128", ] [[package]] name = "wasmer" -version = "3.1.1" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "840af6d21701220cb805dc7201af301cb99e9b4f646f48a41befbc1d949f0f90" +checksum = "78caedecd8cb71ed47ccca03b68d69414a3d278bb031e6f93f15759344efdd52" dependencies = [ "bytes", "cfg-if", + "derivative", "indexmap", "js-sys", "more-asserts 0.2.2", + "rustc-demangle", "serde", "serde-wasm-bindgen", "target-lexicon", @@ -5629,9 +5617,9 @@ dependencies = [ [[package]] name = "wasmer-compiler" -version = "3.1.1" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b86fab98beaaace77380cb04e681773739473860d1b8499ea6b14f920923e0c5" +checksum = "726a8450541af4a57c34af7b6973fdbfc79f896cc7e733429577dfd1d1687180" dependencies = [ "backtrace", "cfg-if", @@ -5642,7 +5630,6 @@ dependencies = [ "memmap2", "more-asserts 0.2.2", "region", - "rustc-demangle", "smallvec", "thiserror", "wasmer-object", @@ -5654,9 +5641,9 @@ dependencies = [ [[package]] name = "wasmer-compiler-cranelift" -version = "3.1.1" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "015eef629fc84889540dc1686bd7fa524b93da9fd2d275b16c49dbe96268e58f" +checksum = "a1e5633f90f372563ebbdf3f9799c7b29ba11c90e56cf9b54017112d2e656c95" dependencies = [ "cranelift-codegen", "cranelift-entity", @@ -5673,9 +5660,9 @@ dependencies = [ [[package]] name = "wasmer-compiler-singlepass" -version = "3.1.1" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07e235ccc192d5f39147e8a430f48040dcfeebc1f1b0d979d2232ec1618d255c" +checksum = "d4d38957de6f452115c0af3ff08cec268ee248d665b54d4bbf7da60b7453cb97" dependencies = [ "byteorder", "dynasm", @@ -5692,21 +5679,21 @@ dependencies = [ [[package]] name = "wasmer-derive" -version = "3.1.1" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ff577b7c1cfcd3d7c5b3a09fe1a499b73f7c17084845ff71225c8250a6a63a9" +checksum = "97901fdbaae383dbb90ea162cc3a76a9fa58ac39aec7948b4c0b9bbef9307738" dependencies = [ "proc-macro-error", - "proc-macro2 1.0.56", - "quote 1.0.26", + "proc-macro2 1.0.59", + "quote 1.0.28", "syn 1.0.109", ] [[package]] name = "wasmer-middlewares" -version = "3.1.1" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3f7b2443d00487fcd63e0158ea2eb7a12253fcc99b1c73a7a89796f3cb5a10f" +checksum = "9e028013811035111beb768074b6ccc09eabd77811b1e01fd099b5471924ca16" dependencies = [ "wasmer", "wasmer-types", @@ -5715,9 +5702,9 @@ dependencies = [ [[package]] name = "wasmer-object" -version = "3.1.1" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1b04028ab0209ee0883dcf91cb5b39b840f745f7fec3e8517c3c25f89c546c1" +checksum = "1b6a25e04fdd0f2173bebfce2804ac1fba5e45827afed76876bf414e74244aae" dependencies = [ "object 0.28.4", "thiserror", @@ -5726,10 +5713,11 @@ dependencies = [ [[package]] name = "wasmer-types" -version = "3.1.1" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b9600f9da966abae3be0b0a4560e7d1f2c88415a2d01ce362ac06063cb1c473" +checksum = "67f1f2839f4f61509550e4ddcd0e658e19f3af862b51c79fda15549d735d659b" dependencies = [ + "bytecheck", "enum-iterator", "enumset", "indexmap", @@ -5741,20 +5729,23 @@ dependencies = [ [[package]] name = "wasmer-vm" -version 
= "3.1.1" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fc68a7f0a003e6cb63845b7510065097d289553201d64afb9a5e1744da3c6a0" +checksum = "043118ec4f16d1714fed3aab758b502b864bd865e1d5188626c9ad290100563f" dependencies = [ "backtrace", "cc", "cfg-if", "corosensei", + "dashmap", + "derivative", "enum-iterator", + "fnv", "indexmap", "lazy_static", "libc", "mach", - "memoffset 0.6.5", + "memoffset", "more-asserts 0.2.2", "region", "scopeguard", @@ -5765,15 +5756,19 @@ dependencies = [ [[package]] name = "wasmparser" -version = "0.83.0" +version = "0.95.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "718ed7c55c2add6548cca3ddd6383d738cd73b892df400e96b9aa876f0141d7a" +checksum = "f2ea896273ea99b15132414be1da01ab0d8836415083298ecaffbe308eaac87a" +dependencies = [ + "indexmap", + "url", +] [[package]] name = "wast" -version = "57.0.0" +version = "60.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6eb0f5ed17ac4421193c7477da05892c2edafd67f9639e3c11a82086416662dc" +checksum = "bd06cc744b536e30387e72a48fdd492105b9c938bb4f415c39c616a7a0a697ad" dependencies = [ "leb128", "memchr", @@ -5783,18 +5778,18 @@ dependencies = [ [[package]] name = "wat" -version = "1.0.63" +version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab9ab0d87337c3be2bb6fc5cd331c4ba9fd6bcb4ee85048a0dd59ed9ecf92e53" +checksum = "5abe520f0ab205366e9ac7d3e6b2fc71de44e32a2b58f2ec871b6b575bdcea3b" dependencies = [ "wast", ] [[package]] name = "web-sys" -version = "0.3.61" +version = "0.3.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e33b99f4b23ba3eec1a53ac264e35a755f00e966e0065077d6027c0f575b0b97" +checksum = "3bdd9ef4e984da1187bf8110c5cf5b845fbc87a23602cdf912386a76fcd3a7c2" dependencies = [ "js-sys", "wasm-bindgen", @@ -5802,9 +5797,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.23.0" +version = "0.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa54963694b65584e170cf5dc46aeb4dcaa5584e652ff5f3952e56d66aff0125" +checksum = "b03058f88386e5ff5310d9111d53f48b17d732b401aeb83a8d5190f2ac459338" dependencies = [ "rustls-webpki", ] @@ -6077,12 +6072,6 @@ dependencies = [ "linked-hash-map", ] -[[package]] -name = "yansi" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" - [[package]] name = "zeroize" version = "1.3.0" @@ -6098,9 +6087,9 @@ version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.26", - "syn 2.0.15", + "proc-macro2 1.0.59", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index b660bcc8f24..6204e075bd5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,6 +8,7 @@ members = [ "massa-cipher", "massa-consensus-exports", "massa-consensus-worker", + "massa-db", "massa-executed-ops", "massa-execution-exports", "massa-execution-worker", @@ -35,10 +36,10 @@ members = [ "massa-final-state", "massa-pos-exports", "massa-pos-worker", - "massa-versioning-worker", - "massa-versioning-exports", + "massa-versioning", "massa-grpc", "massa-proto", + "massa-xtask", ] resolver = "2" @@ -53,5 +54,4 @@ opt-level = 3 # Speed-up the CI # Do not add as a regular dependency. Never. 
# # * sandbox: for testing purpose, genesis timestamps is set as now + 9 seconds. -# * create_snapshot: for saving the final_state on disk, alongside the ledger. # The saved snapshot can then be used to restart the network from the snapshot. diff --git a/massa-api-exports/Cargo.toml b/massa-api-exports/Cargo.toml index f99f512af9b..280fedff292 100644 --- a/massa-api-exports/Cargo.toml +++ b/massa-api-exports/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "massa_api_exports" -version = "0.1.0" +version = "0.23.0" authors = ["Massa Labs "] edition = "2021" @@ -17,12 +17,12 @@ massa_signature = { path = "../massa-signature" } massa_time = { path = "../massa-time" } massa_models = { path = "../massa-models" } massa_final_state = { path = "../massa-final-state" } - massa_consensus_exports = { path = "../massa-consensus-exports" } massa_hash = { path = "../massa-hash" } massa_protocol_exports = { path = "../massa-protocol-exports" } massa_execution_exports = { path = "../massa-execution-exports" } massa_wallet = { path = "../massa-wallet" } +massa_versioning = { path = "../massa-versioning" } [dev-dependencies] serial_test = "1.0.0" diff --git a/massa-api-exports/src/error.rs b/massa-api-exports/src/error.rs index 5bb9b2ff83d..d53edf1ddd2 100644 --- a/massa-api-exports/src/error.rs +++ b/massa-api-exports/src/error.rs @@ -9,6 +9,7 @@ use massa_hash::MassaHashError; use massa_models::error::ModelsError; use massa_protocol_exports::ProtocolError; use massa_time::TimeError; +use massa_versioning::versioning_factory::FactoryError; use massa_wallet::WalletError; /// Errors of the api component. @@ -47,6 +48,8 @@ pub enum ApiError { BadRequest(String), /// Internal server error: {0} InternalServerError(String), + /// Factory error: {0} + FactoryError(#[from] FactoryError), } impl From for ErrorObjectOwned { @@ -69,6 +72,7 @@ impl From for ErrorObjectOwned { ApiError::MissingCommandSender(_) => -32017, ApiError::MissingConfig(_) => -32018, ApiError::WrongAPI => -32019, + ApiError::FactoryError(_) => -32020, }; ErrorObject::owned(code, err.to_string(), None::<()>) diff --git a/massa-api-exports/src/node.rs b/massa-api-exports/src/node.rs index 8c96616bc8f..6f810162217 100644 --- a/massa-api-exports/src/node.rs +++ b/massa-api-exports/src/node.rs @@ -57,7 +57,7 @@ impl std::fmt::Display for NodeStatus { writeln!(f, "Config:\n{}", self.config)?; writeln!(f)?; - writeln!(f, "Current time: {}", self.current_time.to_utc_string())?; + writeln!(f, "Current time: {}", self.current_time.format_instant())?; writeln!(f, "Current cycle: {}", self.current_cycle)?; if self.last_slot.is_some() { writeln!(f, "Last slot: {}", self.last_slot.unwrap())?; diff --git a/massa-api/Cargo.toml b/massa-api/Cargo.toml index 75452b3209f..0c6d3b3618c 100644 --- a/massa-api/Cargo.toml +++ b/massa-api/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "massa_api" -version = "0.1.0" +version = "0.23.0" edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html @@ -28,8 +28,9 @@ massa_protocol_exports = { path = "../massa-protocol-exports" } massa_execution_exports = { path = "../massa-execution-exports" } massa_pos_exports = { path = "../massa-pos-exports" } massa_storage = { path = "../massa-storage" } -massa_serialization = { path = "../massa-serialization"} +massa_serialization = { path = "../massa-serialization" } massa_signature = { path = "../massa-signature" } massa_time = { path = "../massa-time" } +massa_versioning = { path = "../massa-versioning" } massa_hash = { path = 
"../massa-hash" } -massa_wallet = { path = "../massa-wallet" } \ No newline at end of file +massa_wallet = { path = "../massa-wallet" } diff --git a/massa-api/src/lib.rs b/massa-api/src/lib.rs index de16c32a257..a74ce6b546a 100644 --- a/massa-api/src/lib.rs +++ b/massa-api/src/lib.rs @@ -38,6 +38,7 @@ use massa_pool_exports::{PoolChannels, PoolController}; use massa_pos_exports::SelectorController; use massa_protocol_exports::{ProtocolConfig, ProtocolController}; use massa_storage::Storage; +use massa_versioning::keypair_factory::KeyPairFactory; use massa_wallet::Wallet; use parking_lot::RwLock; use serde_json::Value; @@ -74,6 +75,8 @@ pub struct Public { pub version: Version, /// our node id pub node_id: NodeId, + /// keypair factory + pub keypair_factory: KeyPairFactory, } /// Private API content diff --git a/massa-api/src/private.rs b/massa-api/src/private.rs index 3851367fd1a..b6153befdde 100644 --- a/massa-api/src/private.rs +++ b/massa-api/src/private.rs @@ -19,19 +19,14 @@ use massa_api_exports::{ }; use massa_execution_exports::ExecutionController; use massa_hash::Hash; -use massa_models::clique::Clique; -use massa_models::composite::PubkeySig; -use massa_models::node::NodeId; -use massa_models::output_event::SCOutputEvent; -use massa_models::prehash::PreHashSet; use massa_models::{ - address::Address, block::Block, block_id::BlockId, endorsement::EndorsementId, - execution::EventFilter, operation::OperationId, slot::Slot, + address::Address, block::Block, block_id::BlockId, clique::Clique, composite::PubkeySig, + endorsement::EndorsementId, execution::EventFilter, node::NodeId, operation::OperationId, + output_event::SCOutputEvent, prehash::PreHashSet, slot::Slot, }; use massa_protocol_exports::{PeerId, ProtocolController}; -use massa_signature::{KeyPair, PUBLIC_KEY_SIZE_BYTES}; +use massa_signature::KeyPair; use massa_wallet::Wallet; - use parking_lot::RwLock; use std::collections::BTreeSet; use std::fs::{remove_file, OpenOptions}; @@ -164,14 +159,7 @@ impl MassaRpcServer for API { //TODO: Change when unify node id and peer id let peer_ids = ids .into_iter() - .map(|id| { - PeerId::from_bytes( - id.get_public_key().to_bytes()[..PUBLIC_KEY_SIZE_BYTES] - .try_into() - .unwrap(), - ) - .unwrap() - }) + .map(|id| PeerId::from_public_key(id.get_public_key())) .collect(); protocol_controller .ban_peers(peer_ids) @@ -183,14 +171,7 @@ impl MassaRpcServer for API { //TODO: Change when unify node id and peer id let peer_ids = ids .into_iter() - .map(|id| { - PeerId::from_bytes( - id.get_public_key().to_bytes()[..PUBLIC_KEY_SIZE_BYTES] - .try_into() - .unwrap(), - ) - .unwrap() - }) + .map(|id| PeerId::from_public_key(id.get_public_key())) .collect(); protocol_controller .unban_peers(peer_ids) diff --git a/massa-api/src/public.rs b/massa-api/src/public.rs index 8bb9750e435..918d424aada 100644 --- a/massa-api/src/public.rs +++ b/massa-api/src/public.rs @@ -3,6 +3,7 @@ use crate::{MassaRpcServer, Public, RpcServer, StopHandle, Value, API}; use async_trait::async_trait; +use itertools::{izip, Itertools}; use jsonrpsee::core::{Error as JsonRpseeError, RpcResult}; use massa_api_exports::{ address::AddressInfo, @@ -23,41 +24,40 @@ use massa_consensus_exports::ConsensusController; use massa_execution_exports::{ ExecutionController, ExecutionStackElement, ReadOnlyExecutionRequest, ReadOnlyExecutionTarget, }; -use massa_models::operation::OperationDeserializer; -use massa_models::secure_share::SecureShareDeserializer; -use massa_models::{ - block::{Block, BlockGraphStatus}, - 
endorsement::SecureShareEndorsement, - error::ModelsError, - operation::SecureShareOperation, - timeslots, -}; -use massa_pos_exports::SelectorController; -use massa_protocol_exports::{PeerConnectionType, ProtocolConfig, ProtocolController}; -use massa_serialization::{DeserializeError, Deserializer}; - -use itertools::{izip, Itertools}; -use massa_models::datastore::DatastoreDeserializer; use massa_models::{ address::Address, + block::{Block, BlockGraphStatus}, block_id::BlockId, clique::Clique, composite::PubkeySig, config::CompactConfig, + datastore::DatastoreDeserializer, endorsement::EndorsementId, + endorsement::SecureShareEndorsement, + error::ModelsError, execution::EventFilter, node::NodeId, + operation::OperationDeserializer, operation::OperationId, + operation::SecureShareOperation, output_event::SCOutputEvent, prehash::{PreHashMap, PreHashSet}, + secure_share::SecureShareDeserializer, slot::Slot, + timeslots, timeslots::{get_latest_block_slot_at_timestamp, time_range_to_slot_range}, version::Version, }; use massa_pool_exports::PoolController; -use massa_signature::{KeyPair, PublicKey, PUBLIC_KEY_SIZE_BYTES}; +use massa_pos_exports::SelectorController; +use massa_protocol_exports::{PeerConnectionType, ProtocolConfig, ProtocolController}; +use massa_serialization::{DeserializeError, Deserializer}; use massa_storage::Storage; use massa_time::MassaTime; +use massa_versioning::versioning_factory::FactoryStrategy; +use massa_versioning::{ + keypair_factory::KeyPairFactory, versioning::MipStore, versioning_factory::VersioningFactory, +}; use std::collections::BTreeMap; use std::net::{IpAddr, SocketAddr}; @@ -74,6 +74,7 @@ impl API { version: Version, node_id: NodeId, storage: Storage, + mip_store: MipStore, ) -> Self { API(Public { consensus_controller, @@ -86,6 +87,7 @@ impl API { selector_controller, protocol_config, storage, + keypair_factory: KeyPairFactory { mip_store }, }) } } @@ -133,10 +135,19 @@ impl MassaRpcServer for API { is_final, } in reqs { - let address = address.unwrap_or_else(|| { - // if no addr provided, use a random one - Address::from_public_key(&KeyPair::generate().get_public_key()) - }); + let address = if let Some(addr) = address { + addr + } else { + let now = MassaTime::now().map_err(|e| { + ApiError::InconsistencyError(format!("Unable to get current time: {}", e)) + })?; + let keypair = self + .0 + .keypair_factory + .create(&(), FactoryStrategy::At(now)) + .map_err(ApiError::from)?; + Address::from_public_key(&keypair.get_public_key()) + }; let op_datastore = match operation_datastore { Some(v) => { @@ -221,10 +232,19 @@ impl MassaRpcServer for API { is_final, } in reqs { - let caller_address = caller_address.unwrap_or_else(|| { - // if no addr provided, use a random one - Address::from_public_key(&KeyPair::generate().get_public_key()) - }); + let caller_address = if let Some(addr) = caller_address { + addr + } else { + let now = MassaTime::now().map_err(|e| { + ApiError::InconsistencyError(format!("Unable to get current time: {}", e)) + })?; + let keypair = self + .0 + .keypair_factory + .create(&(), FactoryStrategy::At(now)) + .map_err(ApiError::from)?; + Address::from_public_key(&keypair.get_public_key()) + }; // TODO: // * set a maximum gas value for read-only executions to prevent attacks @@ -365,16 +385,7 @@ impl MassaRpcServer for API { PeerConnectionType::IN => false, PeerConnectionType::OUT => true, }; - //TODO: Use the peerid correctly - ( - NodeId::new( - PublicKey::from_bytes( - id.to_bytes()[..PUBLIC_KEY_SIZE_BYTES].try_into().unwrap(), - ) - 
.unwrap(), - ), - (peer.0.ip(), is_outgoing), - ) + (NodeId::new(id.get_public_key()), (peer.0.ip(), is_outgoing)) }) .collect::>(); diff --git a/massa-async-pool/Cargo.toml b/massa-async-pool/Cargo.toml index 171bab25096..d7745a85726 100644 --- a/massa-async-pool/Cargo.toml +++ b/massa-async-pool/Cargo.toml @@ -1,33 +1,30 @@ [package] name = "massa_async_pool" -version = "0.1.0" +version = "0.23.0" authors = ["Massa Labs "] edition = "2021" [dependencies] -displaydoc = "0.2" -futures = "0.3" -lazy_static = "1.4.0" -nom = "7.1" +nom = "=7.1" num = "0.4" serde = { version = "1.0", features = ["derive"] } -serde_json = "1.0" -thiserror = "1.0" -tracing = "0.1" rand = "0.8" +rocksdb = "0.20" +parking_lot = { version = "0.12", features = ["deadlock_detection"] } + # custom modules massa_hash = { path = "../massa-hash" } massa_ledger_exports = { path = "../massa-ledger-exports" } -massa_logging = { path = "../massa-logging" } massa_models = { path = "../massa-models" } massa_serialization = { path = "../massa-serialization" } massa_signature = { path = "../massa-signature" } +massa_db = { path = "../massa-db" } massa_time = { path = "../massa-time" } massa_proto = { path = "../massa-proto" } [dev-dependencies] -pretty_assertions = "1.2" -serial_test = "1.0" +tempfile = "3.3" +massa_ledger_worker = { path = "../massa-ledger-worker" } # for more information on what are the following features used for, see the cargo.toml at workspace level [features] diff --git a/massa-async-pool/src/changes.rs b/massa-async-pool/src/changes.rs index 3051292fe5a..7fc837585b0 100644 --- a/massa-async-pool/src/changes.rs +++ b/massa-async-pool/src/changes.rs @@ -1,12 +1,24 @@ //! Copyright (c) 2022 MASSA LABS //! This file provides structures representing changes to the asynchronous message pool -use std::ops::Bound::Included; +use std::{ + collections::{btree_map::Entry, BTreeMap}, + ops::Bound::Included, +}; use crate::{ - message::{AsyncMessage, AsyncMessageId, AsyncMessageIdDeserializer, AsyncMessageIdSerializer}, + message::{ + AsyncMessage, AsyncMessageId, AsyncMessageIdDeserializer, AsyncMessageIdSerializer, + AsyncMessageUpdate, AsyncMessageUpdateDeserializer, AsyncMessageUpdateSerializer, + }, AsyncMessageDeserializer, AsyncMessageSerializer, }; + +use massa_ledger_exports::{ + Applicable, SetOrKeep, SetUpdateOrDelete, SetUpdateOrDeleteDeserializer, + SetUpdateOrDeleteSerializer, +}; + use massa_serialization::{ Deserializer, SerializeError, Serializer, U64VarIntDeserializer, U64VarIntSerializer, }; @@ -18,35 +30,40 @@ use nom::{ }; use serde::{Deserialize, Serialize}; -/// Enum representing a value U with identifier T being added or deleted -#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize)] -pub enum Change { - /// an item with identifier T and value U is added - Add(T, U), - - /// an item with identifier T is ready to be executed - Activate(T), - - /// an item with identifier T is deleted - Delete(T), -} +/// Consolidated changes to the asynchronous message pool +#[derive(Default, Debug, Clone, PartialEq, Eq, Deserialize, Serialize)] +pub struct AsyncPoolChanges( + pub BTreeMap>, +); -#[repr(u32)] -enum ChangeId { - Add = 0, - Activate = 1, - Delete = 2, +impl Applicable for AsyncPoolChanges { + /// extends the current `AsyncPoolChanges` with another one + fn apply(&mut self, changes: AsyncPoolChanges) { + for (id, msg_change) in changes.0 { + match self.0.entry(id) { + Entry::Occupied(mut occ) => { + // apply incoming change if a change on this entry already exists + 
occ.get_mut().apply(msg_change); + } + Entry::Vacant(vac) => { + // otherwise insert the incoming change + vac.insert(msg_change); + } + } + } + } } -/// represents a list of additions and deletions to the asynchronous message pool -#[derive(Default, Debug, Clone, PartialEq, Eq, Deserialize, Serialize)] -pub struct AsyncPoolChanges(pub Vec>); - /// `AsyncPoolChanges` serializer pub struct AsyncPoolChangesSerializer { u64_serializer: U64VarIntSerializer, id_serializer: AsyncMessageIdSerializer, - message_serializer: AsyncMessageSerializer, + set_update_or_delete_message_serializer: SetUpdateOrDeleteSerializer< + AsyncMessage, + AsyncMessageUpdate, + AsyncMessageSerializer, + AsyncMessageUpdateSerializer, + >, } impl AsyncPoolChangesSerializer { @@ -54,7 +71,10 @@ impl AsyncPoolChangesSerializer { Self { u64_serializer: U64VarIntSerializer::new(), id_serializer: AsyncMessageIdSerializer::new(), - message_serializer: AsyncMessageSerializer::new(), + set_update_or_delete_message_serializer: SetUpdateOrDeleteSerializer::new( + AsyncMessageSerializer::new(false), + AsyncMessageUpdateSerializer::new(false), + ), } } } @@ -72,7 +92,8 @@ impl Serializer for AsyncPoolChangesSerializer { /// use massa_serialization::Serializer; /// use massa_models::{address::Address, amount::Amount, slot::Slot}; /// use std::str::FromStr; - /// use massa_async_pool::{AsyncMessage, Change, AsyncPoolChanges, AsyncPoolChangesSerializer}; + /// use massa_async_pool::{AsyncMessage, AsyncPoolChanges, AsyncPoolChangesSerializer}; + /// use massa_ledger_exports::SetUpdateOrDelete; /// /// let message = AsyncMessage::new_with_hash( /// Slot::new(1, 0), @@ -86,9 +107,13 @@ impl Serializer for AsyncPoolChangesSerializer { /// Slot::new(2, 0), /// Slot::new(3, 0), /// vec![1, 2, 3, 4], + /// None, /// None /// ); - /// let changes: AsyncPoolChanges = AsyncPoolChanges(vec![Change::Add(message.compute_id(), message)]); + /// let mut changes = AsyncPoolChanges::default(); + /// changes + /// .0 + /// .insert(message.compute_id(), SetUpdateOrDelete::Set(message)); /// let mut serialized = Vec::new(); /// let serializer = AsyncPoolChangesSerializer::new(); /// serializer.serialize(&changes, &mut serialized).unwrap(); @@ -104,22 +129,10 @@ impl Serializer for AsyncPoolChangesSerializer { })?), buffer, )?; - for change in &value.0 { - match change { - Change::Add(id, message) => { - buffer.push(ChangeId::Add as u8); - self.id_serializer.serialize(id, buffer)?; - self.message_serializer.serialize(message, buffer)?; - } - Change::Activate(id) => { - buffer.push(ChangeId::Activate as u8); - self.id_serializer.serialize(id, buffer)?; - } - Change::Delete(id) => { - buffer.push(ChangeId::Delete as u8); - self.id_serializer.serialize(id, buffer)?; - } - } + for (id, change) in &value.0 { + self.id_serializer.serialize(id, buffer)?; + self.set_update_or_delete_message_serializer + .serialize(change, buffer)?; } Ok(()) } @@ -128,7 +141,12 @@ impl Serializer for AsyncPoolChangesSerializer { pub struct AsyncPoolChangesDeserializer { async_pool_changes_length: U64VarIntDeserializer, id_deserializer: AsyncMessageIdDeserializer, - message_deserializer: AsyncMessageDeserializer, + set_update_or_delete_message_deserializer: SetUpdateOrDeleteDeserializer< + AsyncMessage, + AsyncMessageUpdate, + AsyncMessageDeserializer, + AsyncMessageUpdateDeserializer, + >, } impl AsyncPoolChangesDeserializer { @@ -144,10 +162,19 @@ impl AsyncPoolChangesDeserializer { Included(max_async_pool_changes), ), id_deserializer: 
AsyncMessageIdDeserializer::new(thread_count), - message_deserializer: AsyncMessageDeserializer::new( - thread_count, - max_async_message_data, - max_key_length, + set_update_or_delete_message_deserializer: SetUpdateOrDeleteDeserializer::new( + AsyncMessageDeserializer::new( + thread_count, + max_async_message_data, + max_key_length, + false, + ), + AsyncMessageUpdateDeserializer::new( + thread_count, + max_async_message_data, + max_key_length, + false, + ), ), } } @@ -160,7 +187,8 @@ impl Deserializer for AsyncPoolChangesDeserializer { /// use massa_serialization::{Serializer, Deserializer, DeserializeError}; /// use massa_models::{address::Address, amount::Amount, slot::Slot}; /// use std::str::FromStr; - /// use massa_async_pool::{AsyncMessage, AsyncMessageTrigger, Change, AsyncPoolChanges, AsyncPoolChangesSerializer, AsyncPoolChangesDeserializer}; + /// use massa_async_pool::{AsyncMessage, AsyncMessageTrigger, AsyncPoolChanges, AsyncPoolChangesSerializer, AsyncPoolChangesDeserializer}; + /// use massa_ledger_exports::SetUpdateOrDelete; /// /// let message = AsyncMessage::new_with_hash( /// Slot::new(1, 0), @@ -177,9 +205,16 @@ impl Deserializer for AsyncPoolChangesDeserializer { /// Some(AsyncMessageTrigger { /// address: Address::from_str("AU12dG5xP1RDEB5ocdHkymNVvvSJmUL9BgHwCksDowqmGWxfpm93x").unwrap(), /// datastore_key: Some(vec![1, 2, 3, 4]), - /// }) + /// }), + /// None /// ); - /// let changes: AsyncPoolChanges = AsyncPoolChanges(vec![Change::Add(message.compute_id(), message.clone()), Change::Delete(message.compute_id())]); + /// let mut changes = AsyncPoolChanges::default(); + /// changes + /// .0 + /// .insert(message.compute_id(), SetUpdateOrDelete::Set(message.clone())); + /// changes + /// .0 + /// .insert(message.compute_id(), SetUpdateOrDelete::Delete); /// let mut serialized = Vec::new(); /// let serializer = AsyncPoolChangesSerializer::new(); /// let deserializer = AsyncPoolChangesDeserializer::new(32, 100000, 100000, 100000); @@ -198,60 +233,28 @@ impl Deserializer for AsyncPoolChangesDeserializer { context("Failed length deserialization", |input| { self.async_pool_changes_length.deserialize(input) }), - |input: &'a [u8]| match input.first() { - Some(0) => context( - "Failed Change::Add deserialization", - tuple(( - context("Failed id deserialization", |input| { - self.id_deserializer.deserialize(input) - }), - context("Failed message deserialization", |input| { - self.message_deserializer.deserialize(input) - }), - )), - ) - .map(|(id, message)| Change::Add(id, message)) - .parse(&input[1..]), - Some(1) => context( - "Failed Change::Activate deserialization", - context("Failed id deserialization", |input| { - self.id_deserializer.deserialize(input) - }), - ) - .map(Change::Activate) - .parse(&input[1..]), - Some(2) => context( - "Failed Change::Delete deserialization", + |input: &'a [u8]| { + tuple(( context("Failed id deserialization", |input| { self.id_deserializer.deserialize(input) }), - ) - .map(Change::Delete) - .parse(&input[1..]), - Some(_) => Err(nom::Err::Error(ParseError::from_error_kind( - buffer, - nom::error::ErrorKind::Digit, - ))), - None => Err(nom::Err::Error(ParseError::from_error_kind( - buffer, - nom::error::ErrorKind::LengthValue, - ))), + context( + "Failed set_update_or_delete_message deserialization", + |input| { + self.set_update_or_delete_message_deserializer + .deserialize(input) + }, + ), + ))(input) }, ), ) - .map(AsyncPoolChanges) + .map(|vec| AsyncPoolChanges(vec.into_iter().map(|data| (data.0, data.1)).collect())) .parse(buffer) } } 
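Note on the hunk above: `AsyncPoolChanges` is no longer an append-only `Vec<Change<...>>`; it is now a `BTreeMap` keyed by message id whose values are `SetUpdateOrDelete` entries, and `apply` consolidates successive changes to the same message instead of replaying a change list. A minimal sketch of that consolidation rule, using simplified stand-in types rather than the real generic `massa_ledger_exports` definitions (the message is reduced to a single bool so the merge rule stays visible, and the `Delete`-then-`Update` rule below is an illustrative assumption; the crate's own `Applicable` impl is authoritative):

```rust
// Simplified stand-ins for massa_ledger_exports::SetUpdateOrDelete semantics.
// The real enum is generic over the full value and update types; here the
// "message" is reduced to one bool (`can_be_executed`).
use std::collections::BTreeMap;

#[derive(Clone, Debug, PartialEq)]
enum SetUpdateOrDelete {
    /// the message is (re)created with this full state
    Set(bool),
    /// a partial update: `Some(v)` overwrites the flag, `None` keeps it
    Update(Option<bool>),
    /// the message is removed
    Delete,
}

/// Merge `next` into `prev`, mirroring what `AsyncPoolChanges::apply`
/// does per map entry.
fn consolidate(prev: SetUpdateOrDelete, next: SetUpdateOrDelete) -> SetUpdateOrDelete {
    use SetUpdateOrDelete::*;
    match (prev, next) {
        // a later Set or Delete fully overrides the previous change
        (_, Set(v)) => Set(v),
        (_, Delete) => Delete,
        // an Update folds into a previous Set, producing an updated Set
        (Set(v), Update(u)) => Set(u.unwrap_or(v)),
        // two Updates merge field-wise; the later one wins where it sets
        (Update(p), Update(u)) => Update(u.or(p)),
        // assumption: updating an already-deleted entry stays deleted
        (Delete, Update(_)) => Delete,
    }
}

fn main() {
    let mut changes: BTreeMap<u64, SetUpdateOrDelete> = BTreeMap::new();

    // push_add: record the full message under its id
    changes.insert(1, SetUpdateOrDelete::Set(false));

    // push_activate: an Update setting `can_be_executed = true` on the same id
    let activation = SetUpdateOrDelete::Update(Some(true));
    let merged = consolidate(changes.remove(&1).unwrap(), activation);
    changes.insert(1, merged);

    // both changes collapsed into one consolidated Set entry
    assert_eq!(changes[&1], SetUpdateOrDelete::Set(true));
    println!("consolidated change: {:?}", changes[&1]);
}
```

This is why `push_add`, `push_delete` and `push_activate` (below) can all route through `apply`: inserting a `Set`, `Delete` or `Update` entry for an id merges with whatever change is already recorded for that id.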
impl AsyncPoolChanges { - /// Extends self with another another `AsyncPoolChanges`. - /// This simply appends the contents of other to self. - /// No add/delete compensations are done. - pub fn extend(&mut self, other: AsyncPoolChanges) { - self.0.extend(other.0); - } - /// Pushes a message addition to the list of changes. /// No add/delete compensations are done. /// @@ -259,7 +262,9 @@ impl AsyncPoolChanges { /// * `msg_id`: ID of the message to push as added to the list of changes /// * `msg`: message to push as added to the list of changes pub fn push_add(&mut self, msg_id: AsyncMessageId, msg: AsyncMessage) { - self.0.push(Change::Add(msg_id, msg)); + let mut change = AsyncPoolChanges::default(); + change.0.insert(msg_id, SetUpdateOrDelete::Set(msg)); + self.apply(change); } /// Pushes a message deletion to the list of changes. @@ -268,7 +273,9 @@ impl AsyncPoolChanges { /// Arguments: /// * `msg_id`: ID of the message to push as deleted to the list of changes pub fn push_delete(&mut self, msg_id: AsyncMessageId) { - self.0.push(Change::Delete(msg_id)); + let mut change = AsyncPoolChanges::default(); + change.0.insert(msg_id, SetUpdateOrDelete::Delete); + self.apply(change); } /// Pushes a message activation to the list of changes. @@ -276,6 +283,16 @@ impl AsyncPoolChanges { /// Arguments: /// * `msg_id`: ID of the message to push as ready to be executed to the list of changes pub fn push_activate(&mut self, msg_id: AsyncMessageId) { - self.0.push(Change::Activate(msg_id)); + let mut change = AsyncPoolChanges::default(); + + let msg_update = AsyncMessageUpdate { + can_be_executed: SetOrKeep::Set(true), + ..Default::default() + }; + + change + .0 + .insert(msg_id, SetUpdateOrDelete::Update(msg_update)); + self.apply(change); } } diff --git a/massa-async-pool/src/config.rs b/massa-async-pool/src/config.rs index eb695aed9d8..d66f37793a9 100644 --- a/massa-async-pool/src/config.rs +++ b/massa-async-pool/src/config.rs @@ -7,10 +7,10 @@ pub struct AsyncPoolConfig { /// max number of messages in the pool pub max_length: u64, - /// part size (for bootstrap limits) - pub bootstrap_part_size: u64, /// max async message data (for bootstrap limits) pub max_async_message_data: u64, /// thread count pub thread_count: u8, + /// max key length for message deserialization + pub max_key_length: u32, } diff --git a/massa-async-pool/src/lib.rs b/massa-async-pool/src/lib.rs index 871f38f1818..83e1610d669 100644 --- a/massa-async-pool/src/lib.rs +++ b/massa-async-pool/src/lib.rs @@ -95,13 +95,12 @@ mod mapping_grpc; mod message; mod pool; -pub use changes::{ - AsyncPoolChanges, AsyncPoolChangesDeserializer, AsyncPoolChangesSerializer, Change, -}; +pub use changes::{AsyncPoolChanges, AsyncPoolChangesDeserializer, AsyncPoolChangesSerializer}; pub use config::AsyncPoolConfig; pub use message::{ AsyncMessage, AsyncMessageDeserializer, AsyncMessageId, AsyncMessageIdDeserializer, - AsyncMessageIdSerializer, AsyncMessageSerializer, AsyncMessageTrigger, + AsyncMessageIdSerializer, AsyncMessageInfo, AsyncMessageSerializer, AsyncMessageTrigger, + AsyncMessageTriggerSerializer, AsyncMessageUpdate, }; pub use pool::{AsyncPool, AsyncPoolDeserializer, AsyncPoolSerializer}; diff --git a/massa-async-pool/src/mapping_grpc.rs b/massa-async-pool/src/mapping_grpc.rs index 7745de6f39d..5c45c53497b 100644 --- a/massa-async-pool/src/mapping_grpc.rs +++ b/massa-async-pool/src/mapping_grpc.rs @@ -1,6 +1,7 @@ // Copyright (c) 2023 MASSA LABS -use crate::{AsyncMessage, AsyncMessageTrigger}; +use crate::{AsyncMessage, 
AsyncMessageTrigger, AsyncMessageUpdate}; +use massa_ledger_exports::SetOrKeep; use massa_proto::massa::api::v1 as grpc; impl From for grpc::AsyncMessage { @@ -24,6 +25,153 @@ impl From for grpc::AsyncMessage { } } +impl From for grpc::AsyncMessageUpdate { + fn from(value: AsyncMessageUpdate) -> Self { + grpc::AsyncMessageUpdate { + emission_slot: match value.emission_slot { + SetOrKeep::Set(value) => Some(grpc::SetOrKeepSlot { + r#type: grpc::AsyncPoolChangeType::Set as i32, + value: Some(value.into()), + }), + SetOrKeep::Keep => Some(grpc::SetOrKeepSlot { + r#type: grpc::AsyncPoolChangeType::Delete as i32, + value: None, + }), + }, + emission_index: match value.emission_index { + SetOrKeep::Set(value) => Some(grpc::SetOrKeepFixed64 { + r#type: grpc::AsyncPoolChangeType::Set as i32, + value: Some(value), + }), + SetOrKeep::Keep => Some(grpc::SetOrKeepFixed64 { + r#type: grpc::AsyncPoolChangeType::Delete as i32, + value: None, + }), + }, + sender: match value.sender { + SetOrKeep::Set(value) => Some(grpc::SetOrKeepString { + r#type: grpc::AsyncPoolChangeType::Set as i32, + value: Some(value.to_string()), + }), + SetOrKeep::Keep => Some(grpc::SetOrKeepString { + r#type: grpc::AsyncPoolChangeType::Delete as i32, + value: None, + }), + }, + destination: match value.destination { + SetOrKeep::Set(value) => Some(grpc::SetOrKeepString { + r#type: grpc::AsyncPoolChangeType::Set as i32, + value: Some(value.to_string()), + }), + SetOrKeep::Keep => Some(grpc::SetOrKeepString { + r#type: grpc::AsyncPoolChangeType::Delete as i32, + value: None, + }), + }, + handler: match value.handler { + SetOrKeep::Set(value) => Some(grpc::SetOrKeepString { + r#type: grpc::AsyncPoolChangeType::Set as i32, + value: Some(value), + }), + SetOrKeep::Keep => Some(grpc::SetOrKeepString { + r#type: grpc::AsyncPoolChangeType::Delete as i32, + value: None, + }), + }, + max_gas: match value.max_gas { + SetOrKeep::Set(value) => Some(grpc::SetOrKeepFixed64 { + r#type: grpc::AsyncPoolChangeType::Set as i32, + value: Some(value), + }), + SetOrKeep::Keep => Some(grpc::SetOrKeepFixed64 { + r#type: grpc::AsyncPoolChangeType::Delete as i32, + value: None, + }), + }, + fee: match value.fee { + SetOrKeep::Set(value) => Some(grpc::SetOrKeepFixed64 { + r#type: grpc::AsyncPoolChangeType::Set as i32, + value: Some(value.to_raw()), + }), + SetOrKeep::Keep => Some(grpc::SetOrKeepFixed64 { + r#type: grpc::AsyncPoolChangeType::Delete as i32, + value: None, + }), + }, + coins: match value.coins { + SetOrKeep::Set(value) => Some(grpc::SetOrKeepFixed64 { + r#type: grpc::AsyncPoolChangeType::Set as i32, + value: Some(value.to_raw()), + }), + SetOrKeep::Keep => Some(grpc::SetOrKeepFixed64 { + r#type: grpc::AsyncPoolChangeType::Delete as i32, + value: None, + }), + }, + validity_start: match value.validity_start { + SetOrKeep::Set(value) => Some(grpc::SetOrKeepSlot { + r#type: grpc::AsyncPoolChangeType::Set as i32, + value: Some(value.into()), + }), + SetOrKeep::Keep => Some(grpc::SetOrKeepSlot { + r#type: grpc::AsyncPoolChangeType::Delete as i32, + value: None, + }), + }, + validity_end: match value.validity_end { + SetOrKeep::Set(value) => Some(grpc::SetOrKeepSlot { + r#type: grpc::AsyncPoolChangeType::Set as i32, + value: Some(value.into()), + }), + SetOrKeep::Keep => Some(grpc::SetOrKeepSlot { + r#type: grpc::AsyncPoolChangeType::Delete as i32, + value: None, + }), + }, + data: match value.data { + SetOrKeep::Set(value) => Some(grpc::SetOrKeepBytes { + r#type: grpc::AsyncPoolChangeType::Set as i32, + value: Some(value), + }), + SetOrKeep::Keep 
=> Some(grpc::SetOrKeepBytes { + r#type: grpc::AsyncPoolChangeType::Delete as i32, + value: None, + }), + }, + trigger: match value.trigger { + SetOrKeep::Set(value) => Some(grpc::SetOrKeepAsyncMessageTrigger { + r#type: grpc::AsyncPoolChangeType::Set as i32, + value: value.map(|trigger| trigger.into()), + }), + SetOrKeep::Keep => Some(grpc::SetOrKeepAsyncMessageTrigger { + r#type: grpc::AsyncPoolChangeType::Delete as i32, + value: None, + }), + }, + can_be_executed: match value.can_be_executed { + SetOrKeep::Set(value) => Some(grpc::SetOrKeepBool { + r#type: grpc::AsyncPoolChangeType::Set as i32, + value: Some(value), + }), + SetOrKeep::Keep => Some(grpc::SetOrKeepBool { + r#type: grpc::AsyncPoolChangeType::Delete as i32, + value: None, + }), + }, + hash: match value.hash { + SetOrKeep::Set(value) => Some(grpc::SetOrKeepString { + r#type: grpc::AsyncPoolChangeType::Set as i32, + value: Some(value.to_string()), + }), + SetOrKeep::Keep => Some(grpc::SetOrKeepString { + r#type: grpc::AsyncPoolChangeType::Delete as i32, + value: None, + }), + }, + } + } +} + impl From for grpc::AsyncMessageTrigger { fn from(value: AsyncMessageTrigger) -> Self { grpc::AsyncMessageTrigger { diff --git a/massa-async-pool/src/message.rs b/massa-async-pool/src/message.rs index c77fb706637..393dd8c1b0c 100644 --- a/massa-async-pool/src/message.rs +++ b/massa-async-pool/src/message.rs @@ -2,9 +2,11 @@ //! This file defines the structure representing an asynchronous message -use massa_hash::Hash; +use massa_hash::{Hash, HashDeserializer, HashSerializer}; +use massa_ledger_exports::{Applicable, SetOrKeep, SetOrKeepDeserializer, SetOrKeepSerializer}; use massa_models::address::{AddressDeserializer, AddressSerializer}; use massa_models::amount::{AmountDeserializer, AmountSerializer}; +use massa_models::config::GENESIS_KEY; use massa_models::slot::{SlotDeserializer, SlotSerializer}; use massa_models::{ address::Address, @@ -13,8 +15,8 @@ use massa_models::{ slot::Slot, }; use massa_serialization::{ - Deserializer, OptionDeserializer, OptionSerializer, SerializeError, Serializer, - U64VarIntDeserializer, U64VarIntSerializer, + BoolDeserializer, BoolSerializer, Deserializer, OptionDeserializer, OptionSerializer, + SerializeError, Serializer, U64VarIntDeserializer, U64VarIntSerializer, }; use nom::error::{context, ContextError, ParseError}; use nom::multi::length_data; @@ -71,6 +73,7 @@ impl Serializer for AsyncMessageIdSerializer { /// Slot::new(2, 0), /// Slot::new(3, 0), /// vec![1, 2, 3, 4], + /// None, /// None /// ); /// let id: AsyncMessageId = message.compute_id(); @@ -130,6 +133,7 @@ impl Deserializer for AsyncMessageIdDeserializer { /// Slot::new(2, 0), /// Slot::new(3, 0), /// vec![1, 2, 3, 4], + /// None, /// None /// ); /// let id: AsyncMessageId = message.compute_id(); @@ -177,8 +181,9 @@ pub struct AsyncMessageTrigger { pub datastore_key: Option>, } +#[derive(Clone)] /// Serializer for a trigger for an asynchronous message -struct AsyncMessageTriggerSerializer { +pub struct AsyncMessageTriggerSerializer { address_serializer: AddressSerializer, key_serializer: OptionSerializer, VecU8Serializer>, } @@ -192,6 +197,12 @@ impl AsyncMessageTriggerSerializer { } } +impl Default for AsyncMessageTriggerSerializer { + fn default() -> Self { + Self::new() + } +} + impl Serializer for AsyncMessageTriggerSerializer { fn serialize( &self, @@ -205,8 +216,9 @@ impl Serializer for AsyncMessageTriggerSerializer { } } +#[derive(Clone)] /// Deserializer for a trigger for an asynchronous message -struct 
AsyncMessageTriggerDeserializer { +pub struct AsyncMessageTriggerDeserializer { address_deserializer: AddressDeserializer, key_serializer: OptionDeserializer, VecU8Deserializer>, } @@ -298,6 +310,23 @@ pub struct AsyncMessage { pub hash: Hash, } +impl Default for AsyncMessage { + #[allow(unconditional_recursion)] + fn default() -> Self { + let genesis_address = Address::from_public_key(&(*GENESIS_KEY).get_public_key()); + let slot_zero = Slot::new(0, 0); + Self { + emission_slot: slot_zero, + sender: genesis_address, + destination: genesis_address, + validity_start: slot_zero, + validity_end: slot_zero, + hash: Hash::from_bytes(&[0; 32]), + ..Default::default() + } + } +} + impl AsyncMessage { #[allow(clippy::too_many_arguments)] /// Take an `AsyncMessage` and return it with its hash computed @@ -314,8 +343,9 @@ impl AsyncMessage { validity_end: Slot, data: Vec, trigger: Option, + can_be_executed: Option, ) -> Self { - let async_message_ser = AsyncMessageSerializer::new(); + let async_message_ser = AsyncMessageSerializer::new(can_be_executed.is_some()); let mut buffer = Vec::new(); let mut message = AsyncMessage { emission_slot, @@ -329,7 +359,7 @@ impl AsyncMessage { validity_start, validity_end, data, - can_be_executed: trigger.is_none(), + can_be_executed: can_be_executed.unwrap_or(trigger.is_none()), trigger, // placeholder hash to serialize the message, replaced below hash: Hash::from_bytes(&[0; 32]), @@ -352,8 +382,8 @@ impl AsyncMessage { } /// Recompute the hash of the message. Must be used each time we modify one field - pub fn compute_hash(&mut self) { - let async_message_ser = AsyncMessageSerializer::new(); + pub fn compute_hash(&mut self, for_db: bool) { + let async_message_ser = AsyncMessageSerializer::new(for_db); let mut buffer = Vec::new(); async_message_ser.serialize(self, &mut buffer).expect( "critical: asynchronous message serialization should never fail in recompute hash", @@ -362,17 +392,20 @@ impl AsyncMessage { } } +#[derive(Clone)] pub struct AsyncMessageSerializer { - slot_serializer: SlotSerializer, - amount_serializer: AmountSerializer, - u64_serializer: U64VarIntSerializer, - vec_u8_serializer: VecU8Serializer, - address_serializer: AddressSerializer, - trigger_serializer: OptionSerializer, + pub slot_serializer: SlotSerializer, + pub amount_serializer: AmountSerializer, + pub u64_serializer: U64VarIntSerializer, + pub vec_u8_serializer: VecU8Serializer, + pub address_serializer: AddressSerializer, + pub trigger_serializer: OptionSerializer, + pub bool_serializer: BoolSerializer, + pub for_db: bool, } impl AsyncMessageSerializer { - pub fn new() -> Self { + pub fn new(for_db: bool) -> Self { Self { slot_serializer: SlotSerializer::new(), amount_serializer: AmountSerializer::new(), @@ -380,13 +413,15 @@ impl AsyncMessageSerializer { vec_u8_serializer: VecU8Serializer::new(), address_serializer: AddressSerializer::new(), trigger_serializer: OptionSerializer::new(AsyncMessageTriggerSerializer::new()), + bool_serializer: BoolSerializer::new(), + for_db, } } } impl Default for AsyncMessageSerializer { fn default() -> Self { - Self::new() + Self::new(false) } } @@ -413,10 +448,11 @@ impl Serializer for AsyncMessageSerializer { /// Some(AsyncMessageTrigger { /// address: Address::from_str("AU12dG5xP1RDEB5ocdHkymNVvvSJmUL9BgHwCksDowqmGWxfpm93x").unwrap(), /// datastore_key: Some(vec![1, 2, 3, 4]) - /// }) + /// }), + /// None, /// ); /// let mut buffer = Vec::new(); - /// let message_serializer = AsyncMessageSerializer::new(); + /// let message_serializer = 
AsyncMessageSerializer::new(false); /// message_serializer.serialize(&message, &mut buffer).unwrap(); /// ``` fn serialize( @@ -438,7 +474,6 @@ impl Serializer for AsyncMessageSerializer { })?; buffer.extend([handler_name_len]); buffer.extend(handler_bytes); - self.u64_serializer.serialize(&value.max_gas, buffer)?; self.amount_serializer.serialize(&value.fee, buffer)?; self.amount_serializer.serialize(&value.coins, buffer)?; @@ -448,22 +483,35 @@ impl Serializer for AsyncMessageSerializer { .serialize(&value.validity_end, buffer)?; self.vec_u8_serializer.serialize(&value.data, buffer)?; self.trigger_serializer.serialize(&value.trigger, buffer)?; + if self.for_db { + self.bool_serializer + .serialize(&value.can_be_executed, buffer)?; + } Ok(()) } } +#[derive(Clone)] pub struct AsyncMessageDeserializer { - slot_deserializer: SlotDeserializer, - amount_deserializer: AmountDeserializer, - emission_index_deserializer: U64VarIntDeserializer, - max_gas_deserializer: U64VarIntDeserializer, - data_deserializer: VecU8Deserializer, - address_deserializer: AddressDeserializer, - trigger_deserializer: OptionDeserializer, + pub slot_deserializer: SlotDeserializer, + pub amount_deserializer: AmountDeserializer, + pub emission_index_deserializer: U64VarIntDeserializer, + pub max_gas_deserializer: U64VarIntDeserializer, + pub data_deserializer: VecU8Deserializer, + pub address_deserializer: AddressDeserializer, + pub trigger_deserializer: + OptionDeserializer, + pub bool_deserializer: BoolDeserializer, + pub for_db: bool, } impl AsyncMessageDeserializer { - pub fn new(thread_count: u8, max_async_message_data: u64, max_key_length: u32) -> Self { + pub fn new( + thread_count: u8, + max_async_message_data: u64, + max_key_length: u32, + for_db: bool, + ) -> Self { Self { slot_deserializer: SlotDeserializer::new( (Included(0), Included(u64::MAX)), @@ -486,6 +534,8 @@ impl AsyncMessageDeserializer { trigger_deserializer: OptionDeserializer::new(AsyncMessageTriggerDeserializer::new( max_key_length, )), + bool_deserializer: BoolDeserializer::new(), + for_db, } } } @@ -502,7 +552,7 @@ impl Deserializer for AsyncMessageDeserializer { /// Slot::new(1, 0), /// 0, /// Address::from_str("AU12dG5xP1RDEB5ocdHkymNVvvSJmUL9BgHwCksDowqmGWxfpm93x").unwrap(), - /// Address::from_str("AU12htxRWiEm8jDJpJptr6cwEhWNcCSFWstN1MLSa96DDkVM9Y42G").unwrap(), + /// Address::from_str("AS12htxRWiEm8jDJpJptr6cwEhWNcCSFWstN1MLSa96DDkVM9Y42G").unwrap(), /// String::from("test"), /// 10000000, /// Amount::from_str("1").unwrap(), @@ -513,12 +563,13 @@ impl Deserializer for AsyncMessageDeserializer { /// Some(AsyncMessageTrigger { /// address: Address::from_str("AU12dG5xP1RDEB5ocdHkymNVvvSJmUL9BgHwCksDowqmGWxfpm93x").unwrap(), /// datastore_key: Some(vec![1, 2, 3, 4]), - /// }) + /// }), + /// None, /// ); - /// let message_serializer = AsyncMessageSerializer::new(); + /// let message_serializer = AsyncMessageSerializer::new(false); /// let mut serialized = Vec::new(); /// message_serializer.serialize(&message, &mut serialized).unwrap(); - /// let message_deserializer = AsyncMessageDeserializer::new(32, 100000, 255); + /// let message_deserializer = AsyncMessageDeserializer::new(32, 100000, 255, false); /// // dbg!(&serialized); /// let (rest, message_deserialized) = message_deserializer.deserialize::(&serialized).unwrap(); /// assert!(rest.is_empty()); @@ -583,6 +634,13 @@ impl Deserializer for AsyncMessageDeserializer { context("Failed filter deserialization", |input| { self.trigger_deserializer.deserialize(input) }), + context("Failed 
can_be_executed deserialization", |input| { + if self.for_db { + self.bool_deserializer.deserialize(input) + } else { + Ok((input, false)) + } + }), )), ) .map( @@ -599,6 +657,7 @@ impl Deserializer for AsyncMessageDeserializer { validity_end, data, filter, + can_be_executed, )| { AsyncMessage::new_with_hash( emission_slot, @@ -613,6 +672,11 @@ impl Deserializer for AsyncMessageDeserializer { validity_end, data, filter, + if self.for_db { + Some(can_be_executed) + } else { + None + }, ) }, ) @@ -620,6 +684,395 @@ impl Deserializer for AsyncMessageDeserializer { } } +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct AsyncMessageInfo { + pub validity_start: Slot, + pub validity_end: Slot, + pub max_gas: u64, + pub can_be_executed: bool, + pub trigger: Option, +} + +impl From for AsyncMessageInfo { + fn from(value: AsyncMessage) -> Self { + Self { + validity_start: value.validity_start, + validity_end: value.validity_end, + max_gas: value.max_gas, + can_be_executed: value.can_be_executed, + trigger: value.trigger, + } + } +} + +/// represents an update to one or more fields of a `AsyncMessage` +#[derive(Default, Debug, Clone, PartialEq, Eq, Deserialize, Serialize)] +pub struct AsyncMessageUpdate { + /// Slot at which the message was emitted + pub emission_slot: SetOrKeep, + + /// Index of the emitted message within the `emission_slot`. + /// This is used for disambiguate the emission of multiple messages at the same slot. + pub emission_index: SetOrKeep, + + /// The address that sent the message + pub sender: SetOrKeep
, + + /// The address towards which the message is being sent + pub destination: SetOrKeep<Address>
, + + /// the handler function name within the destination address' bytecode + pub handler: SetOrKeep, + + /// Maximum gas to use when processing the message + pub max_gas: SetOrKeep, + + /// Fee paid by the sender when the message is processed. + pub fee: SetOrKeep, + + /// Coins sent from the sender to the target address of the message. + /// Those coins are spent by the sender address when the message is sent, + /// and credited to the destination address when receiving the message. + /// In case of failure or discard, those coins are reimbursed to the sender. + pub coins: SetOrKeep, + + /// Slot at which the message starts being valid (bound included in the validity range) + pub validity_start: SetOrKeep, + + /// Slot at which the message stops being valid (bound not included in the validity range) + pub validity_end: SetOrKeep, + + /// Raw payload data of the message + pub data: SetOrKeep>, + + /// Trigger that define whenever a message can be executed + pub trigger: SetOrKeep>, + + /// Boolean that determine if the message can be executed. For messages without filter this boolean is always true. + /// For messages with filter, this boolean is true if the filter has been matched between `validity_start` and current slot. + pub can_be_executed: SetOrKeep, + + /// Hash of the message + pub hash: SetOrKeep, +} + +/// Serializer for `AsyncMessageUpdate` +pub struct AsyncMessageUpdateSerializer { + slot_serializer: SetOrKeepSerializer, + amount_serializer: SetOrKeepSerializer, + u64_serializer: SetOrKeepSerializer, + vec_u8_serializer: SetOrKeepSerializer, VecU8Serializer>, + address_serializer: SetOrKeepSerializer, + trigger_serializer: SetOrKeepSerializer< + Option, + OptionSerializer, + >, + bool_serializer: SetOrKeepSerializer, + regular_bool_serializer: BoolSerializer, + hash_serializer: SetOrKeepSerializer, + for_db: bool, +} + +impl AsyncMessageUpdateSerializer { + /// Creates a new `AsyncMessageUpdateSerializer` + pub fn new(for_db: bool) -> Self { + Self { + slot_serializer: SetOrKeepSerializer::new(SlotSerializer::new()), + amount_serializer: SetOrKeepSerializer::new(AmountSerializer::new()), + u64_serializer: SetOrKeepSerializer::new(U64VarIntSerializer::new()), + vec_u8_serializer: SetOrKeepSerializer::new(VecU8Serializer::new()), + address_serializer: SetOrKeepSerializer::new(AddressSerializer::new()), + trigger_serializer: SetOrKeepSerializer::new(OptionSerializer::new( + AsyncMessageTriggerSerializer::new(), + )), + bool_serializer: SetOrKeepSerializer::new(BoolSerializer::new()), + regular_bool_serializer: BoolSerializer::new(), + hash_serializer: SetOrKeepSerializer::new(HashSerializer::new()), + for_db, + } + } +} + +impl Default for AsyncMessageUpdateSerializer { + fn default() -> Self { + Self::new(false) + } +} + +impl Serializer for AsyncMessageUpdateSerializer { + fn serialize( + &self, + value: &AsyncMessageUpdate, + buffer: &mut Vec, + ) -> Result<(), SerializeError> { + self.slot_serializer + .serialize(&value.emission_slot, buffer)?; + self.u64_serializer + .serialize(&value.emission_index, buffer)?; + self.address_serializer.serialize(&value.sender, buffer)?; + self.address_serializer + .serialize(&value.destination, buffer)?; + + match &value.handler { + SetOrKeep::Keep => { + self.regular_bool_serializer.serialize(&false, buffer)?; + } + SetOrKeep::Set(s) => { + let handler_bytes = s.as_bytes(); + let handler_name_len: u8 = handler_bytes.len().try_into().map_err(|_| { + SerializeError::GeneralError( + "could not convert handler name length to u8".into(), + ) + 
})?; + self.regular_bool_serializer.serialize(&true, buffer)?; + buffer.extend([handler_name_len]); + buffer.extend(handler_bytes); + } + }; + + self.u64_serializer.serialize(&value.max_gas, buffer)?; + self.amount_serializer.serialize(&value.fee, buffer)?; + self.amount_serializer.serialize(&value.coins, buffer)?; + self.slot_serializer + .serialize(&value.validity_start, buffer)?; + self.slot_serializer + .serialize(&value.validity_end, buffer)?; + self.vec_u8_serializer.serialize(&value.data, buffer)?; + self.trigger_serializer.serialize(&value.trigger, buffer)?; + if self.for_db { + self.bool_serializer + .serialize(&value.can_be_executed, buffer)?; + } + self.hash_serializer.serialize(&value.hash, buffer)?; + Ok(()) + } +} + +/// Deserializer for `AsyncMessageUpdate` +pub struct AsyncMessageUpdateDeserializer { + slot_deserializer: SetOrKeepDeserializer, + amount_deserializer: SetOrKeepDeserializer, + emission_index_deserializer: SetOrKeepDeserializer, + max_gas_deserializer: SetOrKeepDeserializer, + data_deserializer: SetOrKeepDeserializer, VecU8Deserializer>, + address_deserializer: SetOrKeepDeserializer, + trigger_deserializer: SetOrKeepDeserializer< + Option, + OptionDeserializer, + >, + bool_deserializer: SetOrKeepDeserializer, + regular_bool_deserializer: BoolDeserializer, + hash_deserializer: SetOrKeepDeserializer, + for_db: bool, +} + +impl AsyncMessageUpdateDeserializer { + /// Creates a new `AsyncMessageUpdateDeserializer` + pub fn new( + thread_count: u8, + max_async_message_data: u64, + max_key_length: u32, + for_db: bool, + ) -> Self { + Self { + slot_deserializer: SetOrKeepDeserializer::new(SlotDeserializer::new( + (Included(0), Included(u64::MAX)), + (Included(0), Excluded(thread_count)), + )), + amount_deserializer: SetOrKeepDeserializer::new(AmountDeserializer::new( + Included(Amount::MIN), + Included(Amount::MAX), + )), + emission_index_deserializer: SetOrKeepDeserializer::new(U64VarIntDeserializer::new( + Included(0), + Included(u64::MAX), + )), + max_gas_deserializer: SetOrKeepDeserializer::new(U64VarIntDeserializer::new( + Included(0), + Included(u64::MAX), + )), + data_deserializer: SetOrKeepDeserializer::new(VecU8Deserializer::new( + Included(0), + Included(max_async_message_data), + )), + address_deserializer: SetOrKeepDeserializer::new(AddressDeserializer::new()), + trigger_deserializer: SetOrKeepDeserializer::new(OptionDeserializer::new( + AsyncMessageTriggerDeserializer::new(max_key_length), + )), + bool_deserializer: SetOrKeepDeserializer::new(BoolDeserializer::new()), + regular_bool_deserializer: BoolDeserializer::new(), + hash_deserializer: SetOrKeepDeserializer::new(HashDeserializer::new()), + for_db, + } + } +} + +impl Deserializer for AsyncMessageUpdateDeserializer { + fn deserialize<'a, E: ParseError<&'a [u8]> + ContextError<&'a [u8]>>( + &self, + buffer: &'a [u8], + ) -> IResult<&'a [u8], AsyncMessageUpdate, E> { + context( + "Failed AsyncMessageUpdate deserialization", + tuple(( + context("Failed emission_slot deserialization", |input| { + self.slot_deserializer.deserialize(input) + }), + context("Failed emission_index deserialization", |input| { + self.emission_index_deserializer.deserialize(input) + }), + context("Failed sender deserialization", |input| { + self.address_deserializer.deserialize(input) + }), + context("Failed destination deserialization", |input| { + self.address_deserializer.deserialize(input) + }), + context("Failed handler deserialization", |input| { + let (rest, id) = self.regular_bool_deserializer.deserialize(input)?; + if id 
{ + let (rest, array) = length_data(|input: &'a [u8]| match input.first() { + Some(len) => Ok((&input[1..], *len)), + None => Err(nom::Err::Error(ParseError::from_error_kind( + input, + nom::error::ErrorKind::LengthValue, + ))), + })(rest)?; + Ok(( + rest, + SetOrKeep::Set(String::from_utf8(array.to_vec()).map_err(|_| { + nom::Err::Error(ParseError::from_error_kind( + input, + nom::error::ErrorKind::Fail, + )) + })?), + )) + } else { + Ok((rest, SetOrKeep::Keep)) + } + }), + context("Failed max_gas deserialization", |input| { + self.max_gas_deserializer.deserialize(input) + }), + context("Failed fee deserialization", |input| { + self.amount_deserializer.deserialize(input) + }), + context("Failed coins deserialization", |input| { + self.amount_deserializer.deserialize(input) + }), + context("Failed validity_start deserialization", |input| { + self.slot_deserializer.deserialize(input) + }), + context("Failed validity_end deserialization", |input| { + self.slot_deserializer.deserialize(input) + }), + context("Failed data deserialization", |input| { + self.data_deserializer.deserialize(input) + }), + context("Failed filter deserialization", |input| { + self.trigger_deserializer.deserialize(input) + }), + context("Failed can_be_executed deserialization", |input| { + if self.for_db { + self.bool_deserializer.deserialize(input) + } else { + Ok((input, SetOrKeep::Keep)) + } + }), + context("Failed hash deserialization", |input| { + self.hash_deserializer.deserialize(input) + }), + )), + ) + .map( + |( + emission_slot, + emission_index, + sender, + destination, + handler, + max_gas, + fee, + coins, + validity_start, + validity_end, + data, + trigger, + can_be_executed, + hash, + )| { + AsyncMessageUpdate { + emission_slot, + emission_index, + sender, + destination, + handler, + max_gas, + fee, + coins, + validity_start, + validity_end, + data, + trigger, + can_be_executed, + hash, + } + }, + ) + .parse(buffer) + } +} + +impl Applicable for AsyncMessageUpdate { + /// extends the `AsyncMessageUpdate` with another one + fn apply(&mut self, update: AsyncMessageUpdate) { + self.emission_slot.apply(update.emission_slot); + self.emission_index.apply(update.emission_index); + self.sender.apply(update.sender); + self.destination.apply(update.destination); + self.handler.apply(update.handler); + self.max_gas.apply(update.max_gas); + self.fee.apply(update.fee); + self.coins.apply(update.coins); + self.validity_start.apply(update.validity_start); + self.validity_end.apply(update.validity_end); + self.data.apply(update.data); + self.trigger.apply(update.trigger); + self.can_be_executed.apply(update.can_be_executed); + self.hash.apply(update.hash); + } +} + +impl Applicable for AsyncMessage { + /// extends the `AsyncMessage` with a `AsyncMessageUpdate` + fn apply(&mut self, update: AsyncMessageUpdate) { + update.emission_slot.apply_to(&mut self.emission_slot); + update.emission_index.apply_to(&mut self.emission_index); + update.sender.apply_to(&mut self.sender); + update.destination.apply_to(&mut self.destination); + update.handler.apply_to(&mut self.handler); + update.max_gas.apply_to(&mut self.max_gas); + update.fee.apply_to(&mut self.fee); + update.coins.apply_to(&mut self.coins); + update.validity_start.apply_to(&mut self.validity_start); + update.validity_end.apply_to(&mut self.validity_end); + update.data.apply_to(&mut self.data); + update.trigger.apply_to(&mut self.trigger); + update.can_be_executed.apply_to(&mut self.can_be_executed); + update.hash.apply_to(&mut self.hash); + } +} + +impl Applicable for 
AsyncMessageInfo { + /// extends the `AsyncMessage` with a `AsyncMessageUpdate` + fn apply(&mut self, update: AsyncMessageUpdate) { + update.max_gas.apply_to(&mut self.max_gas); + update.validity_start.apply_to(&mut self.validity_start); + update.validity_end.apply_to(&mut self.validity_end); + update.trigger.apply_to(&mut self.trigger); + update.can_be_executed.apply_to(&mut self.can_be_executed); + } +} + #[cfg(test)] mod tests { use massa_serialization::{DeserializeError, Deserializer, Serializer}; @@ -654,8 +1107,9 @@ mod tests { .unwrap(), datastore_key: None, }), + None, ); - let message_serializer = AsyncMessageSerializer::new(); + let message_serializer = AsyncMessageSerializer::new(false); let mut serialized = Vec::new(); message_serializer .serialize(&message, &mut serialized) @@ -664,6 +1118,7 @@ mod tests { THREAD_COUNT, MAX_ASYNC_MESSAGE_DATA, MAX_DATASTORE_KEY_LENGTH as u32, + false, ); serialized[1] = 50; message_deserializer diff --git a/massa-async-pool/src/pool.rs b/massa-async-pool/src/pool.rs index 6e6222ad1fb..8f934984138 100644 --- a/massa-async-pool/src/pool.rs +++ b/massa-async-pool/src/pool.rs @@ -3,17 +3,21 @@ //! This file defines a finite size final pool of asynchronous messages for use in the context of autonomous smart contracts use crate::{ - changes::{AsyncPoolChanges, Change}, + changes::AsyncPoolChanges, config::AsyncPoolConfig, - message::{AsyncMessage, AsyncMessageId}, + message::{AsyncMessage, AsyncMessageId, AsyncMessageInfo, AsyncMessageUpdate}, AsyncMessageDeserializer, AsyncMessageIdDeserializer, AsyncMessageIdSerializer, - AsyncMessageSerializer, AsyncMessageTrigger, + AsyncMessageSerializer, }; -use massa_hash::{Hash, HASH_SIZE_BYTES}; -use massa_ledger_exports::LedgerChanges; -use massa_models::{slot::Slot, streaming_step::StreamingStep}; +use massa_db::{ + DBBatch, MassaDB, ASYNC_POOL_PREFIX, CF_ERROR, MESSAGE_ID_DESER_ERROR, MESSAGE_ID_SER_ERROR, + MESSAGE_SER_ERROR, STATE_CF, +}; +use massa_ledger_exports::{Applicable, SetOrKeep, SetUpdateOrDelete}; +use massa_models::address::Address; use massa_serialization::{ - Deserializer, SerializeError, Serializer, U64VarIntDeserializer, U64VarIntSerializer, + DeserializeError, Deserializer, SerializeError, Serializer, U64VarIntDeserializer, + U64VarIntSerializer, }; use nom::{ error::{context, ContextError, ParseError}, @@ -21,50 +25,244 @@ use nom::{ sequence::tuple, IResult, Parser, }; -use std::collections::BTreeMap; -use std::ops::Bound::{Excluded, Included, Unbounded}; +use parking_lot::RwLock; +use rocksdb::{Direction, IteratorMode}; +use std::ops::Bound::Included; +use std::{collections::BTreeMap, sync::Arc}; + +const EMISSION_SLOT_IDENT: u8 = 0u8; +const EMISSION_INDEX_IDENT: u8 = 1u8; +const SENDER_IDENT: u8 = 2u8; +const DESTINATION_IDENT: u8 = 3u8; +const HANDLER_IDENT: u8 = 4u8; +const MAX_GAS_IDENT: u8 = 5u8; +const FEE_IDENT: u8 = 6u8; +const COINS_IDENT: u8 = 7u8; +const VALIDITY_START_IDENT: u8 = 8u8; +const VALIDITY_END_IDENT: u8 = 9u8; +const DATA_IDENT: u8 = 10u8; +const TRIGGER_IDENT: u8 = 11u8; +const CAN_BE_EXECUTED_IDENT: u8 = 12u8; + +/// Emission slot key formatting macro +#[macro_export] +macro_rules! emission_slot_key { + ($id:expr) => { + [ + &ASYNC_POOL_PREFIX.as_bytes(), + &$id[..], + &[EMISSION_SLOT_IDENT], + ] + .concat() + }; +} + +/// Emission index key formatting macro +#[macro_export] +macro_rules! 
emission_index_key { + ($id:expr) => { + [ + &ASYNC_POOL_PREFIX.as_bytes(), + &$id[..], + &[EMISSION_INDEX_IDENT], + ] + .concat() + }; +} + +/// Sender key formatting macro +#[macro_export] +macro_rules! sender_key { + ($id:expr) => { + [&ASYNC_POOL_PREFIX.as_bytes(), &$id[..], &[SENDER_IDENT]].concat() + }; +} + +/// Destination key formatting macro +#[macro_export] +macro_rules! destination_key { + ($id:expr) => { + [ + &ASYNC_POOL_PREFIX.as_bytes(), + &$id[..], + &[DESTINATION_IDENT], + ] + .concat() + }; +} + +/// Handler key formatting macro +#[macro_export] +macro_rules! handler_key { + ($id:expr) => { + [&ASYNC_POOL_PREFIX.as_bytes(), &$id[..], &[HANDLER_IDENT]].concat() + }; +} + +/// Max gas key formatting macro +#[macro_export] +macro_rules! max_gas_key { + ($id:expr) => { + [&ASYNC_POOL_PREFIX.as_bytes(), &$id[..], &[MAX_GAS_IDENT]].concat() + }; +} -const ASYNC_POOL_HASH_INITIAL_BYTES: &[u8; 32] = &[0; HASH_SIZE_BYTES]; +/// Fee key formatting macro +#[macro_export] +macro_rules! fee_key { + ($id:expr) => { + [&ASYNC_POOL_PREFIX.as_bytes(), &$id[..], &[FEE_IDENT]].concat() + }; +} +/// Coins key formatting macro +#[macro_export] +macro_rules! coins_key { + ($id:expr) => { + [&ASYNC_POOL_PREFIX.as_bytes(), &$id[..], &[COINS_IDENT]].concat() + }; +} + +/// Validity start key formatting macro +#[macro_export] +macro_rules! validity_start_key { + ($id:expr) => { + [ + &ASYNC_POOL_PREFIX.as_bytes(), + &$id[..], + &[VALIDITY_START_IDENT], + ] + .concat() + }; +} + +/// Validity end key formatting macro +#[macro_export] +macro_rules! validity_end_key { + ($id:expr) => { + [ + &ASYNC_POOL_PREFIX.as_bytes(), + &$id[..], + &[VALIDITY_END_IDENT], + ] + .concat() + }; +} + +/// Data key formatting macro +#[macro_export] +macro_rules! data_key { + ($id:expr) => { + [&ASYNC_POOL_PREFIX.as_bytes(), &$id[..], &[DATA_IDENT]].concat() + }; +} + +/// Trigger key formatting macro +#[macro_export] +macro_rules! trigger_key { + ($id:expr) => { + [&ASYNC_POOL_PREFIX.as_bytes(), &$id[..], &[TRIGGER_IDENT]].concat() + }; +} + +/// Can be executed key formatting macro +#[macro_export] +macro_rules! can_be_executed_key { + ($id:expr) => { + [ + &ASYNC_POOL_PREFIX.as_bytes(), + &$id[..], + &[CAN_BE_EXECUTED_IDENT], + ] + .concat() + }; +} + +/// Message id prefix formatting macro +#[macro_export] +macro_rules! message_id_prefix { + ($id:expr) => { + [&ASYNC_POOL_PREFIX.as_bytes(), &$id[..]].concat() + }; +} + +#[derive(Clone)] /// Represents a pool of sorted messages in a deterministic way. /// The final asynchronous pool is attached to the output of the latest final slot within the context of massa-final-state. /// Nodes must bootstrap the final message pool when they join the network. 
-#[derive(Clone)] pub struct AsyncPool { /// Asynchronous pool configuration - config: AsyncPoolConfig, - - /// Messages sorted by decreasing ID (decreasing priority) - pub messages: BTreeMap, - - /// Hash of the asynchronous pool - pub hash: Hash, + pub config: AsyncPoolConfig, + pub db: Arc>, + pub message_info_cache: BTreeMap, + message_id_serializer: AsyncMessageIdSerializer, + message_serializer: AsyncMessageSerializer, + message_id_deserializer: AsyncMessageIdDeserializer, + message_deserializer_db: AsyncMessageDeserializer, } impl AsyncPool { /// Creates an empty `AsyncPool` - pub fn new(config: AsyncPoolConfig) -> AsyncPool { + pub fn new(config: AsyncPoolConfig, db: Arc>) -> AsyncPool { AsyncPool { - config, - messages: Default::default(), - hash: Hash::from_bytes(ASYNC_POOL_HASH_INITIAL_BYTES), + config: config.clone(), + db, + message_info_cache: Default::default(), + message_id_serializer: AsyncMessageIdSerializer::new(), + message_serializer: AsyncMessageSerializer::new(true), + message_id_deserializer: AsyncMessageIdDeserializer::new(config.thread_count), + message_deserializer_db: AsyncMessageDeserializer::new( + config.thread_count, + config.max_async_message_data, + config.max_key_length, + true, + ), } } - /// Creates an `AsyncPool` from an existing snapshot (and recomputes the hash) - pub fn from_snapshot( - config: AsyncPoolConfig, - messages: BTreeMap, - ) -> AsyncPool { - let mut hash = Hash::from_bytes(&[0; HASH_SIZE_BYTES]); - for (_, msg) in messages.iter() { - hash ^= msg.hash; - } + /// Recomputes the local message_info_cache after bootstrap or loading the state from disk + pub fn recompute_message_info_cache(&mut self) { + self.message_info_cache.clear(); - AsyncPool { - config, - messages, - hash, + let db = self.db.read(); + let handle = db.db.cf_handle(STATE_CF).expect(CF_ERROR); + + // Iterates over the whole database + let mut last_id: Option> = None; + + while let Some(Ok((serialized_message_id, _))) = match last_id { + Some(id) => db + .db + .iterator_cf( + handle, + IteratorMode::From(&can_be_executed_key!(id), Direction::Forward), + ) + .nth(1), + None => db + .db + .iterator_cf( + handle, + IteratorMode::From(ASYNC_POOL_PREFIX.as_bytes(), Direction::Forward), + ) + .next(), + } { + if !serialized_message_id.starts_with(ASYNC_POOL_PREFIX.as_bytes()) { + break; + } + + let (_, message_id) = self + .message_id_deserializer + .deserialize::(&serialized_message_id[ASYNC_POOL_PREFIX.len()..]) + .expect(MESSAGE_ID_DESER_ERROR); + + if let Some(message) = self.fetch_message(&message_id) { + self.message_info_cache.insert(message_id, message.into()); + } + + last_id = Some( + serialized_message_id[ASYNC_POOL_PREFIX.len()..serialized_message_id.len() - 1] + .to_vec(), + ); } } @@ -72,8 +270,10 @@ impl AsyncPool { /// /// USED ONLY FOR BOOTSTRAP pub fn reset(&mut self) { - self.messages.clear(); - self.hash = Hash::from_bytes(ASYNC_POOL_HASH_INITIAL_BYTES); + self.db + .write() + .delete_prefix(ASYNC_POOL_PREFIX, STATE_CF, None); + self.recompute_message_info_cache(); } /// Applies pre-compiled `AsyncPoolChanges` to the pool without checking for overflows. 
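Aside, not part of the diff: a minimal sketch of the key layout these macros produce, assuming the serializers, constants and macros above are in scope.

```
// Sketch only: each message is stored as 13 sub-entries whose keys share
// the prefix ASYNC_POOL_PREFIX ++ serialized(message_id), followed by one
// field-identifier byte (EMISSION_SLOT_IDENT ... CAN_BE_EXECUTED_IDENT).
fn example_keys(message_id: &AsyncMessageId) -> (Vec<u8>, Vec<u8>) {
    let mut serialized_id = Vec::new();
    AsyncMessageIdSerializer::new()
        .serialize(message_id, &mut serialized_id)
        .expect(MESSAGE_ID_SER_ERROR);
    // Key holding the single `max_gas` field of this message:
    let max_gas = max_gas_key!(serialized_id);
    // Prefix matching all sub-entries of this message, as used by
    // `fetch_message` further down:
    let prefix = message_id_prefix!(serialized_id);
    (max_gas, prefix)
}
```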
@@ -81,189 +281,216 @@ impl AsyncPool { /// /// # arguments /// * `changes`: `AsyncPoolChanges` listing all asynchronous pool changes (message insertions/deletions) - pub fn apply_changes_unchecked(&mut self, changes: &AsyncPoolChanges) { + pub fn apply_changes_to_batch(&mut self, changes: &AsyncPoolChanges, batch: &mut DBBatch) { for change in changes.0.iter() { match change { - // add a new message to the pool - Change::Add(message_id, message) => { - if self.messages.insert(*message_id, message.clone()).is_none() { - self.hash ^= message.hash; - } + (id, SetUpdateOrDelete::Set(message)) => { + self.put_entry(id, message.clone(), batch); + self.message_info_cache + .insert(*id, AsyncMessageInfo::from(message.clone())); } - Change::Activate(message_id) => { - if let Some(message) = self.messages.get_mut(message_id) { - self.hash ^= message.hash; - message.can_be_executed = true; - message.compute_hash(); - self.hash ^= message.hash; - } + (id, SetUpdateOrDelete::Update(message_update)) => { + self.update_entry(id, message_update.clone(), batch); + + self.message_info_cache + .entry(*id) + .and_modify(|message_info| { + message_info.apply(message_update.clone()); + }); } - // delete a message from the pool - Change::Delete(message_id) => { - if let Some(removed_message) = self.messages.remove(message_id) { - self.hash ^= removed_message.hash; - } + (id, SetUpdateOrDelete::Delete) => { + self.delete_entry(id, batch); + self.message_info_cache.remove(id); } } } } - /// Settles a slot, adding new messages to the pool and returning expired and excess ones. - /// This method is called at the end of a slot execution to apply the list of emitted messages, - /// and get the list of pruned messages for `coins` reimbursement. + /// Query a message from the database. /// - /// # arguments - /// * `slot`: used to filter out expired messages, not stored - /// * `new_messages`: list of `AsyncMessage` to add to the pool - /// - /// # returns - /// The list of `(message_id, message)` that were eliminated from the pool after the changes were applied, sorted in the following order: - /// * expired messages from the pool, in priority order (from highest to lowest priority) - /// * expired messages from `new_messages` (in the order they appear in `new_messages`) - /// * excess messages after inserting all remaining `new_messages`, in priority order (from highest to lowest priority) - /// The list of message that their trigger has been triggered. - #[allow(clippy::type_complexity)] - pub fn settle_slot( - &mut self, - slot: &Slot, - new_messages: &mut Vec<(AsyncMessageId, AsyncMessage)>, - ledger_changes: &LedgerChanges, - ) -> ( - Vec<(AsyncMessageId, AsyncMessage)>, - Vec<(AsyncMessageId, AsyncMessage)>, - ) { - // Filter out all messages for which the validity end is expired. - // Note that the validity_end bound is NOT included in the validity interval of the message. 
- let mut eliminated: Vec<_> = self - .messages - .drain_filter(|_k, v| *slot >= v.validity_end) - .chain(new_messages.drain_filter(|(_k, v)| *slot >= v.validity_end)) - .collect(); - - // Insert new messages into the pool - self.messages.extend(new_messages.clone()); - - // Truncate message pool to its max size, removing non-prioritary items - let excess_count = self - .messages - .len() - .saturating_sub(self.config.max_length as usize); - eliminated.reserve_exact(excess_count); - for _ in 0..excess_count { - eliminated.push(self.messages.pop_last().unwrap()); // will not panic (checked at excess_count computation) - } - let mut triggered = Vec::new(); - for (id, message) in self.messages.iter_mut() { - if let Some(filter) = &message.trigger && !message.can_be_executed && is_triggered(filter, ledger_changes) - { - message.can_be_executed = true; - triggered.push((*id, message.clone())); + /// This should only be called when we know we want to execute the message. + /// Otherwise, we should use the `message_info_cache`. + pub fn fetch_message(&self, message_id: &AsyncMessageId) -> Option { + let db = self.db.read(); + let handle = db.db.cf_handle(STATE_CF).expect(CF_ERROR); + + let mut serialized_message_id = Vec::new(); + self.message_id_serializer + .serialize(message_id, &mut serialized_message_id) + .expect(MESSAGE_ID_SER_ERROR); + + let mut serialized_message: Vec = Vec::new(); + for (serialized_key, serialized_value) in db + .db + .prefix_iterator_cf(handle, &message_id_prefix!(serialized_message_id)) + .flatten() + { + if !serialized_key.starts_with(&message_id_prefix!(serialized_message_id)) { + break; } + + serialized_message.extend(serialized_value.iter()); } - (eliminated, triggered) - } - /// Takes the best possible batch of messages to execute, with gas limits and slot validity filtering. - /// The returned messages are removed from the pool. - /// This method is used at the beginning of a slot execution to list asynchronous messages to execute. - /// - /// # arguments - /// * `slot`: select only messages that are valid within this slot - /// * `available_gas`: maximum amount of available gas - /// - /// # returns - /// A vector of messages, sorted from the most priority to the least priority - pub fn take_batch_to_execute( - &mut self, - slot: Slot, - mut available_gas: u64, - ) -> Vec<(AsyncMessageId, AsyncMessage)> { - // gather all selected items and remove them from self.messages - // iterate in decreasing priority order - self.messages - .drain_filter(|_, message| { - // check available gas and validity period - if available_gas >= message.max_gas - && slot >= message.validity_start - && slot < message.validity_end - && message.can_be_executed - { - available_gas -= message.max_gas; - true - } else { - false - } - }) - .collect() + match self + .message_deserializer_db + .deserialize::(&serialized_message) + { + Ok((_, message)) => Some(message), + _ => None, + } } - /// Get a part of the async pool. - /// Used for bootstrap. - /// - /// # Arguments - /// * cursor: current bootstrap state + /// Query a vec of messages from the database. /// - /// # Returns - /// The async pool part and the updated cursor - pub fn get_pool_part( + /// This should only be called when we know we want to execute the messages. + /// Otherwise, we should use the `message_info_cache`. 
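+ ///
+ /// Hypothetical usage sketch (not in the original docs), assuming the ids
+ /// were selected from `message_info_cache`:
+ /// ```ignore
+ /// let ids: Vec<&AsyncMessageId> = pool.message_info_cache.keys().collect();
+ /// for (id, msg) in pool.fetch_messages(ids) {
+ ///     // `msg` is `None` if the message is missing or fails to deserialize
+ /// }
+ /// ```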
+ pub fn fetch_messages<'a>( &self, - cursor: StreamingStep, - ) -> ( - BTreeMap, - StreamingStep, - ) { - let mut pool_part = BTreeMap::new(); - let left_bound = match cursor { - StreamingStep::Started => Unbounded, - StreamingStep::Ongoing(last_id) => Excluded(last_id), - StreamingStep::Finished(_) => return (pool_part, cursor), - }; - let mut pool_part_last_id: Option = None; - for (id, message) in self.messages.range((left_bound, Unbounded)) { - if pool_part.len() < self.config.bootstrap_part_size as usize { - pool_part.insert(*id, message.clone()); - pool_part_last_id = Some(*id); - } else { - break; - } - } - if let Some(last_id) = pool_part_last_id { - (pool_part, StreamingStep::Ongoing(last_id)) - } else { - (pool_part, StreamingStep::Finished(None)) + message_ids: Vec<&'a AsyncMessageId>, + ) -> Vec<(&'a AsyncMessageId, Option)> { + let mut fetched_messages = Vec::new(); + + for message_id in message_ids.iter() { + let message = self.fetch_message(message_id); + fetched_messages.push((*message_id, message)); } + + fetched_messages } - /// Set a part of the async pool. - /// Used for bootstrap. - /// - /// # Arguments - /// * part: the async pool part provided by `get_pool_part` - /// - /// # Returns - /// The updated cursor after the current insert - pub fn set_pool_part( - &mut self, - part: BTreeMap, - ) -> StreamingStep { - for (message_id, message) in part { - if self.messages.insert(message_id, message.clone()).is_none() { - self.hash ^= message.hash; - } + /// Deserializes the key and value, useful after bootstrap + pub fn is_key_value_valid(&self, serialized_key: &[u8], serialized_value: &[u8]) -> bool { + if !serialized_key.starts_with(ASYNC_POOL_PREFIX.as_bytes()) { + return false; } - if let Some(message_id) = self.messages.last_key_value().map(|(&id, _)| id) { - StreamingStep::Ongoing(message_id) - } else { - StreamingStep::Finished(None) + + let Ok((rest, _id)) = self.message_id_deserializer.deserialize::(&serialized_key[ASYNC_POOL_PREFIX.len()..]) else { + return false; + }; + if rest.len() != 1 { + return false; } - } -} -/// Check in the ledger changes if a message trigger has been triggered -fn is_triggered(filter: &AsyncMessageTrigger, ledger_changes: &LedgerChanges) -> bool { - ledger_changes.has_changes(&filter.address, filter.datastore_key.clone()) + match rest[0] { + EMISSION_SLOT_IDENT => { + let Ok((rest, _value)) = self.message_deserializer_db.slot_deserializer.deserialize::(serialized_value) else { + return false; + }; + if !rest.is_empty() { + return false; + } + } + EMISSION_INDEX_IDENT => { + let Ok((rest, _value)) = self.message_deserializer_db.emission_index_deserializer.deserialize::(serialized_value) else { + return false; + }; + if !rest.is_empty() { + return false; + } + } + SENDER_IDENT => { + let Ok((rest, _value)): std::result::Result<(&[u8], Address), nom::Err>> = self.message_deserializer_db.address_deserializer.deserialize::(serialized_value) else { + return false; + }; + if !rest.is_empty() { + return false; + } + } + DESTINATION_IDENT => { + let Ok((rest, _value)): std::result::Result<(&[u8], Address), nom::Err>> = self.message_deserializer_db.address_deserializer.deserialize::(serialized_value) else { + return false; + }; + if !rest.is_empty() { + return false; + } + } + HANDLER_IDENT => { + let Some(len) = serialized_value.first() else { + return false; + }; + + if serialized_value.len() != *len as usize + 1 { + return false; + } + + let Ok(_value) = String::from_utf8(serialized_value[1..].to_vec()) else { + return false; + }; + } + 
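+ // The arms below follow the same pattern as those above: deserialize the
+ // value with the matching field deserializer, then reject the entry if
+ // any bytes remain after deserialization.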
MAX_GAS_IDENT => { + let Ok((rest, _value)) = self.message_deserializer_db.max_gas_deserializer.deserialize::(serialized_value) else { + return false; + }; + if !rest.is_empty() { + return false; + } + } + FEE_IDENT => { + let Ok((rest, _value)) = self.message_deserializer_db.amount_deserializer.deserialize::(serialized_value) else { + return false; + }; + if !rest.is_empty() { + return false; + } + } + COINS_IDENT => { + let Ok((rest, _value)) = self.message_deserializer_db.amount_deserializer.deserialize::(serialized_value) else { + return false; + }; + if !rest.is_empty() { + return false; + } + } + VALIDITY_START_IDENT => { + let Ok((rest, _value)) = self.message_deserializer_db.slot_deserializer.deserialize::(serialized_value) else { + return false; + }; + if !rest.is_empty() { + return false; + } + } + VALIDITY_END_IDENT => { + let Ok((rest, _value)) = self.message_deserializer_db.slot_deserializer.deserialize::(serialized_value) else { + return false; + }; + if !rest.is_empty() { + return false; + } + } + DATA_IDENT => { + let Ok((rest, _value)) = self.message_deserializer_db.data_deserializer.deserialize::(serialized_value) else { + return false; + }; + if !rest.is_empty() { + return false; + } + } + TRIGGER_IDENT => { + let Ok((rest, _value)) = self.message_deserializer_db.trigger_deserializer.deserialize::(serialized_value) else { + return false; + }; + if !rest.is_empty() { + return false; + } + } + CAN_BE_EXECUTED_IDENT => { + let Ok((rest, _value)) = self.message_deserializer_db.bool_deserializer.deserialize::(serialized_value) else { + return false; + }; + if !rest.is_empty() { + return false; + } + } + _ => { + return false; + } + } + + true + } } /// Serializer for `AsyncPool` @@ -285,7 +512,7 @@ impl AsyncPoolSerializer { Self { u64_serializer: U64VarIntSerializer::new(), async_message_id_serializer: AsyncMessageIdSerializer::new(), - async_message_serializer: AsyncMessageSerializer::new(), + async_message_serializer: AsyncMessageSerializer::new(true), } } } @@ -313,7 +540,7 @@ impl Serializer> for AsyncPoolSerializer pub struct AsyncPoolDeserializer { u64_deserializer: U64VarIntDeserializer, async_message_id_deserializer: AsyncMessageIdDeserializer, - async_message_deserializer: AsyncMessageDeserializer, + async_message_deserializer_db: AsyncMessageDeserializer, } impl AsyncPoolDeserializer { @@ -330,10 +557,11 @@ impl AsyncPoolDeserializer { Included(max_async_pool_length), ), async_message_id_deserializer: AsyncMessageIdDeserializer::new(thread_count), - async_message_deserializer: AsyncMessageDeserializer::new( + async_message_deserializer_db: AsyncMessageDeserializer::new( thread_count, max_async_message_data, max_key_length, + true, ), } } @@ -355,7 +583,7 @@ impl Deserializer> for AsyncPoolDeseriali self.async_message_id_deserializer.deserialize(input) }), context("Failed async_message deserialization", |input| { - self.async_message_deserializer.deserialize(input) + self.async_message_deserializer_db.deserialize(input) }), )), ), @@ -365,42 +593,383 @@ impl Deserializer> for AsyncPoolDeseriali } } -#[test] -fn test_take_batch() { - use massa_hash::Hash; - use massa_models::{ - address::{Address, UserAddress}, - amount::Amount, - slot::Slot, - }; - use std::str::FromStr; +// Private helpers +impl AsyncPool { + /// Add every sub-entry individually for a given entry. 
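+ ///
+ /// Each of the message's 13 fields is written under its own key (built by
+ /// the `*_key!` macros above), which is what lets `update_entry` rewrite
+ /// individual fields without touching the others.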
+ /// + /// # Arguments + /// * `message_id` + /// * `message` + /// * `batch`: the given operation batch to update + fn put_entry(&self, message_id: &AsyncMessageId, message: AsyncMessage, batch: &mut DBBatch) { + let db = self.db.read(); - let config = AsyncPoolConfig { - thread_count: 2, - max_length: 10, - max_async_message_data: 1_000_000, - bootstrap_part_size: 100, - }; - let mut pool = AsyncPool::new(config); - let address = Address::User(UserAddress(Hash::compute_from(b"abc"))); - for i in 1..10 { - let message = AsyncMessage::new_with_hash( - Slot::new(0, 0), - 0, - address, - address, - "function".to_string(), - i, - Amount::from_str("0.1").unwrap(), - Amount::from_str("0.3").unwrap(), - Slot::new(1, 0), - Slot::new(3, 0), - Vec::new(), - None, + let mut serialized_message_id = Vec::new(); + self.message_id_serializer + .serialize(message_id, &mut serialized_message_id) + .expect(MESSAGE_ID_SER_ERROR); + + // Emission slot + let mut serialized_emission_slot = Vec::new(); + self.message_serializer + .slot_serializer + .serialize(&message.emission_slot, &mut serialized_emission_slot) + .expect(MESSAGE_SER_ERROR); + db.put_or_update_entry_value( + batch, + emission_slot_key!(serialized_message_id), + &serialized_emission_slot, + ); + + // Emission index + let mut serialized_emission_index = Vec::new(); + self.message_serializer + .u64_serializer + .serialize(&message.emission_index, &mut serialized_emission_index) + .expect(MESSAGE_SER_ERROR); + db.put_or_update_entry_value( + batch, + emission_index_key!(serialized_message_id), + &serialized_emission_index, ); - pool.messages.insert(message.compute_id(), message); + + // Sender + let mut serialized_sender = Vec::new(); + self.message_serializer + .address_serializer + .serialize(&message.sender, &mut serialized_sender) + .expect(MESSAGE_SER_ERROR); + db.put_or_update_entry_value( + batch, + sender_key!(serialized_message_id), + &serialized_sender, + ); + + // Destination + let mut serialized_destination = Vec::new(); + self.message_serializer + .address_serializer + .serialize(&message.destination, &mut serialized_destination) + .expect(MESSAGE_SER_ERROR); + db.put_or_update_entry_value( + batch, + destination_key!(serialized_message_id), + &serialized_destination, + ); + + // Handler + let mut serialized_handler = Vec::new(); + let handler_bytes = message.handler.as_bytes(); + let handler_name_len: u8 = handler_bytes.len().try_into().expect(MESSAGE_SER_ERROR); + serialized_handler.extend([handler_name_len]); + serialized_handler.extend(handler_bytes); + db.put_or_update_entry_value( + batch, + handler_key!(serialized_message_id), + &serialized_handler, + ); + + // Max gas + let mut serialized_max_gas = Vec::new(); + self.message_serializer + .u64_serializer + .serialize(&message.max_gas, &mut serialized_max_gas) + .expect(MESSAGE_SER_ERROR); + db.put_or_update_entry_value( + batch, + max_gas_key!(serialized_message_id), + &serialized_max_gas, + ); + + // Fee + let mut serialized_fee = Vec::new(); + self.message_serializer + .amount_serializer + .serialize(&message.fee, &mut serialized_fee) + .expect(MESSAGE_SER_ERROR); + db.put_or_update_entry_value(batch, fee_key!(serialized_message_id), &serialized_fee); + + // Coins + let mut serialized_coins = Vec::new(); + self.message_serializer + .amount_serializer + .serialize(&message.coins, &mut serialized_coins) + .expect(MESSAGE_SER_ERROR); + db.put_or_update_entry_value(batch, coins_key!(serialized_message_id), &serialized_coins); + + // Validity start + let mut 
serialized_validity_start = Vec::new(); + self.message_serializer + .slot_serializer + .serialize(&message.validity_start, &mut serialized_validity_start) + .expect(MESSAGE_SER_ERROR); + db.put_or_update_entry_value( + batch, + validity_start_key!(serialized_message_id), + &serialized_validity_start, + ); + + // Validity end + let mut serialized_validity_end = Vec::new(); + self.message_serializer + .slot_serializer + .serialize(&message.validity_end, &mut serialized_validity_end) + .expect(MESSAGE_SER_ERROR); + db.put_or_update_entry_value( + batch, + validity_end_key!(serialized_message_id), + &serialized_validity_end, + ); + + // Data + let mut serialized_data = Vec::new(); + self.message_serializer + .vec_u8_serializer + .serialize(&message.data, &mut serialized_data) + .expect(MESSAGE_SER_ERROR); + db.put_or_update_entry_value(batch, data_key!(serialized_message_id), &serialized_data); + + // Trigger + let mut serialized_trigger = Vec::new(); + self.message_serializer + .trigger_serializer + .serialize(&message.trigger, &mut serialized_trigger) + .expect(MESSAGE_SER_ERROR); + db.put_or_update_entry_value( + batch, + trigger_key!(serialized_message_id), + &serialized_trigger, + ); + + // Can be executed + let mut serialized_can_be_executed = Vec::new(); + self.message_serializer + .bool_serializer + .serialize(&message.can_be_executed, &mut serialized_can_be_executed) + .expect(MESSAGE_SER_ERROR); + db.put_or_update_entry_value( + batch, + can_be_executed_key!(serialized_message_id), + &serialized_can_be_executed, + ); + } + + /// Update the ledger entry of a given address. + /// + /// # Arguments + /// * `entry_update`: a descriptor of the entry updates to be applied + /// * `batch`: the given operation batch to update + fn update_entry( + &self, + message_id: &AsyncMessageId, + message_update: AsyncMessageUpdate, + batch: &mut DBBatch, + ) { + let db = self.db.read(); + + let mut serialized_message_id = Vec::new(); + self.message_id_serializer + .serialize(message_id, &mut serialized_message_id) + .expect(MESSAGE_ID_SER_ERROR); + + // Emission slot + if let SetOrKeep::Set(emission_slot) = message_update.emission_slot { + let mut serialized_emission_slot = Vec::new(); + self.message_serializer + .slot_serializer + .serialize(&emission_slot, &mut serialized_emission_slot) + .expect(MESSAGE_SER_ERROR); + db.put_or_update_entry_value( + batch, + emission_slot_key!(serialized_message_id), + &serialized_emission_slot, + ); + } + + // Emission index + if let SetOrKeep::Set(emission_index) = message_update.emission_index { + let mut serialized_emission_index = Vec::new(); + self.message_serializer + .u64_serializer + .serialize(&emission_index, &mut serialized_emission_index) + .expect(MESSAGE_SER_ERROR); + db.put_or_update_entry_value( + batch, + emission_index_key!(serialized_message_id), + &serialized_emission_index, + ); + } + + // Sender + if let SetOrKeep::Set(sender) = message_update.sender { + let mut serialized_sender = Vec::new(); + self.message_serializer + .address_serializer + .serialize(&sender, &mut serialized_sender) + .expect(MESSAGE_SER_ERROR); + db.put_or_update_entry_value( + batch, + sender_key!(serialized_message_id), + &serialized_sender, + ); + } + + // Destination + if let SetOrKeep::Set(destination) = message_update.destination { + let mut serialized_destination = Vec::new(); + self.message_serializer + .address_serializer + .serialize(&destination, &mut serialized_destination) + .expect(MESSAGE_SER_ERROR); + db.put_or_update_entry_value( + batch, + 
destination_key!(serialized_message_id), + &serialized_destination, + ); + } + + // Handler + if let SetOrKeep::Set(handler) = message_update.handler { + let mut serialized_handler = Vec::new(); + let handler_bytes = handler.as_bytes(); + let handler_name_len: u8 = handler_bytes.len().try_into().expect(MESSAGE_SER_ERROR); + serialized_handler.extend([handler_name_len]); + serialized_handler.extend(handler_bytes); + db.put_or_update_entry_value( + batch, + handler_key!(serialized_message_id), + &serialized_handler, + ); + } + + // Max gas + if let SetOrKeep::Set(max_gas) = message_update.max_gas { + let mut serialized_max_gas = Vec::new(); + self.message_serializer + .u64_serializer + .serialize(&max_gas, &mut serialized_max_gas) + .expect(MESSAGE_SER_ERROR); + db.put_or_update_entry_value( + batch, + max_gas_key!(serialized_message_id), + &serialized_max_gas, + ); + } + + // Fee + if let SetOrKeep::Set(fee) = message_update.fee { + let mut serialized_fee = Vec::new(); + self.message_serializer + .amount_serializer + .serialize(&fee, &mut serialized_fee) + .expect(MESSAGE_SER_ERROR); + db.put_or_update_entry_value(batch, fee_key!(serialized_message_id), &serialized_fee); + } + + // Coins + if let SetOrKeep::Set(coins) = message_update.coins { + let mut serialized_coins = Vec::new(); + self.message_serializer + .amount_serializer + .serialize(&coins, &mut serialized_coins) + .expect(MESSAGE_SER_ERROR); + db.put_or_update_entry_value( + batch, + coins_key!(serialized_message_id), + &serialized_coins, + ); + } + + // Validity start + if let SetOrKeep::Set(validity_start) = message_update.validity_start { + let mut serialized_validity_start = Vec::new(); + self.message_serializer + .slot_serializer + .serialize(&validity_start, &mut serialized_validity_start) + .expect(MESSAGE_SER_ERROR); + db.put_or_update_entry_value( + batch, + validity_start_key!(serialized_message_id), + &serialized_validity_start, + ); + } + + // Validity end + if let SetOrKeep::Set(validity_end) = message_update.validity_end { + let mut serialized_validity_end = Vec::new(); + self.message_serializer + .slot_serializer + .serialize(&validity_end, &mut serialized_validity_end) + .expect(MESSAGE_SER_ERROR); + db.put_or_update_entry_value( + batch, + validity_end_key!(serialized_message_id), + &serialized_validity_end, + ); + } + + // Data + if let SetOrKeep::Set(data) = message_update.data { + let mut serialized_data = Vec::new(); + self.message_serializer + .vec_u8_serializer + .serialize(&data, &mut serialized_data) + .expect(MESSAGE_SER_ERROR); + db.put_or_update_entry_value(batch, data_key!(serialized_message_id), &serialized_data); + } + + // Trigger + if let SetOrKeep::Set(trigger) = message_update.trigger { + let mut serialized_trigger = Vec::new(); + self.message_serializer + .trigger_serializer + .serialize(&trigger, &mut serialized_trigger) + .expect(MESSAGE_SER_ERROR); + db.put_or_update_entry_value( + batch, + trigger_key!(serialized_message_id), + &serialized_trigger, + ); + } + + // Can be executed + if let SetOrKeep::Set(can_be_executed) = message_update.can_be_executed { + let mut serialized_can_be_executed = Vec::new(); + self.message_serializer + .bool_serializer + .serialize(&can_be_executed, &mut serialized_can_be_executed) + .expect(MESSAGE_SER_ERROR); + db.put_or_update_entry_value( + batch, + can_be_executed_key!(serialized_message_id), + &serialized_can_be_executed, + ); + } + } + + /// Delete every sub-entry associated to the given address. 
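+ ///
+ /// This removes all 13 per-field keys derived from the serialized message
+ /// id, so no stale sub-entries are left behind.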
+ /// + /// # Arguments + /// * batch: the given operation batch to update + fn delete_entry(&self, message_id: &AsyncMessageId, batch: &mut DBBatch) { + let db = self.db.read(); + let mut serialized_message_id = Vec::new(); + self.message_id_serializer + .serialize(message_id, &mut serialized_message_id) + .expect(MESSAGE_ID_SER_ERROR); + + db.delete_key(batch, emission_slot_key!(serialized_message_id)); + db.delete_key(batch, emission_index_key!(serialized_message_id)); + db.delete_key(batch, sender_key!(serialized_message_id)); + db.delete_key(batch, destination_key!(serialized_message_id)); + db.delete_key(batch, handler_key!(serialized_message_id)); + db.delete_key(batch, max_gas_key!(serialized_message_id)); + db.delete_key(batch, fee_key!(serialized_message_id)); + db.delete_key(batch, coins_key!(serialized_message_id)); + db.delete_key(batch, validity_start_key!(serialized_message_id)); + db.delete_key(batch, validity_end_key!(serialized_message_id)); + db.delete_key(batch, data_key!(serialized_message_id)); + db.delete_key(batch, trigger_key!(serialized_message_id)); + db.delete_key(batch, can_be_executed_key!(serialized_message_id)); } - assert_eq!(pool.messages.len(), 9); - pool.take_batch_to_execute(Slot::new(2, 0), 19); - assert_eq!(pool.messages.len(), 4); } diff --git a/massa-async-pool/src/test_exports/bootstrap.rs b/massa-async-pool/src/test_exports/bootstrap.rs index 56130bf4bd0..d3bbdc5e994 100644 --- a/massa-async-pool/src/test_exports/bootstrap.rs +++ b/massa-async-pool/src/test_exports/bootstrap.rs @@ -1,33 +1,32 @@ // Copyright (c) 2022 MASSA LABS -use std::{collections::BTreeMap, str::FromStr}; - -use crate::{AsyncMessage, AsyncMessageId, AsyncPool, AsyncPoolConfig}; -use massa_models::{address::Address, amount::Amount, config::THREAD_COUNT, slot::Slot}; +use crate::{ + AsyncMessage, AsyncMessageDeserializer, AsyncMessageId, AsyncMessageIdDeserializer, AsyncPool, +}; +use massa_db::{ASYNC_POOL_PREFIX, STATE_CF}; +use massa_models::{ + address::Address, + amount::Amount, + config::{MAX_ASYNC_MESSAGE_DATA, MAX_DATASTORE_KEY_LENGTH, THREAD_COUNT}, + slot::Slot, +}; +use massa_serialization::{DeserializeError, Deserializer}; use massa_signature::KeyPair; use rand::Rng; +use rocksdb::{Direction, IteratorMode}; +use std::str::FromStr; /// This file defines tools to test the asynchronous pool bootstrap -/// Creates a `AsyncPool` from pre-set values -pub fn create_async_pool( - config: AsyncPoolConfig, - messages: BTreeMap, -) -> AsyncPool { - let mut async_pool = AsyncPool::new(config); - async_pool.messages = messages; - async_pool -} - fn get_random_address() -> Address { - let keypair = KeyPair::generate(); + let keypair = KeyPair::generate(0).unwrap(); Address::from_public_key(&keypair.get_public_key()) } -pub fn get_random_message(fee: Option) -> AsyncMessage { +pub fn get_random_message(fee: Option, thread_count: u8) -> AsyncMessage { let mut rng = rand::thread_rng(); AsyncMessage::new_with_hash( - Slot::new(rng.gen_range(0..100_000), rng.gen_range(0..THREAD_COUNT)), + Slot::new(rng.gen_range(0..100_000), rng.gen_range(0..thread_count)), 0, get_random_address(), get_random_address(), @@ -39,6 +38,7 @@ pub fn get_random_message(fee: Option) -> AsyncMessage { Slot::new(4, 0), vec![1, 2, 3], None, + None, ) } @@ -65,12 +65,99 @@ pub fn assert_eq_async_message(v1: &AsyncMessage, v2: &AsyncMessage) { /// asserts that two `AsyncPool` are equal pub fn assert_eq_async_pool_bootstrap_state(v1: &AsyncPool, v2: &AsyncPool) { + let message_id_deserializer = 
AsyncMessageIdDeserializer::new(THREAD_COUNT); + let message_deserializer = AsyncMessageDeserializer::new( + THREAD_COUNT, + MAX_ASYNC_MESSAGE_DATA, + MAX_DATASTORE_KEY_LENGTH as u32, + false, + ); + let db1 = v1.db.read(); + let db2 = v2.db.read(); + let handle1 = db1.db.cf_handle(STATE_CF).unwrap(); + let handle2 = db2.db.cf_handle(STATE_CF).unwrap(); + + let iter_1 = db1 + .db + .iterator_cf( + handle1, + IteratorMode::From(ASYNC_POOL_PREFIX.as_bytes(), Direction::Forward), + ) + .flatten() + .take_while(|(k, _v)| k.starts_with(ASYNC_POOL_PREFIX.as_bytes())); + let iter_2 = db2 + .db + .iterator_cf( + handle2, + IteratorMode::From(ASYNC_POOL_PREFIX.as_bytes(), Direction::Forward), + ) + .flatten() + .take_while(|(k, _v)| k.starts_with(ASYNC_POOL_PREFIX.as_bytes())); + assert_eq!( - v1.messages.len(), - v2.messages.len(), - "message count mismatch" + iter_1.count(), + iter_2.count(), + "message values count mismatch" ); - for (val1, val2) in v1.messages.iter().zip(v2.messages.iter()) { - assert_eq_async_message(val1.1, val2.1); + + // Iterates over the whole database + let mut current_id: Option = None; + let mut current_message_1: Vec = Vec::new(); + let mut current_message_2: Vec = Vec::new(); + let mut current_count = 0u8; + const TOTAL_FIELDS_COUNT: u8 = 13; + + let iter_1 = db1 + .db + .iterator_cf( + handle1, + IteratorMode::From(ASYNC_POOL_PREFIX.as_bytes(), Direction::Forward), + ) + .flatten() + .take_while(|(k, _v)| k.starts_with(ASYNC_POOL_PREFIX.as_bytes())); + let iter_2 = db2 + .db + .iterator_cf( + handle2, + IteratorMode::From(ASYNC_POOL_PREFIX.as_bytes(), Direction::Forward), + ) + .flatten() + .take_while(|(k, _v)| k.starts_with(ASYNC_POOL_PREFIX.as_bytes())); + + for (val1, val2) in iter_1.zip(iter_2) { + let (_, message_id_1) = message_id_deserializer + .deserialize::(&val1.0) + .unwrap(); + let (_, message_id_2) = message_id_deserializer + .deserialize::(&val2.0) + .unwrap(); + + if Some(message_id_1) == current_id && message_id_1 == message_id_2 { + current_count += 1; + current_message_1.extend(val1.1.iter()); + current_message_2.extend(val2.1.iter()); + if current_count == TOTAL_FIELDS_COUNT { + let (_rest, message1) = message_deserializer + .deserialize::(¤t_message_1) + .unwrap(); + let (_rest, message2) = message_deserializer + .deserialize::(¤t_message_2) + .unwrap(); + assert_eq_async_message(&message1, &message2); + + current_count = 0; + current_message_1.clear(); + current_message_2.clear(); + current_id = None; + } + } else { + // We reset the current values + current_id = Some(message_id_1); + current_count = 1; + current_message_1.clear(); + current_message_1.extend(val1.1.iter()); + current_message_2.clear(); + current_message_2.extend(val2.1.iter()); + } } } diff --git a/massa-async-pool/src/test_exports/config.rs b/massa-async-pool/src/test_exports/config.rs index 1a9644182ba..5721a4d810e 100644 --- a/massa-async-pool/src/test_exports/config.rs +++ b/massa-async-pool/src/test_exports/config.rs @@ -1,7 +1,7 @@ //! Copyright (c) 2022 MASSA LABS use massa_models::config::{ - ASYNC_POOL_BOOTSTRAP_PART_SIZE, MAX_ASYNC_MESSAGE_DATA, MAX_ASYNC_POOL_LENGTH, THREAD_COUNT, + MAX_ASYNC_MESSAGE_DATA, MAX_ASYNC_POOL_LENGTH, MAX_DATASTORE_KEY_LENGTH, THREAD_COUNT, }; ///! 
This file defines testing tools related to the configuration @@ -13,8 +13,8 @@ impl Default for AsyncPoolConfig { AsyncPoolConfig { max_length: MAX_ASYNC_POOL_LENGTH, max_async_message_data: MAX_ASYNC_MESSAGE_DATA, - bootstrap_part_size: ASYNC_POOL_BOOTSTRAP_PART_SIZE, thread_count: THREAD_COUNT, + max_key_length: MAX_DATASTORE_KEY_LENGTH as u32, } } } diff --git a/massa-bootstrap/Cargo.toml b/massa-bootstrap/Cargo.toml index aa921be5a1d..48ed94148a8 100644 --- a/massa-bootstrap/Cargo.toml +++ b/massa-bootstrap/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "massa_bootstrap" -version = "0.1.0" +version = "0.23.0" authors = ["Massa Labs "] edition = "2021" @@ -9,7 +9,7 @@ edition = "2021" [dependencies] displaydoc = "0.2" num_enum = "0.5" -nom = "7.1" +nom = "=7.1" rand = "0.8" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" @@ -23,12 +23,9 @@ crossbeam = "0.8.2" mio = { version = "0.8", features = ["net", "os-poll"] } # custom modules -massa_async_pool = { path = "../massa-async-pool" } massa_consensus_exports = { path = "../massa-consensus-exports" } -massa_executed_ops = { path = "../massa-executed-ops" } massa_final_state = { path = "../massa-final-state" } massa_hash = { path = "../massa-hash" } -massa_ledger_exports = { path = "../massa-ledger-exports" } massa_logging = { path = "../massa-logging" } massa_models = { path = "../massa-models" } massa_protocol_exports = { path = "../massa-protocol-exports" } @@ -36,24 +33,26 @@ massa_serialization = { path = "../massa-serialization" } massa_signature = { path = "../massa-signature" } massa_pos_exports = { path = "../massa-pos-exports" } massa_time = { path = "../massa-time" } -massa_versioning_worker = { path = "../massa-versioning-worker" } +massa_db = { path = "../massa-db" } +massa_versioning = { path = "../massa-versioning" } [dev-dependencies] mockall = "0.11.4" bitvec = { version = "1.0", features = ["serde"] } +lazy_static = "1.4" +tempfile = "3.3" massa_final_state = { path = "../massa-final-state", features = ["testing"] } massa_async_pool = { path = "../massa-async-pool", features = ["testing"] } +massa_ledger_exports = { path = "../massa-ledger-exports" } massa_ledger_worker = { path = "../massa-ledger-worker", features = [ "testing", ] } +massa_executed_ops = { path = "../massa-executed-ops" } massa_pos_worker = { path = "../massa-pos-worker", features = ["testing"] } massa_pos_exports = { path = "../massa-pos-exports", features = ["testing"] } massa_consensus_exports = { path = "../massa-consensus-exports", features = [ "testing", ] } -lazy_static = "1.4" -tempfile = "3.3" - # for more information on what are the following features used for, see the cargo.toml at workspace level [features] diff --git a/massa-bootstrap/src/bindings/client.rs b/massa-bootstrap/src/bindings/client.rs index e6cfdd82ac0..eec2ff14176 100644 --- a/massa-bootstrap/src/bindings/client.rs +++ b/massa-bootstrap/src/bindings/client.rs @@ -8,11 +8,13 @@ use crate::messages::{ }; use crate::settings::BootstrapClientConfig; use massa_hash::Hash; -use massa_models::config::{MAX_BOOTSTRAP_MESSAGE_SIZE, MAX_BOOTSTRAP_MESSAGE_SIZE_BYTES}; +use massa_models::config::{ + MAX_BOOTSTRAP_MESSAGE_SIZE, MAX_BOOTSTRAP_MESSAGE_SIZE_BYTES, SIGNATURE_DESER_SIZE, +}; use massa_models::serialization::{DeserializeMinBEInt, SerializeMinBEInt}; use massa_models::version::{Version, VersionSerializer}; use massa_serialization::{DeserializeError, Deserializer, Serializer}; -use massa_signature::{PublicKey, Signature, SIGNATURE_SIZE_BYTES}; +use 
massa_signature::{PublicKey, Signature}; use rand::{rngs::StdRng, RngCore, SeedableRng}; use std::time::Instant; use std::{io::Write, net::TcpStream, time::Duration}; @@ -26,7 +28,7 @@ pub struct BootstrapClientBinder { cfg: BootstrapClientConfig, } -const KNOWN_PREFIX_LEN: usize = SIGNATURE_SIZE_BYTES + MAX_BOOTSTRAP_MESSAGE_SIZE_BYTES; +const KNOWN_PREFIX_LEN: usize = SIGNATURE_DESER_SIZE + MAX_BOOTSTRAP_MESSAGE_SIZE_BYTES; /// The known-length component of a message to be received. struct ServerMessageLeader { sig: Signature, @@ -186,16 +188,13 @@ impl BootstrapClientBinder { /// and makes error-type management cleaner fn decode_msg_leader( &self, - leader_buff: &[u8; SIGNATURE_SIZE_BYTES + MAX_BOOTSTRAP_MESSAGE_SIZE_BYTES], + leader_buff: &[u8; SIGNATURE_DESER_SIZE + MAX_BOOTSTRAP_MESSAGE_SIZE_BYTES], ) -> Result { - let sig_array = leader_buff[0..SIGNATURE_SIZE_BYTES] - .try_into() - .expect("logic error in array manipulations"); - let sig = Signature::from_bytes(&sig_array)?; + let sig = Signature::from_bytes(leader_buff)?; // construct the message len from the leader-bufff let msg_len = u32::from_be_bytes_min( - &leader_buff[SIGNATURE_SIZE_BYTES..], + &leader_buff[SIGNATURE_DESER_SIZE..], MAX_BOOTSTRAP_MESSAGE_SIZE, )? .0; diff --git a/massa-bootstrap/src/bindings/server.rs b/massa-bootstrap/src/bindings/server.rs index 56a6659b7d6..448d6e4ed8a 100644 --- a/massa-bootstrap/src/bindings/server.rs +++ b/massa-bootstrap/src/bindings/server.rs @@ -151,7 +151,10 @@ impl BootstrapServerBinder { "bootstrap server timed out sending error '{}' to addr {}", msg, addr ), - Err(e) => error!("{}", e), + Err(e) => error!( + "bootstrap server encountered error '{}' sending error '{}' to addr '{}'", + e, msg, addr + ), Ok(_) => {} } close_fn(); diff --git a/massa-bootstrap/src/client.rs b/massa-bootstrap/src/client.rs index 130d2d92070..45d85d0d5e1 100644 --- a/massa-bootstrap/src/client.rs +++ b/massa-bootstrap/src/client.rs @@ -1,23 +1,24 @@ use humantime::format_duration; -use std::{ - collections::HashSet, - io, - net::{SocketAddr, TcpStream}, - sync::{Arc, Condvar, Mutex}, - time::Duration, -}; - -use massa_final_state::FinalState; +use massa_db::DBBatch; +use massa_final_state::{FinalState, FinalStateError}; use massa_logging::massa_trace; -use massa_models::{node::NodeId, streaming_step::StreamingStep, version::Version}; +use massa_models::{node::NodeId, slot::Slot, streaming_step::StreamingStep, version::Version}; use massa_signature::PublicKey; use massa_time::MassaTime; -use massa_versioning_worker::versioning::{MipStore, MipStoreRaw}; +use massa_versioning::versioning::{ComponentStateTypeId, MipInfo, MipState, StateAtError}; use parking_lot::RwLock; use rand::{ prelude::{SliceRandom, StdRng}, SeedableRng, }; +use std::collections::BTreeMap; +use std::{ + collections::HashSet, + io, + net::{SocketAddr, TcpStream}, + sync::{Arc, Condvar, Mutex}, + time::Duration, +}; use tracing::{debug, info, warn}; use crate::{ @@ -79,16 +80,12 @@ fn stream_final_state_and_consensus( match client.next_timeout(Some(cfg.read_timeout.to_duration()))? 
{ BootstrapServerMessage::BootstrapPart { slot, - ledger_part, - async_pool_part, - pos_cycle_part, - pos_credits_part, - exec_ops_part, - exec_de_part, - final_state_changes, + state_part, + versioning_part, consensus_part, consensus_outdated_ids, last_start_period, + last_slot_before_downtime, } => { // Set final state let mut write_final_state = global_bootstrap_state.final_state.write(); @@ -97,45 +94,20 @@ fn stream_final_state_and_consensus( if let Some(last_start_period) = last_start_period { write_final_state.last_start_period = last_start_period; } - - let last_ledger_step = write_final_state.ledger.set_ledger_part(ledger_part)?; - let last_pool_step = - write_final_state.async_pool.set_pool_part(async_pool_part); - let last_cycle_step = write_final_state - .pos_state - .set_cycle_history_part(pos_cycle_part); - let last_credits_step = write_final_state - .pos_state - .set_deferred_credits_part(pos_credits_part); - let last_ops_step = write_final_state - .executed_ops - .set_executed_ops_part(exec_ops_part); - let last_de_step = write_final_state - .executed_denunciations - .set_executed_de_part(exec_de_part); - for (changes_slot, changes) in final_state_changes.iter() { - write_final_state.ledger.apply_changes( - changes.ledger_changes.clone(), - *changes_slot, - None, - ); - write_final_state - .async_pool - .apply_changes_unchecked(&changes.async_pool_changes); - if !changes.pos_changes.is_empty() { - write_final_state.pos_state.apply_changes( - changes.pos_changes.clone(), - *changes_slot, - false, - )?; - } - if !changes.executed_ops_changes.is_empty() { - write_final_state - .executed_ops - .apply_changes(changes.executed_ops_changes.clone(), *changes_slot); - } + if let Some(last_slot_before_downtime) = last_slot_before_downtime { + write_final_state.last_slot_before_downtime = last_slot_before_downtime; } - write_final_state.slot = slot; + + let (last_state_step, last_versioning_step) = write_final_state + .db + .write() + .write_batch_bootstrap_client(state_part, versioning_part) + .map_err(|e| { + BootstrapError::GeneralError(format!( + "Cannot write received stream batch to disk: {}", + e + )) + })?; // Set consensus blocks if let Some(graph) = global_bootstrap_state.graph.as_mut() { @@ -163,12 +135,8 @@ fn stream_final_state_and_consensus( // Set new message in case of disconnection *next_bootstrap_message = BootstrapClientMessage::AskBootstrapPart { last_slot: Some(slot), - last_ledger_step, - last_pool_step, - last_cycle_step, - last_credits_step, - last_ops_step, - last_de_step, + last_state_step, + last_versioning_step, last_consensus_step, send_last_start_period: false, }; @@ -178,27 +146,30 @@ fn stream_final_state_and_consensus( "client final state bootstrap cursors: {:?}", next_bootstrap_message ); - debug!( - "client final state slot changes length: {}", - final_state_changes.len() - ); } BootstrapServerMessage::BootstrapFinished => { info!("State bootstrap complete"); // Set next bootstrap message *next_bootstrap_message = BootstrapClientMessage::AskBootstrapPeers; + + // Update MIP store by reading from the disk + let mut guard = global_bootstrap_state.final_state.write(); + let db = guard.db.clone(); + let (updated, added) = guard + .mip_store + .extend_from_db(db) + .map_err(|e| BootstrapError::from(FinalStateError::from(e)))?; + + warn_user_about_versioning_updates(updated, added); + return Ok(()); } BootstrapServerMessage::SlotTooOld => { info!("Slot is too old retry bootstrap from scratch"); *next_bootstrap_message = 
BootstrapClientMessage::AskBootstrapPart { last_slot: None, - last_ledger_step: StreamingStep::Started, - last_pool_step: StreamingStep::Started, - last_cycle_step: StreamingStep::Started, - last_credits_step: StreamingStep::Started, - last_ops_step: StreamingStep::Started, - last_de_step: StreamingStep::Started, + last_state_step: StreamingStep::Started, + last_versioning_step: StreamingStep::Started, last_consensus_step: StreamingStep::Started, send_last_start_period: true, }; @@ -206,7 +177,7 @@ fn stream_final_state_and_consensus( write_final_state.reset(); return Err(BootstrapError::GeneralError(String::from("Slot too old"))); } - // At this point, we have succesfully received the next message from the server, and it's an error-message String + // At this point, we have successfully received the next message from the server, and it's an error-message String BootstrapServerMessage::BootstrapError { error } => { return Err(BootstrapError::GeneralError(error)) } @@ -341,25 +312,6 @@ fn bootstrap_from_server( other => return Err(BootstrapError::UnexpectedServerMessage(other)), }; global_bootstrap_state.peers = Some(peers); - *next_bootstrap_message = BootstrapClientMessage::AskBootstrapMipStore; - } - BootstrapClientMessage::AskBootstrapMipStore => { - let mip_store_raw: MipStoreRaw = match send_client_message( - next_bootstrap_message, - client, - write_timeout, - cfg.read_timeout.into(), - "ask bootstrap versioning store timed out", - )? { - BootstrapServerMessage::BootstrapMipStore { store: store_raw } => store_raw, - BootstrapServerMessage::BootstrapError { error } => { - return Err(BootstrapError::ReceivedError(error)) - } - other => return Err(BootstrapError::UnexpectedServerMessage(other)), - }; - - global_bootstrap_state.mip_store = - Some(MipStore(Arc::new(RwLock::new(mip_store_raw)))); *next_bootstrap_message = BootstrapClientMessage::BootstrapSuccess; } BootstrapClientMessage::BootstrapSuccess => { @@ -478,7 +430,19 @@ pub fn get_state( } // create the initial cycle of PoS cycle_history - final_state_guard.pos_state.create_initial_cycle(); + let mut batch = DBBatch::new(); + final_state_guard.pos_state.create_initial_cycle(&mut batch); + + let slot = Slot::new( + final_state_guard.last_start_period, + bootstrap_config.thread_count.saturating_sub(1), + ); + + // TODO: should receive ver batch here? 
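// [Editor's illustrative aside, not part of the patch] The write below assumes
// `write_batch(batch, versioning_batch, change_id)` applies every staged key/value
// pair atomically and stamps the database with the given change id. Threads are
// 0-indexed, so `Slot::new(last_start_period, thread_count.saturating_sub(1))` is
// the last slot of the start period: with, say, 32 threads, a node restarting at
// period P stamps the initial PoS cycle at slot (P, 31). A minimal sketch:
//
//     let mut batch = DBBatch::new();                // staged key/value writes
//     pos_state.create_initial_cycle(&mut batch);    // stage the initial PoS cycle
//     let slot = Slot::new(last_start_period, thread_count.saturating_sub(1));
//     db.write().write_batch(batch, Default::default(), Some(slot)); // apply atomically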
+ final_state_guard + .db + .write() + .write_batch(batch, Default::default(), Some(slot)); } return Ok(GlobalBootstrapState::new(final_state)); } @@ -490,12 +454,8 @@ pub fn get_state( let mut next_bootstrap_message: BootstrapClientMessage = BootstrapClientMessage::AskBootstrapPart { last_slot: None, - last_ledger_step: StreamingStep::Started, - last_pool_step: StreamingStep::Started, - last_cycle_step: StreamingStep::Started, - last_credits_step: StreamingStep::Started, - last_ops_step: StreamingStep::Started, - last_de_step: StreamingStep::Started, + last_state_step: StreamingStep::Started, + last_versioning_step: StreamingStep::Started, last_consensus_step: StreamingStep::Started, send_last_start_period: true, }; @@ -601,3 +561,76 @@ fn get_bootstrap_list_iter( filtered_bootstrap_list.retain(|e| unique_node_ids.insert(e.1)); Ok(filtered_bootstrap_list) } + +fn warn_user_about_versioning_updates(updated: Vec, added: BTreeMap) { + if !added.is_empty() { + for (mip_info, mip_state) in added.iter() { + let now = MassaTime::now().expect("Cannot get current time"); + match mip_state.state_at(now, mip_info.start, mip_info.timeout) { + Ok(st_id) => { + if st_id == ComponentStateTypeId::LockedIn { + // A new MipInfo @ state locked_in - we need to urge the user to update + warn!( + "A new MIP has been locked in: {}, version: {}", + mip_info.name, mip_info.version + ); + // Safe to unwrap here (only panic if not LockedIn) + let activation_at = mip_state.activation_at(mip_info).unwrap(); + + warn!( + "Please update your Massa node before: {}", + activation_at.format_instant() + ); + } else if st_id == ComponentStateTypeId::Active { + // A new MipInfo @ state active - we are not compatible anymore + warn!( + "A new MIP has become active {:?}, version: {:?}", + mip_info.name, mip_info.version + ); + panic!( + "Please update your Massa node to support MIP version {} ({})", + mip_info.version, mip_info.name + ); + } else if st_id == ComponentStateTypeId::Defined { + // a new MipInfo @ state defined or started (or failed / error) + // warn the user to update its node + warn!( + "A new MIP has been defined: {}, version: {}", + mip_info.name, mip_info.version + ); + debug!("MIP state: {:?}", mip_state); + + warn!("Please update your node between: {} and {} if you want to support this update", mip_info.start.format_instant(), mip_info.timeout.format_instant()); + } else { + // a new MipInfo @ state defined or started (or failed / error) + // warn the user to update its node + warn!( + "A new MIP has been received: {}, version: {}", + mip_info.name, mip_info.version + ); + debug!("MIP state: {:?}", mip_state); + warn!("Please update your Massa node to support it"); + } + } + Err(StateAtError::Unpredictable) => { + warn!( + "A new MIP has started: {}, version: {}", + mip_info.name, mip_info.version + ); + debug!("MIP state: {:?}", mip_state); + + warn!("Please update your node between: {} and {} if you want to support this update", mip_info.start.format_instant(), mip_info.timeout.format_instant()); + } + Err(e) => { + // Should never happen + panic!( + "Unable to get state at {} of mip info: {:?}, error: {}", + now, mip_info, e + ) + } + } + } + } + + debug!("MIP store got {} MIP updated from bootstrap", updated.len()); +} diff --git a/massa-bootstrap/src/lib.rs b/massa-bootstrap/src/lib.rs index 4e26d8d9de3..43b986052e9 100644 --- a/massa-bootstrap/src/lib.rs +++ b/massa-bootstrap/src/lib.rs @@ -26,13 +26,13 @@ mod client; mod error; pub use error::BootstrapError; mod listener; -pub use 
listener::BootstrapTcpListener; mod messages; mod server; mod settings; mod tools; + pub use client::{get_state, DefaultConnector}; -use massa_versioning_worker::versioning::MipStore; +pub use listener::BootstrapTcpListener; pub use messages::{ BootstrapClientMessage, BootstrapClientMessageDeserializer, BootstrapClientMessageSerializer, BootstrapServerMessage, BootstrapServerMessageDeserializer, BootstrapServerMessageSerializer, @@ -54,9 +54,6 @@ pub struct GlobalBootstrapState { /// list of network peers pub peers: Option, - - /// versioning info state - pub mip_store: Option, } impl GlobalBootstrapState { @@ -65,7 +62,6 @@ impl GlobalBootstrapState { final_state, graph: None, peers: None, - mip_store: None, } } } diff --git a/massa-bootstrap/src/messages.rs b/massa-bootstrap/src/messages.rs index f187bc620d2..d0672399dc5 100644 --- a/massa-bootstrap/src/messages.rs +++ b/massa-bootstrap/src/messages.rs @@ -1,22 +1,11 @@ // Copyright (c) 2022 MASSA LABS use crate::settings::BootstrapServerMessageDeserializerArgs; -use massa_async_pool::{ - AsyncMessage, AsyncMessageId, AsyncMessageIdDeserializer, AsyncMessageIdSerializer, - AsyncPoolDeserializer, AsyncPoolSerializer, -}; use massa_consensus_exports::bootstrapable_graph::{ BootstrapableGraph, BootstrapableGraphDeserializer, BootstrapableGraphSerializer, }; -use massa_executed_ops::{ - ExecutedDenunciationsDeserializer, ExecutedDenunciationsSerializer, ExecutedOpsDeserializer, - ExecutedOpsSerializer, -}; -use massa_final_state::{StateChanges, StateChangesDeserializer, StateChangesSerializer}; -use massa_ledger_exports::{Key as LedgerKey, KeyDeserializer, KeySerializer}; +use massa_db::StreamBatch; use massa_models::block_id::{BlockId, BlockIdDeserializer, BlockIdSerializer}; -use massa_models::denunciation::DenunciationIndex; -use massa_models::operation::OperationId; use massa_models::prehash::PreHashSet; use massa_models::serialization::{ PreHashSetDeserializer, PreHashSetSerializer, VecU8Deserializer, VecU8Serializer, @@ -26,10 +15,6 @@ use massa_models::streaming_step::{ StreamingStep, StreamingStepDeserializer, StreamingStepSerializer, }; use massa_models::version::{Version, VersionDeserializer, VersionSerializer}; -use massa_pos_exports::{ - CycleInfo, CycleInfoDeserializer, CycleInfoSerializer, DeferredCredits, - DeferredCreditsDeserializer, DeferredCreditsSerializer, -}; use massa_protocol_exports::{ BootstrapPeers, BootstrapPeersDeserializer, BootstrapPeersSerializer, }; @@ -39,8 +24,6 @@ use massa_serialization::{ U64VarIntSerializer, }; use massa_time::{MassaTime, MassaTimeDeserializer, MassaTimeSerializer}; -use massa_versioning_worker::versioning::MipStoreRaw; -use massa_versioning_worker::versioning_ser_der::{MipStoreRawDeserializer, MipStoreRawSerializer}; use nom::error::context; use nom::multi::{length_count, length_data}; use nom::sequence::tuple; @@ -50,7 +33,6 @@ use nom::{ IResult, }; use num_enum::{IntoPrimitive, TryFromPrimitive}; -use std::collections::{BTreeMap, HashSet}; use std::convert::TryInto; use std::ops::Bound::{Excluded, Included}; @@ -74,31 +56,18 @@ pub enum BootstrapServerMessage { BootstrapPart { /// Slot the state changes are attached to slot: Slot, - /// Part of the execution ledger sent in a serialized way - ledger_part: Vec, - /// Part of the async pool - async_pool_part: BTreeMap, - /// Part of the Proof of Stake `cycle_history` - pos_cycle_part: Option, - /// Part of the Proof of Stake `deferred_credits` - pos_credits_part: DeferredCredits, - /// Part of the executed operations - exec_ops_part: 
BTreeMap>, - /// Part of the executed operations - exec_de_part: BTreeMap>, - /// Ledger change for addresses inferior to `address` of the client message until the actual slot. - final_state_changes: Vec<(Slot, StateChanges)>, + /// Part of the state in a serialized way + state_part: StreamBatch, + /// Part of the state (specific to versioning) in a serialized way + versioning_part: StreamBatch, /// Part of the consensus graph consensus_part: BootstrapableGraph, /// Outdated block ids in the current consensus graph bootstrap consensus_outdated_ids: PreHashSet, /// Last Start Period for network restart management last_start_period: Option, - }, - /// Bootstrap versioning store - BootstrapMipStore { - /// Server mip store - store: MipStoreRaw, + /// Last Slot before downtime for network restart management + last_slot_before_downtime: Option>, }, /// Message sent when the final state and consensus bootstrap are finished BootstrapFinished, @@ -122,9 +91,6 @@ impl ToString for BootstrapServerMessage { BootstrapServerMessage::BootstrapError { error } => { format!("BootstrapError {{ error: {} }}", error) } - BootstrapServerMessage::BootstrapMipStore { store } => { - format!("BootstrapMipStore {{ store: {:?} }}", store) - } } } } @@ -138,7 +104,6 @@ enum MessageServerTypeId { FinalStateFinished = 3u32, SlotTooOld = 4u32, BootstrapError = 5u32, - MipStore = 6u32, } /// Serializer for `BootstrapServerMessage` @@ -148,18 +113,14 @@ pub struct BootstrapServerMessageSerializer { time_serializer: MassaTimeSerializer, version_serializer: VersionSerializer, peers_serializer: BootstrapPeersSerializer, - state_changes_serializer: StateChangesSerializer, bootstrapable_graph_serializer: BootstrapableGraphSerializer, block_id_set_serializer: PreHashSetSerializer, vec_u8_serializer: VecU8Serializer, + opt_vec_u8_serializer: OptionSerializer, VecU8Serializer>, slot_serializer: SlotSerializer, - async_pool_serializer: AsyncPoolSerializer, - opt_pos_cycle_serializer: OptionSerializer, - pos_credits_serializer: DeferredCreditsSerializer, - exec_ops_serializer: ExecutedOpsSerializer, - exec_de_serializer: ExecutedDenunciationsSerializer, opt_last_start_period_serializer: OptionSerializer, - store_serializer: MipStoreRawSerializer, + opt_last_slot_before_downtime_serializer: + OptionSerializer, OptionSerializer>, } impl Default for BootstrapServerMessageSerializer { @@ -177,18 +138,15 @@ impl BootstrapServerMessageSerializer { time_serializer: MassaTimeSerializer::new(), version_serializer: VersionSerializer::new(), peers_serializer: BootstrapPeersSerializer::new(), - state_changes_serializer: StateChangesSerializer::new(), bootstrapable_graph_serializer: BootstrapableGraphSerializer::new(), block_id_set_serializer: PreHashSetSerializer::new(BlockIdSerializer::new()), vec_u8_serializer: VecU8Serializer::new(), + opt_vec_u8_serializer: OptionSerializer::new(VecU8Serializer::new()), slot_serializer: SlotSerializer::new(), - async_pool_serializer: AsyncPoolSerializer::new(), - opt_pos_cycle_serializer: OptionSerializer::new(CycleInfoSerializer::new()), - pos_credits_serializer: DeferredCreditsSerializer::new(), - exec_ops_serializer: ExecutedOpsSerializer::new(), - exec_de_serializer: ExecutedDenunciationsSerializer::new(), opt_last_start_period_serializer: OptionSerializer::new(U64VarIntSerializer::new()), - store_serializer: MipStoreRawSerializer::new(), + opt_last_slot_before_downtime_serializer: OptionSerializer::new(OptionSerializer::new( + SlotSerializer::new(), + )), } } } @@ -204,7 +162,7 @@ impl Serializer for 
BootstrapServerMessageSerializer { /// /// let message_serializer = BootstrapServerMessageSerializer::new(); /// let bootstrap_server_message = BootstrapServerMessage::BootstrapTime { - /// server_time: MassaTime::from(0), + /// server_time: MassaTime::from_millis(0), /// version: Version::from_str("TEST.1.10").unwrap(), /// }; /// let mut message_serialized = Vec::new(); @@ -232,46 +190,52 @@ impl Serializer for BootstrapServerMessageSerializer { } BootstrapServerMessage::BootstrapPart { slot, - ledger_part, - async_pool_part, - pos_cycle_part, - pos_credits_part, - exec_ops_part, - exec_de_part, - final_state_changes, + state_part, + versioning_part, consensus_part, consensus_outdated_ids, last_start_period, + last_slot_before_downtime, } => { // message type self.u32_serializer .serialize(&u32::from(MessageServerTypeId::FinalStatePart), buffer)?; // slot self.slot_serializer.serialize(slot, buffer)?; - // ledger - self.vec_u8_serializer.serialize(ledger_part, buffer)?; - // async pool - self.async_pool_serializer - .serialize(async_pool_part, buffer)?; - // pos cycle info - self.opt_pos_cycle_serializer - .serialize(pos_cycle_part, buffer)?; - // pos deferred credits - self.pos_credits_serializer - .serialize(pos_credits_part, buffer)?; - // executed operations - self.exec_ops_serializer.serialize(exec_ops_part, buffer)?; - // processed denunciations - self.exec_de_serializer.serialize(exec_de_part, buffer)?; - // changes length + // state self.u64_serializer - .serialize(&(final_state_changes.len() as u64), buffer)?; - // changes - for (slot, state_changes) in final_state_changes { - self.slot_serializer.serialize(slot, buffer)?; - self.state_changes_serializer - .serialize(state_changes, buffer)?; + .serialize(&(state_part.new_elements.len() as u64), buffer)?; + for (key, value) in state_part.new_elements.iter() { + self.vec_u8_serializer.serialize(key, buffer)?; + self.vec_u8_serializer.serialize(value, buffer)?; } + self.u64_serializer.serialize( + &(state_part.updates_on_previous_elements.len() as u64), + buffer, + )?; + for (key, value) in state_part.updates_on_previous_elements.iter() { + self.vec_u8_serializer.serialize(key, buffer)?; + self.opt_vec_u8_serializer.serialize(value, buffer)?; + } + self.slot_serializer + .serialize(&state_part.change_id, buffer)?; + // versioning + self.u64_serializer + .serialize(&(versioning_part.new_elements.len() as u64), buffer)?; + for (key, value) in versioning_part.new_elements.iter() { + self.vec_u8_serializer.serialize(key, buffer)?; + self.vec_u8_serializer.serialize(value, buffer)?; + } + self.u64_serializer.serialize( + &(versioning_part.updates_on_previous_elements.len() as u64), + buffer, + )?; + for (key, value) in versioning_part.updates_on_previous_elements.iter() { + self.vec_u8_serializer.serialize(key, buffer)?; + self.opt_vec_u8_serializer.serialize(value, buffer)?; + } + self.slot_serializer + .serialize(&versioning_part.change_id, buffer)?; // consensus graph self.bootstrapable_graph_serializer .serialize(consensus_part, buffer)?; @@ -281,11 +245,9 @@ impl Serializer for BootstrapServerMessageSerializer { // initial state self.opt_last_start_period_serializer .serialize(last_start_period, buffer)?; - } - BootstrapServerMessage::BootstrapMipStore { store: store_raw } => { - self.u32_serializer - .serialize(&u32::from(MessageServerTypeId::MipStore), buffer)?; - self.store_serializer.serialize(store_raw, buffer)?; + // initial state + self.opt_last_slot_before_downtime_serializer + .serialize(last_slot_before_downtime, 
buffer)?; } BootstrapServerMessage::BootstrapFinished => { self.u32_serializer @@ -317,20 +279,17 @@ pub struct BootstrapServerMessageDeserializer { time_deserializer: MassaTimeDeserializer, version_deserializer: VersionDeserializer, peers_deserializer: BootstrapPeersDeserializer, - length_state_changes: U64VarIntDeserializer, - state_changes_deserializer: StateChangesDeserializer, + state_new_elements_length_deserializer: U64VarIntDeserializer, + state_updates_length_deserializer: U64VarIntDeserializer, + vec_u8_deserializer: VecU8Deserializer, + opt_vec_u8_deserializer: OptionDeserializer, VecU8Deserializer>, bootstrapable_graph_deserializer: BootstrapableGraphDeserializer, block_id_set_deserializer: PreHashSetDeserializer, - ledger_bytes_deserializer: VecU8Deserializer, length_bootstrap_error: U64VarIntDeserializer, slot_deserializer: SlotDeserializer, - async_pool_deserializer: AsyncPoolDeserializer, - opt_pos_cycle_deserializer: OptionDeserializer, - pos_credits_deserializer: DeferredCreditsDeserializer, - exec_ops_deserializer: ExecutedOpsDeserializer, - executed_de_deserializer: ExecutedDenunciationsDeserializer, opt_last_start_period_deserializer: OptionDeserializer, - store_deserializer: MipStoreRawDeserializer, + opt_last_slot_before_downtime_deserializer: + OptionDeserializer, OptionDeserializer>, } impl BootstrapServerMessageDeserializer { @@ -348,25 +307,14 @@ impl BootstrapServerMessageDeserializer { args.max_advertise_length, args.max_listeners_per_peer, ), - state_changes_deserializer: StateChangesDeserializer::new( - args.thread_count, - args.max_async_pool_changes, - args.max_async_message_data, - args.max_ledger_changes_count, - args.max_datastore_key_length, - args.max_datastore_value_length, - args.max_datastore_entry_count, - args.max_rolls_length, - args.max_production_stats_length, - args.max_credits_length, - args.max_ops_changes_length, - args.endorsement_count, - args.max_denunciation_changes_length, - ), - length_state_changes: U64VarIntDeserializer::new( + vec_u8_deserializer: VecU8Deserializer::new( Included(0), - Included(args.max_changes_slot_count), + Included(args.max_datastore_value_length), ), + opt_vec_u8_deserializer: OptionDeserializer::new(VecU8Deserializer::new( + Included(0), + Included(args.max_datastore_value_length), + )), bootstrapable_graph_deserializer: BootstrapableGraphDeserializer::new( (&args).into(), args.max_bootstrap_blocks_length, @@ -376,50 +324,30 @@ impl BootstrapServerMessageDeserializer { Included(0), Included(args.max_bootstrap_blocks_length as u64), ), - ledger_bytes_deserializer: VecU8Deserializer::new( - Included(0), - Included(args.max_bootstrap_final_state_parts_size), - ), length_bootstrap_error: U64VarIntDeserializer::new( Included(0), Included(args.max_bootstrap_error_length), ), + state_new_elements_length_deserializer: U64VarIntDeserializer::new( + Included(0), + Included(args.max_new_elements), + ), + state_updates_length_deserializer: U64VarIntDeserializer::new( + Included(0), + Included(u64::MAX), + ), slot_deserializer: SlotDeserializer::new( (Included(0), Included(u64::MAX)), (Included(0), Excluded(args.thread_count)), ), - async_pool_deserializer: AsyncPoolDeserializer::new( - args.thread_count, - args.max_async_pool_length, - args.max_async_message_data, - args.max_datastore_key_length as u32, - ), - opt_pos_cycle_deserializer: OptionDeserializer::new(CycleInfoDeserializer::new( - args.max_rolls_length, - args.max_production_stats_length, - )), - pos_credits_deserializer: DeferredCreditsDeserializer::new( - 
args.thread_count, - args.max_credits_length, - false, - ), - exec_ops_deserializer: ExecutedOpsDeserializer::new( - args.thread_count, - args.max_executed_ops_length, - args.max_operations_per_block as u64, - ), - executed_de_deserializer: ExecutedDenunciationsDeserializer::new( - args.thread_count, - args.endorsement_count, - args.max_denunciation_changes_length, - args.max_denunciations_per_block_header as u64, - ), opt_last_start_period_deserializer: OptionDeserializer::new( U64VarIntDeserializer::new(Included(u64::MIN), Included(u64::MAX)), ), - store_deserializer: MipStoreRawDeserializer::new( - args.mip_store_stats_block_considered, - args.mip_store_stats_counters_max, + opt_last_slot_before_downtime_deserializer: OptionDeserializer::new( + OptionDeserializer::new(SlotDeserializer::new( + (Included(0), Included(u64::MAX)), + (Included(0), Excluded(args.thread_count)), + )), ), } } @@ -440,7 +368,7 @@ impl Deserializer for BootstrapServerMessageDeserializer /// thread_count: 32, endorsement_count: 16, /// max_listeners_per_peer: 1000, /// max_advertise_length: 1000, max_bootstrap_blocks_length: 1000, - /// max_operations_per_block: 1000, max_bootstrap_final_state_parts_size: 1000, + /// max_operations_per_block: 1000, max_new_elements: 1000, /// max_async_pool_changes: 1000, max_async_pool_length: 1000, max_async_message_data: 1000, /// max_ledger_changes_count: 1000, max_datastore_key_length: 255, /// max_datastore_value_length: 1000, @@ -451,7 +379,7 @@ impl Deserializer for BootstrapServerMessageDeserializer /// max_denunciations_per_block_header: 128, max_denunciation_changes_length: 1000,}; /// let message_deserializer = BootstrapServerMessageDeserializer::new(args); /// let bootstrap_server_message = BootstrapServerMessage::BootstrapTime { - /// server_time: MassaTime::from(0), + /// server_time: MassaTime::from_millis(0), /// version: Version::from_str("TEST.1.10").unwrap(), /// }; /// let mut message_serialized = Vec::new(); @@ -462,7 +390,7 @@ impl Deserializer for BootstrapServerMessageDeserializer /// server_time, /// version, /// } => { - /// assert_eq!(server_time, MassaTime::from(0)); + /// assert_eq!(server_time, MassaTime::from_millis(0)); /// assert_eq!(version, Version::from_str("TEST.1.10").unwrap()); /// } /// _ => panic!("Unexpected message"), @@ -507,46 +435,75 @@ impl Deserializer for BootstrapServerMessageDeserializer }) .map(|peers| BootstrapServerMessage::BootstrapPeers { peers }) .parse(input), - MessageServerTypeId::MipStore => { - context("Failed MIP store deserialization", |input| { - self.store_deserializer.deserialize(input) - }) - .map(|store| BootstrapServerMessage::BootstrapMipStore { store }) - .parse(input) - } MessageServerTypeId::FinalStatePart => tuple(( context("Failed slot deserialization", |input| { self.slot_deserializer.deserialize(input) }), - context("Failed ledger_data deserialization", |input| { - self.ledger_bytes_deserializer.deserialize(input) - }), - context("Failed async_pool_part deserialization", |input| { - self.async_pool_deserializer.deserialize(input) - }), - context("Failed pos_cycle_part deserialization", |input| { - self.opt_pos_cycle_deserializer.deserialize(input) - }), - context("Failed pos_credits_part deserialization", |input| { - self.pos_credits_deserializer.deserialize(input) - }), - context("Failed exec_ops_part deserialization", |input| { - self.exec_ops_deserializer.deserialize(input) - }), - context("Failed exec_de_part deserialization", |input| { - self.executed_de_deserializer.deserialize(input) - }), 
context( - "Failed final_state_changes deserialization", - length_count( - context("Failed length deserialization", |input| { - self.length_state_changes.deserialize(input) + "Failed state_part deserialization", + tuple(( + context( + "Failed new_elements deserialization", + length_count( + context("Failed length deserialization", |input| { + self.state_new_elements_length_deserializer + .deserialize(input) + }), + tuple(( + |input| self.vec_u8_deserializer.deserialize(input), + |input| self.vec_u8_deserializer.deserialize(input), + )), + ), + ), + context( + "Failed updates deserialization", + length_count( + context("Failed length deserialization", |input| { + self.state_updates_length_deserializer.deserialize(input) + }), + tuple(( + |input| self.vec_u8_deserializer.deserialize(input), + |input| self.opt_vec_u8_deserializer.deserialize(input), + )), + ), + ), + context("Failed slot deserialization", |input| { + self.slot_deserializer.deserialize(input) }), - tuple(( - |input| self.slot_deserializer.deserialize(input), - |input| self.state_changes_deserializer.deserialize(input), - )), - ), + )), + ), + context( + "Failed versioning_part deserialization", + tuple(( + context( + "Failed new_elements deserialization", + length_count( + context("Failed length deserialization", |input| { + self.state_new_elements_length_deserializer + .deserialize(input) + }), + tuple(( + |input| self.vec_u8_deserializer.deserialize(input), + |input| self.vec_u8_deserializer.deserialize(input), + )), + ), + ), + context( + "Failed updates deserialization", + length_count( + context("Failed length deserialization", |input| { + self.state_updates_length_deserializer.deserialize(input) + }), + tuple(( + |input| self.vec_u8_deserializer.deserialize(input), + |input| self.opt_vec_u8_deserializer.deserialize(input), + )), + ), + ), + context("Failed slot deserialization", |input| { + self.slot_deserializer.deserialize(input) + }), + )), ), context("Failed consensus_part deserialization", |input| { self.bootstrapable_graph_deserializer.deserialize(input) @@ -557,33 +514,49 @@ impl Deserializer for BootstrapServerMessageDeserializer context("Failed last_start_period deserialization", |input| { self.opt_last_start_period_deserializer.deserialize(input) }), + context( + "Failed last_slot_before_downtime deserialization", + |input| { + self.opt_last_slot_before_downtime_deserializer + .deserialize(input) + }, + ), )) .map( |( slot, - ledger_part, - async_pool_part, - pos_cycle_part, - pos_credits_part, - exec_ops_part, - exec_de_part, - final_state_changes, + (state_part_new_elems, state_part_updates, state_part_change_id), + ( + versioning_part_new_elems, + versioning_part_updates, + versioning_part_change_id, + ), consensus_part, consensus_outdated_ids, last_start_period, + last_slot_before_downtime, )| { + let state_part = StreamBatch:: { + new_elements: state_part_new_elems.into_iter().collect(), + updates_on_previous_elements: state_part_updates.into_iter().collect(), + change_id: state_part_change_id, + }; + let versioning_part = StreamBatch:: { + new_elements: versioning_part_new_elems.into_iter().collect(), + updates_on_previous_elements: versioning_part_updates + .into_iter() + .collect(), + change_id: versioning_part_change_id, + }; + BootstrapServerMessage::BootstrapPart { slot, - ledger_part, - async_pool_part, - pos_cycle_part, - pos_credits_part, - exec_ops_part, - exec_de_part, - final_state_changes, + state_part, + versioning_part, consensus_part, consensus_outdated_ids, last_start_period, + 
last_slot_before_downtime, } }, ) @@ -618,25 +591,15 @@ pub enum BootstrapClientMessage { AskBootstrapPart { /// Slot we are attached to for changes last_slot: Option, - /// Last received ledger key - last_ledger_step: StreamingStep, - /// Last received async message id - last_pool_step: StreamingStep, - /// Last received Proof of Stake cycle - last_cycle_step: StreamingStep, - /// Last received Proof of Stake credits slot - last_credits_step: StreamingStep, - /// Last received executed operation associated slot - last_ops_step: StreamingStep, - /// Last received executed denunciations associated slot - last_de_step: StreamingStep, + /// Last received state key + last_state_step: StreamingStep>, + /// Last received versioning key + last_versioning_step: StreamingStep>, /// Last received consensus block slot last_consensus_step: StreamingStep>, /// Should be true only for the first part, false later send_last_start_period: bool, }, - /// Ask for mip store - AskBootstrapMipStore, /// Bootstrap error BootstrapError { /// Error message @@ -653,17 +616,13 @@ enum MessageClientTypeId { AskFinalStatePart = 1u32, BootstrapError = 2u32, BootstrapSuccess = 3u32, - AskBootstrapMipStore = 4u32, } /// Serializer for `BootstrapClientMessage` pub struct BootstrapClientMessageSerializer { u32_serializer: U32VarIntSerializer, slot_serializer: SlotSerializer, - ledger_step_serializer: StreamingStepSerializer, - pool_step_serializer: StreamingStepSerializer, - cycle_step_serializer: StreamingStepSerializer, - slot_step_serializer: StreamingStepSerializer, + state_step_serializer: StreamingStepSerializer, VecU8Serializer>, block_ids_step_serializer: StreamingStepSerializer< PreHashSet, PreHashSetSerializer, @@ -677,10 +636,7 @@ impl BootstrapClientMessageSerializer { Self { u32_serializer: U32VarIntSerializer::new(), slot_serializer: SlotSerializer::new(), - ledger_step_serializer: StreamingStepSerializer::new(KeySerializer::new(true)), - pool_step_serializer: StreamingStepSerializer::new(AsyncMessageIdSerializer::new()), - cycle_step_serializer: StreamingStepSerializer::new(U64VarIntSerializer::new()), - slot_step_serializer: StreamingStepSerializer::new(SlotSerializer::new()), + state_step_serializer: StreamingStepSerializer::new(VecU8Serializer::new()), block_ids_step_serializer: StreamingStepSerializer::new(PreHashSetSerializer::new( BlockIdSerializer::new(), )), @@ -721,12 +677,8 @@ impl Serializer for BootstrapClientMessageSerializer { } BootstrapClientMessage::AskBootstrapPart { last_slot, - last_ledger_step, - last_pool_step, - last_cycle_step, - last_credits_step, - last_ops_step, - last_de_step, + last_state_step, + last_versioning_step, last_consensus_step, send_last_start_period, } => { @@ -734,16 +686,10 @@ impl Serializer for BootstrapClientMessageSerializer { .serialize(&u32::from(MessageClientTypeId::AskFinalStatePart), buffer)?; if let Some(slot) = last_slot { self.slot_serializer.serialize(slot, buffer)?; - self.ledger_step_serializer - .serialize(last_ledger_step, buffer)?; - self.pool_step_serializer - .serialize(last_pool_step, buffer)?; - self.cycle_step_serializer - .serialize(last_cycle_step, buffer)?; - self.slot_step_serializer - .serialize(last_credits_step, buffer)?; - self.slot_step_serializer.serialize(last_ops_step, buffer)?; - self.slot_step_serializer.serialize(last_de_step, buffer)?; + self.state_step_serializer + .serialize(last_state_step, buffer)?; + self.state_step_serializer + .serialize(last_versioning_step, buffer)?; self.block_ids_step_serializer 
.serialize(last_consensus_step, buffer)?; self.bool_serializer @@ -765,12 +711,6 @@ impl Serializer for BootstrapClientMessageSerializer { self.u32_serializer .serialize(&u32::from(MessageClientTypeId::BootstrapSuccess), buffer)?; } - BootstrapClientMessage::AskBootstrapMipStore => { - self.u32_serializer.serialize( - &u32::from(MessageClientTypeId::AskBootstrapMipStore), - buffer, - )?; - } } Ok(()) } @@ -781,10 +721,7 @@ pub struct BootstrapClientMessageDeserializer { id_deserializer: U32VarIntDeserializer, length_error_deserializer: U32VarIntDeserializer, slot_deserializer: SlotDeserializer, - ledger_step_deserializer: StreamingStepDeserializer, - pool_step_deserializer: StreamingStepDeserializer, - cycle_step_deserializer: StreamingStepDeserializer, - slot_step_deserializer: StreamingStepDeserializer, + state_step_deserializer: StreamingStepDeserializer, VecU8Deserializer>, block_ids_step_deserializer: StreamingStepDeserializer< PreHashSet, PreHashSetDeserializer, @@ -796,7 +733,7 @@ impl BootstrapClientMessageDeserializer { /// Creates a new `BootstrapClientMessageDeserializer` pub fn new( thread_count: u8, - max_datastore_key_length: u8, + max_datastore_value_length: u8, max_consensus_block_ids: u64, ) -> Self { Self { @@ -806,20 +743,9 @@ impl BootstrapClientMessageDeserializer { (Included(0), Included(u64::MAX)), (Included(0), Excluded(thread_count)), ), - ledger_step_deserializer: StreamingStepDeserializer::new(KeyDeserializer::new( - max_datastore_key_length, - true, - )), - pool_step_deserializer: StreamingStepDeserializer::new( - AsyncMessageIdDeserializer::new(thread_count), - ), - cycle_step_deserializer: StreamingStepDeserializer::new(U64VarIntDeserializer::new( + state_step_deserializer: StreamingStepDeserializer::new(VecU8Deserializer::new( Included(0), - Included(u64::MAX), - )), - slot_step_deserializer: StreamingStepDeserializer::new(SlotDeserializer::new( - (Included(0), Included(u64::MAX)), - (Included(0), Excluded(thread_count)), + Included(max_datastore_value_length as u64), )), block_ids_step_deserializer: StreamingStepDeserializer::new( PreHashSetDeserializer::new( @@ -875,21 +801,14 @@ impl Deserializer for BootstrapClientMessageDeserializer MessageClientTypeId::AskBootstrapPeers => { Ok((input, BootstrapClientMessage::AskBootstrapPeers)) } - MessageClientTypeId::AskBootstrapMipStore => { - Ok((input, BootstrapClientMessage::AskBootstrapMipStore)) - } MessageClientTypeId::AskFinalStatePart => { if input.is_empty() { Ok(( input, BootstrapClientMessage::AskBootstrapPart { last_slot: None, - last_ledger_step: StreamingStep::Started, - last_pool_step: StreamingStep::Started, - last_cycle_step: StreamingStep::Started, - last_credits_step: StreamingStep::Started, - last_ops_step: StreamingStep::Started, - last_de_step: StreamingStep::Started, + last_state_step: StreamingStep::Started, + last_versioning_step: StreamingStep::Started, last_consensus_step: StreamingStep::Started, send_last_start_period: true, }, @@ -899,23 +818,11 @@ impl Deserializer for BootstrapClientMessageDeserializer context("Failed last_slot deserialization", |input| { self.slot_deserializer.deserialize(input) }), - context("Faild last_ledger_step deserialization", |input| { - self.ledger_step_deserializer.deserialize(input) - }), - context("Failed last_pool_step deserialization", |input| { - self.pool_step_deserializer.deserialize(input) - }), - context("Failed last_cycle_step deserialization", |input| { - self.cycle_step_deserializer.deserialize(input) - }), - context("Failed last_credits_step 
deserialization", |input| { - self.slot_step_deserializer.deserialize(input) - }), - context("Failed last_ops_step deserialization", |input| { - self.slot_step_deserializer.deserialize(input) + context("Faild last_state_step deserialization", |input| { + self.state_step_deserializer.deserialize(input) }), - context("Failed last_de_step deserialization", |input| { - self.slot_step_deserializer.deserialize(input) + context("Faild last_versioning_step deserialization", |input| { + self.state_step_deserializer.deserialize(input) }), context("Failed last_consensus_step deserialization", |input| { self.block_ids_step_deserializer.deserialize(input) @@ -927,23 +834,15 @@ impl Deserializer for BootstrapClientMessageDeserializer .map( |( last_slot, - last_ledger_step, - last_pool_step, - last_cycle_step, - last_credits_step, - last_ops_step, - last_de_step, + last_state_step, + last_versioning_step, last_consensus_step, send_last_start_period, )| { BootstrapClientMessage::AskBootstrapPart { last_slot: Some(last_slot), - last_ledger_step, - last_pool_step, - last_cycle_step, - last_credits_step, - last_ops_step, - last_de_step, + last_state_step, + last_versioning_step, last_consensus_step, send_last_start_period, } diff --git a/massa-bootstrap/src/server.rs b/massa-bootstrap/src/server.rs index eeb51737d2d..0ef3802cf72 100644 --- a/massa-bootstrap/src/server.rs +++ b/massa-bootstrap/src/server.rs @@ -28,10 +28,9 @@ mod white_black_list; use crossbeam::channel::tick; use humantime::format_duration; -use massa_async_pool::AsyncMessageId; use massa_consensus_exports::{bootstrapable_graph::BootstrapableGraph, ConsensusController}; -use massa_final_state::{FinalState, FinalStateError}; -use massa_ledger_exports::Key as LedgerKey; +use massa_db::CHANGE_ID_DESER_ERROR; +use massa_final_state::FinalState; use massa_logging::massa_trace; use massa_models::{ block_id::BlockId, prehash::PreHashSet, slot::Slot, streaming_step::StreamingStep, @@ -41,7 +40,6 @@ use massa_models::{ use massa_protocol_exports::ProtocolController; use massa_signature::KeyPair; use massa_time::MassaTime; -use massa_versioning_worker::versioning::MipStore; use parking_lot::RwLock; use std::{ @@ -138,7 +136,6 @@ pub fn start_bootstrap_server( config: BootstrapConfig, keypair: KeyPair, version: Version, - mip_store: MipStore, ) -> Result { massa_trace!("bootstrap.lib.start_bootstrap_server", {}); @@ -184,7 +181,6 @@ pub fn start_bootstrap_server( version, ip_hist_map: HashMap::with_capacity(config.ip_list_max_size), bootstrap_config: config, - mip_store, } .event_loop(max_bootstraps) }) @@ -208,7 +204,6 @@ struct BootstrapServer<'a, L: BSEventPoller> { bootstrap_config: BootstrapConfig, version: Version, ip_hist_map: HashMap, - mip_store: MipStore, } impl BootstrapServer<'_, L> { @@ -317,7 +312,6 @@ impl BootstrapServer<'_, L> { let config = self.bootstrap_config.clone(); let bootstrap_count_token = bootstrap_sessions_counter.clone(); - let mip_store = self.mip_store.clone(); let _ = thread::Builder::new() .name(format!("bootstrap thread, peer: {}", remote_addr)) @@ -331,7 +325,6 @@ impl BootstrapServer<'_, L> { version, consensus_command_sender, protocol_controller, - mip_store, ) }); @@ -394,7 +387,6 @@ fn run_bootstrap_session( version: Version, consensus_command_sender: Box, protocol_controller: Box, - mip_store: MipStore, ) { debug!("running bootstrap for peer {}", remote_addr); let deadline = Instant::now() + config.bootstrap_timeout.to_duration(); @@ -407,7 +399,6 @@ fn run_bootstrap_session( consensus_command_sender, 
protocol_controller, deadline, - mip_store, ); // This drop allows the server to accept new connections before having to complete the error notifications @@ -448,12 +439,8 @@ pub fn stream_bootstrap_information( final_state: Arc>, consensus_controller: Box, mut last_slot: Option, - mut last_ledger_step: StreamingStep, - mut last_pool_step: StreamingStep, - mut last_cycle_step: StreamingStep, - mut last_credits_step: StreamingStep, - mut last_ops_step: StreamingStep, - mut last_de_step: StreamingStep, + mut last_state_step: StreamingStep>, + mut last_versioning_step: StreamingStep>, mut last_consensus_step: StreamingStep>, mut send_last_start_period: bool, bs_deadline: &Instant, @@ -467,16 +454,12 @@ pub fn stream_bootstrap_information( } let current_slot; - let ledger_part; - let async_pool_part; - let pos_cycle_part; - let pos_credits_part; - let exec_ops_part; - let exec_de_part; - let final_state_changes; + let state_part; + let versioning_part; let last_start_period; + let last_slot_before_downtime; - let mut slot_too_old = false; + let slot_too_old = false; // Scope of the final state read { @@ -487,71 +470,165 @@ pub fn stream_bootstrap_information( } else { None }; + last_slot_before_downtime = if send_last_start_period { + Some(final_state_read.last_slot_before_downtime) + } else { + None + }; - let (data, new_ledger_step) = final_state_read - .ledger - .get_ledger_part(last_ledger_step.clone())?; - ledger_part = data; - - let (pool_data, new_pool_step) = - final_state_read.async_pool.get_pool_part(last_pool_step); - async_pool_part = pool_data; - - let (cycle_data, new_cycle_step) = final_state_read - .pos_state - .get_cycle_history_part(last_cycle_step)?; - pos_cycle_part = cycle_data; - - let (credits_data, new_credits_step) = final_state_read - .pos_state - .get_deferred_credits_part(last_credits_step); - pos_credits_part = credits_data; - - let (ops_data, new_ops_step) = final_state_read - .executed_ops - .get_executed_ops_part(last_ops_step); - exec_ops_part = ops_data; - - let (de_data, new_de_step) = final_state_read - .executed_denunciations - .get_executed_de_part(last_de_step); - exec_de_part = de_data; - - if let Some(slot) = last_slot && slot != final_state_read.slot { - if slot > final_state_read.slot { - return Err(BootstrapError::GeneralError( - "Bootstrap cursor set to future slot".to_string(), - )); + let last_obtained = match &last_state_step { + StreamingStep::Started => None, + StreamingStep::Ongoing(last_key) => Some((last_key.clone(), last_slot.unwrap())), + StreamingStep::Finished(Some(last_key)) => { + Some((last_key.clone(), last_slot.unwrap())) } - final_state_changes = match final_state_read.get_state_changes_part( - slot, - new_ledger_step.clone(), - new_pool_step, - new_cycle_step, - new_credits_step, - new_ops_step, - new_de_step, - ) { - Ok(data) => data, - Err(err) if matches!(err, FinalStateError::InvalidSlot(_)) => { - slot_too_old = true; - Vec::default() + StreamingStep::Finished(None) => { + warn!("Bootstrap is finished but nothing has been streamed yet"); + None + } + }; + + state_part = final_state_read + .db + .read() + .get_batch_to_stream(last_obtained) + .map_err(|e| { + BootstrapError::GeneralError(format!("Error get_batch_to_stream: {}", e)) + })?; + + // TODO: Re-design the cursors states (e.g. 
in a state machine we can test independently) + let new_state_step = match ( + &last_state_step, + state_part.is_empty(), + state_part.new_elements.last_key_value(), + ) { + // We finished streaming the state already, but we received new elements while streaming consensus: we stay in sync + (StreamingStep::Finished(Some(_last_key)), false, Some((key, _))) => { + StreamingStep::Finished(Some(key.clone())) + } + // We finished streaming the state already, and we received nothing new (or only updates) + (StreamingStep::Finished(Some(last_key)), _, _) => { + StreamingStep::Finished(Some(last_key.clone())) + } + // We receive our first empty state batch + (StreamingStep::Ongoing(last_key), true, _) => { + StreamingStep::Finished(Some(last_key.clone())) + } + // We still need to stream the state - no new elements + (StreamingStep::Ongoing(last_key), false, None) => { + StreamingStep::Ongoing(last_key.clone()) + } + // We receive an empty batch, but we've just started streaming + (StreamingStep::Started, true, _) => { + warn!("Bootstrap is finished but nothing has been streamed yet"); + StreamingStep::Finished(None) + } + // We still need to stream the state - new elements + (_, false, Some((new_last_key, _))) => StreamingStep::Ongoing(new_last_key.clone()), + // We finished streaming the (empty) state already, and we received nothing new while streaming the versioning + (StreamingStep::Finished(None), true, _) => StreamingStep::Finished(None), + // Else, we are in an inconsistent state + _ => { + if state_part.is_empty() && state_part.new_elements.last_key_value().is_some() { + // If is_empty() has a correct implementation, this should never happen + return Err(BootstrapError::GeneralError(String::from( + "Bootstrap state_part is_empty() but it also contains new elements", + ))); + } else { + // StreamingStep::Started, false, None + return Err(BootstrapError::GeneralError(String::from( + "Bootstrap started but we have no new elements to stream", + ))); + } + } + }; + + let last_obtained_versioning = match &last_versioning_step { + StreamingStep::Started => None, + StreamingStep::Ongoing(last_key) => Some((last_key.clone(), last_slot.unwrap())), + StreamingStep::Finished(Some(last_key)) => { + Some((last_key.clone(), last_slot.unwrap())) + } + StreamingStep::Finished(None) => { + warn!("Bootstrap is finished but nothing has been streamed yet"); + None + } + }; + + versioning_part = final_state_read + .db + .read() + .get_versioning_batch_to_stream(last_obtained_versioning) + .map_err(|e| { + BootstrapError::GeneralError(format!( + "Error get_versioning_batch_to_stream: {}", + e + )) + })?; + + // TODO: Re-design the cursors states (e.g.
in a state machine we can test independently) + let new_versioning_step = match ( + &last_versioning_step, + versioning_part.is_empty(), + versioning_part.new_elements.last_key_value(), + ) { + // We finished streaming the versioning already, but we received new elements while streaming consensus: we stay in sync + (StreamingStep::Finished(Some(_last_key)), false, Some((key, _))) => { + StreamingStep::Finished(Some(key.clone())) + } + // We finished streaming the versioning already, and we received nothing new (or only updates) + (StreamingStep::Finished(Some(last_key)), _, _) => { + StreamingStep::Finished(Some(last_key.clone())) + } + // We receive our first empty versioning batch + (StreamingStep::Ongoing(last_key), true, _) => { + StreamingStep::Finished(Some(last_key.clone())) + } + // We still need to stream the versioning - no new elements + (StreamingStep::Ongoing(last_key), false, None) => { + StreamingStep::Ongoing(last_key.clone()) + } + // We receive an empty batch, but we've just started streaming + (StreamingStep::Started, true, _) => { + warn!("Bootstrap is finished but nothing has been streamed yet"); + StreamingStep::Finished(None) + } + // We still need to stream the versioning - new elements + (_, false, Some((new_last_key, _))) => StreamingStep::Ongoing(new_last_key.clone()), + // We finished streaming the (empty) versioning already, and we received nothing new while streaming the state + (StreamingStep::Finished(None), true, _) => StreamingStep::Finished(None), + // Else, we are in an inconsistent state + _ => { + if versioning_part.is_empty() && versioning_part.new_elements.last_key_value().is_some() { + // If is_empty() has a correct implementation, this should never happen + return Err(BootstrapError::GeneralError(String::from( + "Bootstrap versioning_part is_empty() but it also contains new elements", + ))); + } else { + // StreamingStep::Started, false, None + return Err(BootstrapError::GeneralError(String::from( + "Bootstrap started but we have no new elements to stream", + ))); + } + } + }; + + let db_slot = final_state_read + .db + .read() + .get_change_id() + .expect(CHANGE_ID_DESER_ERROR); + + if let Some(slot) = last_slot && slot > db_slot { + return Err(BootstrapError::GeneralError( + "Bootstrap cursor set to future slot".to_string(), + )); } // Update cursors for next turn - last_ledger_step = new_ledger_step; - last_pool_step = new_pool_step; - last_cycle_step = new_cycle_step; - last_credits_step = new_credits_step; - last_ops_step = new_ops_step; - last_de_step = new_de_step; - last_slot = Some(final_state_read.slot); - current_slot = final_state_read.slot; + last_state_step = new_state_step; + last_versioning_step = new_versioning_step; + last_slot = Some(db_slot); + current_slot = db_slot; send_last_start_period = false; } @@ -560,33 +637,22 @@ pub fn stream_bootstrap_information( } // Setup final state global cursor - let final_state_global_step = if last_ledger_step.finished() - && last_pool_step.finished() - && last_cycle_step.finished() - && last_credits_step.finished() - && last_ops_step.finished() - && last_de_step.finished() - { - StreamingStep::Finished(Some(current_slot)) - } else { - StreamingStep::Ongoing(current_slot) - }; - - // Setup final state changes cursor - let final_state_changes_step = if final_state_changes.is_empty() { - StreamingStep::Finished(Some(current_slot)) - } else { - StreamingStep::Ongoing(current_slot) - }; + let final_state_global_step = + if last_state_step.finished() && last_versioning_step.finished() { + StreamingStep::Finished(Some(current_slot)) + } else { + StreamingStep::Ongoing(current_slot)
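// [Editor's sketch, illustrative and not part of the patch] The state and versioning
// cursor matches above share one transition rule, so the TODO about a separately
// testable state machine could be a pure function roughly like this (hypothetical
// helper; `None` maps to the inconsistent-state error):
//
//     fn advance(step: &StreamingStep<Vec<u8>>, batch_empty: bool,
//                last_new_key: Option<&Vec<u8>>) -> Option<StreamingStep<Vec<u8>>> {
//         use StreamingStep::*;
//         match (step, batch_empty, last_new_key) {
//             (Finished(Some(_)), false, Some(k)) => Some(Finished(Some(k.clone()))),
//             (Finished(Some(k)), _, _) => Some(Finished(Some(k.clone()))),
//             (Ongoing(k), true, _) => Some(Finished(Some(k.clone()))),
//             (Ongoing(k), false, None) => Some(Ongoing(k.clone())),
//             (Started, true, _) => Some(Finished(None)),
//             (_, false, Some(k)) => Some(Ongoing(k.clone())),
//             (Finished(None), true, _) => Some(Finished(None)),
//             _ => None, // e.g. Started with a non-empty batch but no new elements
//         }
//     }
//
// Each arm mirrors one arm of the matches above, which would make the rule
// unit-testable in isolation from the database reads.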
+ }; // Stream consensus blocks if final state base bootstrap is finished let mut consensus_part = BootstrapableGraph { final_blocks: Default::default(), }; let mut consensus_outdated_ids: PreHashSet = PreHashSet::default(); + if final_state_global_step.finished() { let (part, outdated_ids, new_consensus_step) = consensus_controller - .get_bootstrap_part(last_consensus_step, final_state_changes_step)?; + .get_bootstrap_part(last_consensus_step, final_state_global_step)?; consensus_part = part; consensus_outdated_ids = outdated_ids; last_consensus_step = new_consensus_step; @@ -607,10 +673,7 @@ pub fn stream_bootstrap_information( // If the consensus streaming is finished (also meaning that consensus slot == final state slot) exit // We don't bother with the bs-deadline, as this is the last step of the bootstrap process - defer to general write-timeout - if final_state_global_step.finished() - && final_state_changes_step.finished() - && last_consensus_step.finished() - { + if final_state_global_step.finished() && last_consensus_step.finished() { server.send_msg(write_timeout, BootstrapServerMessage::BootstrapFinished)?; break; } @@ -623,16 +686,12 @@ pub fn stream_bootstrap_information( write_timeout, BootstrapServerMessage::BootstrapPart { slot: current_slot, - ledger_part, - async_pool_part, - pos_cycle_part, - pos_credits_part, - exec_ops_part, - exec_de_part, - final_state_changes, + state_part, + versioning_part, consensus_part, consensus_outdated_ids, last_start_period, + last_slot_before_downtime, }, )?; } @@ -659,7 +718,6 @@ fn manage_bootstrap( consensus_controller: Box, protocol_controller: Box, deadline: Instant, - mip_store: MipStore, ) -> Result<(), BootstrapError> { massa_trace!("bootstrap.lib.manage_bootstrap", {}); let read_error_timeout: Duration = bootstrap_config.read_error_timeout.into(); @@ -721,12 +779,8 @@ fn manage_bootstrap( } BootstrapClientMessage::AskBootstrapPart { last_slot, - last_ledger_step, - last_pool_step, - last_cycle_step, - last_credits_step, - last_ops_step, - last_de_step, + last_state_step, + last_versioning_step, last_consensus_step, send_last_start_period, } => { @@ -735,29 +789,14 @@ fn manage_bootstrap( final_state.clone(), consensus_controller.clone(), last_slot, - last_ledger_step, - last_pool_step, - last_cycle_step, - last_credits_step, - last_ops_step, - last_de_step, + last_state_step, + last_versioning_step, last_consensus_step, send_last_start_period, &deadline, bootstrap_config.write_timeout.to_duration(), )?; } - BootstrapClientMessage::AskBootstrapMipStore => { - let vs = mip_store.0.read().to_owned(); - let Some(write_timeout) = step_timeout_duration(&deadline, &bootstrap_config.write_timeout.to_duration()) else { - return Err(BootstrapError::Interupted("insufficient time left to respond te request for mip-store".to_string())); - }; - - server.send_msg( - write_timeout, - BootstrapServerMessage::BootstrapMipStore { store: vs.clone() }, - )? 
- } BootstrapClientMessage::BootstrapSuccess => break Ok(()), BootstrapClientMessage::BootstrapError { error } => { break Err(BootstrapError::ReceivedError(error)); diff --git a/massa-bootstrap/src/server/white_black_list.rs b/massa-bootstrap/src/server/white_black_list.rs index e82b5e68d71..3afb1cef57f 100644 --- a/massa-bootstrap/src/server/white_black_list.rs +++ b/massa-bootstrap/src/server/white_black_list.rs @@ -9,8 +9,7 @@ use std::{ use crate::error::BootstrapError; use massa_logging::massa_trace; use parking_lot::RwLock; -use tracing::info; -use tracing::log::warn; +use tracing::{info, warn}; use crate::tools::normalize_ip; diff --git a/massa-bootstrap/src/settings.rs b/massa-bootstrap/src/settings.rs index ac2a8fca8d8..b7f220df71d 100644 --- a/massa-bootstrap/src/settings.rs +++ b/massa-bootstrap/src/settings.rs @@ -83,8 +83,8 @@ pub struct BootstrapConfig { pub max_operations_per_block: u32, /// max bootstrap error length pub max_bootstrap_error_length: u64, - /// max bootstrap final state parts size - pub max_bootstrap_final_state_parts_size: u64, + /// max bootstrap final state new_elements + pub max_new_elements: u64, /// max datastore entry count pub max_datastore_entry_count: u64, /// max datastore value length @@ -160,7 +160,7 @@ pub struct BootstrapClientConfig { pub thread_count: u8, pub randomness_size_bytes: usize, pub max_bootstrap_error_length: u64, - pub max_bootstrap_final_state_parts_size: u64, + pub max_new_elements: u64, pub max_datastore_entry_count: u64, pub max_datastore_key_length: u8, pub max_datastore_value_length: u64, @@ -191,7 +191,7 @@ pub struct BootstrapServerMessageDeserializerArgs { pub max_listeners_per_peer: u32, pub max_bootstrap_blocks_length: u32, pub max_operations_per_block: u32, - pub max_bootstrap_final_state_parts_size: u64, + pub max_new_elements: u64, pub max_async_pool_changes: u64, pub max_async_pool_length: u64, pub max_async_message_data: u64, diff --git a/massa-bootstrap/src/tests/binders.rs b/massa-bootstrap/src/tests/binders.rs index db876fc4e2c..3284b301644 100644 --- a/massa-bootstrap/src/tests/binders.rs +++ b/massa-bootstrap/src/tests/binders.rs @@ -9,8 +9,8 @@ use crate::{ use massa_models::config::{ BOOTSTRAP_RANDOMNESS_SIZE_BYTES, CONSENSUS_BOOTSTRAP_PART_SIZE, ENDORSEMENT_COUNT, MAX_ADVERTISE_LENGTH, MAX_ASYNC_MESSAGE_DATA, MAX_ASYNC_POOL_LENGTH, - MAX_BOOTSTRAP_ASYNC_POOL_CHANGES, MAX_BOOTSTRAP_BLOCKS, MAX_BOOTSTRAP_ERROR_LENGTH, - MAX_BOOTSTRAP_FINAL_STATE_PARTS_SIZE, MAX_DATASTORE_ENTRY_COUNT, MAX_DATASTORE_KEY_LENGTH, + MAX_BOOTSTRAPPED_NEW_ELEMENTS, MAX_BOOTSTRAP_ASYNC_POOL_CHANGES, MAX_BOOTSTRAP_BLOCKS, + MAX_BOOTSTRAP_ERROR_LENGTH, MAX_DATASTORE_ENTRY_COUNT, MAX_DATASTORE_KEY_LENGTH, MAX_DATASTORE_VALUE_LENGTH, MAX_DEFERRED_CREDITS_LENGTH, MAX_DENUNCIATIONS_PER_BLOCK_HEADER, MAX_DENUNCIATION_CHANGES_LENGTH, MAX_EXECUTED_OPS_CHANGES_LENGTH, MAX_EXECUTED_OPS_LENGTH, MAX_LEDGER_CHANGES_COUNT, MAX_LISTENERS_PER_PEER, MAX_OPERATIONS_PER_BLOCK, @@ -28,7 +28,7 @@ use std::str::FromStr; lazy_static::lazy_static! 
{ pub static ref BOOTSTRAP_CONFIG_KEYPAIR: (BootstrapConfig, KeyPair) = { - let keypair = KeyPair::generate(); + let keypair = KeyPair::generate(0).unwrap(); (get_bootstrap_config(NodeId::new(keypair.get_public_key())), keypair) }; } @@ -45,7 +45,7 @@ impl BootstrapClientBinder { thread_count: THREAD_COUNT, randomness_size_bytes: BOOTSTRAP_RANDOMNESS_SIZE_BYTES, max_bootstrap_error_length: MAX_BOOTSTRAP_ERROR_LENGTH, - max_bootstrap_final_state_parts_size: MAX_BOOTSTRAP_FINAL_STATE_PARTS_SIZE, + max_new_elements: MAX_BOOTSTRAPPED_NEW_ELEMENTS, max_datastore_entry_count: MAX_DATASTORE_ENTRY_COUNT, max_datastore_key_length: MAX_DATASTORE_KEY_LENGTH, max_datastore_value_length: MAX_DATASTORE_VALUE_LENGTH, @@ -94,10 +94,10 @@ fn test_binders() { bootstrap_config.bootstrap_list[0].1.get_public_key(), ); - let peer_id1 = PeerId::from_bytes(KeyPair::generate().get_public_key().to_bytes()).unwrap(); - let peer_id2 = PeerId::from_bytes(KeyPair::generate().get_public_key().to_bytes()).unwrap(); - let peer_id3 = PeerId::from_bytes(KeyPair::generate().get_public_key().to_bytes()).unwrap(); - let peer_id4 = PeerId::from_bytes(KeyPair::generate().get_public_key().to_bytes()).unwrap(); + let peer_id1 = PeerId::from_public_key(KeyPair::generate(0).unwrap().get_public_key()); + let peer_id2 = PeerId::from_public_key(KeyPair::generate(0).unwrap().get_public_key()); + let peer_id3 = PeerId::from_public_key(KeyPair::generate(0).unwrap().get_public_key()); + let peer_id4 = PeerId::from_public_key(KeyPair::generate(0).unwrap().get_public_key()); let server_thread = std::thread::Builder::new() .name("test_binders::server_thread".to_string()) @@ -244,10 +244,10 @@ fn test_binders_double_send_server_works() { bootstrap_config.bootstrap_list[0].1.get_public_key(), ); - let peer_id1 = PeerId::from_bytes(KeyPair::generate().get_public_key().to_bytes()).unwrap(); - let peer_id2 = PeerId::from_bytes(KeyPair::generate().get_public_key().to_bytes()).unwrap(); - let peer_id3 = PeerId::from_bytes(KeyPair::generate().get_public_key().to_bytes()).unwrap(); - let peer_id4 = PeerId::from_bytes(KeyPair::generate().get_public_key().to_bytes()).unwrap(); + let peer_id1 = PeerId::from_public_key(KeyPair::generate(0).unwrap().get_public_key()); + let peer_id2 = PeerId::from_public_key(KeyPair::generate(0).unwrap().get_public_key()); + let peer_id3 = PeerId::from_public_key(KeyPair::generate(0).unwrap().get_public_key()); + let peer_id4 = PeerId::from_public_key(KeyPair::generate(0).unwrap().get_public_key()); let server_thread = std::thread::Builder::new() .name("test_buinders_double_send_server_works::server_thread".to_string()) @@ -375,7 +375,7 @@ fn test_binders_try_double_send_client_works() { bootstrap_config.bootstrap_list[0].1.get_public_key(), ); - let peer_id1 = PeerId::from_bytes(KeyPair::generate().get_public_key().to_bytes()).unwrap(); + let peer_id1 = PeerId::from_public_key(KeyPair::generate(0).unwrap().get_public_key()); let server_thread = std::thread::Builder::new() .name("test_buinders_double_send_client_works::server_thread".to_string()) diff --git a/massa-bootstrap/src/tests/scenarios.rs b/massa-bootstrap/src/tests/scenarios.rs index b0c56f13ce3..929c2d42039 100644 --- a/massa-bootstrap/src/tests/scenarios.rs +++ b/massa-bootstrap/src/tests/scenarios.rs @@ -5,32 +5,28 @@ use super::tools::{ }; use crate::listener::PollEvent; use crate::tests::tools::{ - get_random_async_pool_changes, get_random_executed_de_changes, get_random_executed_ops_changes, - get_random_pos_changes, + assert_eq_bootstrap_graph, 
get_random_async_pool_changes, get_random_executed_de_changes, + get_random_executed_ops_changes, get_random_pos_changes, }; use crate::{ - client::MockBSConnector, - get_state, - server::MockBSEventPoller, - start_bootstrap_server, - tests::tools::{assert_eq_bootstrap_graph, get_bootstrap_config}, + client::MockBSConnector, get_state, server::MockBSEventPoller, start_bootstrap_server, + tests::tools::get_bootstrap_config, }; use crate::{BootstrapConfig, BootstrapManager, BootstrapTcpListener}; use massa_async_pool::AsyncPoolConfig; use massa_consensus_exports::{ bootstrapable_graph::BootstrapableGraph, test_exports::MockConsensusControllerImpl, }; +use massa_db::{DBBatch, MassaDB, MassaDBConfig}; use massa_executed_ops::{ExecutedDenunciationsConfig, ExecutedOpsConfig}; use massa_final_state::{ test_exports::{assert_eq_final_state, assert_eq_final_state_hash}, FinalState, FinalStateConfig, StateChanges, }; -use massa_hash::{Hash, HASH_SIZE_BYTES}; use massa_ledger_exports::LedgerConfig; use massa_models::config::{ - DENUNCIATION_EXPIRE_PERIODS, ENDORSEMENT_COUNT, EXECUTED_OPS_BOOTSTRAP_PART_SIZE, - MAX_DENUNCIATIONS_PER_BLOCK_HEADER, MIP_STORE_STATS_BLOCK_CONSIDERED, - MIP_STORE_STATS_COUNTERS_MAX, + DENUNCIATION_EXPIRE_PERIODS, ENDORSEMENT_COUNT, GENESIS_TIMESTAMP, MAX_DEFERRED_CREDITS_LENGTH, + MAX_DENUNCIATIONS_PER_BLOCK_HEADER, MAX_PRODUCTION_STATS_LENGTH, MAX_ROLLS_COUNT_LENGTH, T0, }; use massa_models::{ address::Address, config::MAX_DATASTORE_VALUE_LENGTH, node::NodeId, slot::Slot, @@ -50,20 +46,17 @@ use massa_pos_worker::start_selector_worker; use massa_protocol_exports::MockProtocolController; use massa_signature::KeyPair; use massa_time::MassaTime; -use massa_versioning_worker::versioning::{ - MipComponent, MipInfo, MipState, MipStatsConfig, MipStore, -}; use mockall::Sequence; use parking_lot::RwLock; -use std::collections::HashMap; use std::net::{SocketAddr, TcpStream}; use std::sync::{Condvar, Mutex}; +use std::vec; use std::{path::PathBuf, str::FromStr, sync::Arc, time::Duration}; use tempfile::TempDir; lazy_static::lazy_static! 
{ pub static ref BOOTSTRAP_CONFIG_KEYPAIR: (BootstrapConfig, KeyPair) = { - let keypair = KeyPair::generate(); + let keypair = KeyPair::generate(0).unwrap(); (get_bootstrap_config(NodeId::new(keypair.get_public_key())), keypair) }; } @@ -73,7 +66,7 @@ fn mock_bootstrap_manager(addr: SocketAddr, bootstrap_config: BootstrapConfig) - let rolls_path = PathBuf::from_str("../massa-node/base_config/initial_rolls.json").unwrap(); let thread_count = 2; let periods_per_cycle = 2; - let genesis_address = Address::from_public_key(&KeyPair::generate().get_public_key()); + let genesis_address = Address::from_public_key(&KeyPair::generate(0).unwrap().get_public_key()); // setup selector local config let selector_local_config = SelectorConfig { thread_count, @@ -82,22 +75,6 @@ fn mock_bootstrap_manager(addr: SocketAddr, bootstrap_config: BootstrapConfig) - ..Default::default() }; - // create a MIP store - let mip_stats_cfg = MipStatsConfig { - block_count_considered: MIP_STORE_STATS_BLOCK_CONSIDERED, - counters_max: MIP_STORE_STATS_COUNTERS_MAX, - }; - let mi_1 = MipInfo { - name: "MIP-0002".to_string(), - version: 2, - components: HashMap::from([(MipComponent::Address, 1)]), - start: MassaTime::from(5), - timeout: MassaTime::from(10), - activation_delay: MassaTime::from(4), - }; - let state_1 = MipState::new(MassaTime::from(3)); - let mip_store = MipStore::try_from(([(mi_1, state_1)], mip_stats_cfg.clone())).unwrap(); - // start bootstrap manager let (_, keypair): &(BootstrapConfig, KeyPair) = &BOOTSTRAP_CONFIG_KEYPAIR; let mut mocked1 = Box::new(MockProtocolController::new()); @@ -111,31 +88,36 @@ fn mock_bootstrap_manager(addr: SocketAddr, bootstrap_config: BootstrapConfig) - // setup final state local config let temp_dir = TempDir::new().unwrap(); + let db_config = MassaDBConfig { + path: temp_dir.path().to_path_buf(), + max_history_length: 10, + max_new_elements: 100, + thread_count, + }; + let db = Arc::new(RwLock::new(MassaDB::new(db_config))); let final_state_local_config = FinalStateConfig { ledger_config: LedgerConfig { thread_count, initial_ledger_path: "".into(), disk_ledger_path: temp_dir.path().to_path_buf(), max_key_length: MAX_DATASTORE_KEY_LENGTH, - max_ledger_part_size: 100_000, max_datastore_value_length: MAX_DATASTORE_VALUE_LENGTH, }, async_pool_config: AsyncPoolConfig { thread_count, max_length: MAX_ASYNC_POOL_LENGTH, max_async_message_data: MAX_ASYNC_MESSAGE_DATA, - bootstrap_part_size: 100, + max_key_length: MAX_DATASTORE_KEY_LENGTH as u32, }, pos_config: PoSConfig { periods_per_cycle, thread_count, cycle_history_length: POS_SAVED_CYCLES, - credits_bootstrap_part_size: 100, - }, - executed_ops_config: ExecutedOpsConfig { - thread_count, - bootstrap_part_size: 10, + max_rolls_length: MAX_ROLLS_COUNT_LENGTH, + max_production_stats_length: MAX_PRODUCTION_STATS_LENGTH, + max_credit_length: MAX_DEFERRED_CREDITS_LENGTH, }, + executed_ops_config: ExecutedOpsConfig { thread_count }, final_history_length: 100, initial_seed_string: "".into(), initial_rolls_path: "".into(), @@ -143,11 +125,14 @@ fn mock_bootstrap_manager(addr: SocketAddr, bootstrap_config: BootstrapConfig) - periods_per_cycle, executed_denunciations_config: ExecutedDenunciationsConfig { denunciation_expire_periods: DENUNCIATION_EXPIRE_PERIODS, - bootstrap_part_size: EXECUTED_OPS_BOOTSTRAP_PART_SIZE, + thread_count, + endorsement_count: ENDORSEMENT_COUNT, }, endorsement_count: ENDORSEMENT_COUNT, max_executed_denunciations_length: 1000, max_denunciations_per_block_header: MAX_DENUNCIATIONS_PER_BLOCK_HEADER, + t0: T0, + 
genesis_timestamp: *GENESIS_TIMESTAMP, }; let final_state_server = Arc::new(RwLock::new(get_random_final_state_bootstrap( @@ -156,10 +141,11 @@ fn mock_bootstrap_manager(addr: SocketAddr, bootstrap_config: BootstrapConfig) - "", &rolls_path, server_selector_controller.clone(), - Hash::from_bytes(&[0; HASH_SIZE_BYTES]), + db.clone(), ) .unwrap(), final_state_local_config.clone(), + db.clone(), ))); let mut stream_mock1 = Box::new(MockConsensusControllerImpl::new()); let mut stream_mock2 = Box::new(MockConsensusControllerImpl::new()); @@ -179,7 +165,6 @@ fn mock_bootstrap_manager(addr: SocketAddr, bootstrap_config: BootstrapConfig) - bootstrap_config.clone(), keypair.clone(), Version::from_str("TEST.1.10").unwrap(), - mip_store, ) .unwrap() } @@ -200,58 +185,56 @@ fn test_bootstrap_server() { let periods_per_cycle = 2; let (bootstrap_config, keypair): &(BootstrapConfig, KeyPair) = &BOOTSTRAP_CONFIG_KEYPAIR; let rolls_path = PathBuf::from_str("../massa-node/base_config/initial_rolls.json").unwrap(); - let genesis_address = Address::from_public_key(&KeyPair::generate().get_public_key()); + let genesis_address = Address::from_public_key(&KeyPair::generate(0).unwrap().get_public_key()); // let (consensus_controller, mut consensus_event_receiver) = // MockConsensusController::new_with_receiver(); // let (network_cmd_tx, mut network_cmd_rx) = mpsc::channel::(5); - // create a MIP store - let mip_stats_cfg = MipStatsConfig { - block_count_considered: MIP_STORE_STATS_BLOCK_CONSIDERED, - counters_max: MIP_STORE_STATS_COUNTERS_MAX, + // setup final state local config + let temp_dir_server = TempDir::new().unwrap(); + let db_server_config = MassaDBConfig { + path: temp_dir_server.path().to_path_buf(), + max_history_length: 10, + max_new_elements: 100, + thread_count, }; - let mi_1 = MipInfo { - name: "MIP-0002".to_string(), - version: 2, - components: HashMap::from([(MipComponent::Address, 1)]), - start: MassaTime::from(5), - timeout: MassaTime::from(10), - activation_delay: MassaTime::from(4), + let db_server = Arc::new(RwLock::new(MassaDB::new(db_server_config))); + let temp_dir_client = TempDir::new().unwrap(); + let db_client_config = MassaDBConfig { + path: temp_dir_client.path().to_path_buf(), + max_history_length: 10, + max_new_elements: 100, + thread_count, }; - let state_1 = MipState::new(MassaTime::from(3)); - let mip_store = MipStore::try_from(([(mi_1, state_1)], mip_stats_cfg.clone())).unwrap(); - - // setup final state local config - let temp_dir = TempDir::new().unwrap(); + let db_client = Arc::new(RwLock::new(MassaDB::new(db_client_config))); let final_state_local_config = FinalStateConfig { ledger_config: LedgerConfig { thread_count, initial_ledger_path: "".into(), - disk_ledger_path: temp_dir.path().to_path_buf(), + disk_ledger_path: temp_dir_server.path().to_path_buf(), max_key_length: MAX_DATASTORE_KEY_LENGTH, - max_ledger_part_size: 100_000, max_datastore_value_length: MAX_DATASTORE_VALUE_LENGTH, }, async_pool_config: AsyncPoolConfig { thread_count, max_length: MAX_ASYNC_POOL_LENGTH, max_async_message_data: MAX_ASYNC_MESSAGE_DATA, - bootstrap_part_size: 100, + max_key_length: MAX_DATASTORE_KEY_LENGTH as u32, }, pos_config: PoSConfig { periods_per_cycle, thread_count, cycle_history_length: POS_SAVED_CYCLES, - credits_bootstrap_part_size: 100, - }, - executed_ops_config: ExecutedOpsConfig { - thread_count, - bootstrap_part_size: 10, + max_rolls_length: MAX_ROLLS_COUNT_LENGTH, + max_production_stats_length: MAX_PRODUCTION_STATS_LENGTH, + max_credit_length: MAX_DEFERRED_CREDITS_LENGTH, 
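// [editor's note, not part of the patch] A pattern visible throughout this diff: the old per-part sizing knobs (`max_ledger_part_size`, `bootstrap_part_size`, `credits_bootstrap_part_size`) are replaced by absolute length bounds (`max_rolls_length`, `max_production_stats_length`, `max_credit_length`); part sizing now appears to be governed instead by the shared MassaDB `max_new_elements` setting.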
}, + executed_ops_config: ExecutedOpsConfig { thread_count }, executed_denunciations_config: ExecutedDenunciationsConfig { denunciation_expire_periods: DENUNCIATION_EXPIRE_PERIODS, - bootstrap_part_size: 10, + thread_count, + endorsement_count: ENDORSEMENT_COUNT, }, final_history_length: 100, initial_seed_string: "".into(), @@ -261,6 +244,8 @@ fn test_bootstrap_server() { thread_count, periods_per_cycle, max_denunciations_per_block_header: MAX_DENUNCIATIONS_PER_BLOCK_HEADER, + t0: T0, + genesis_timestamp: *GENESIS_TIMESTAMP, }; // setup selector local config @@ -279,28 +264,86 @@ fn test_bootstrap_server() { start_selector_worker(selector_local_config) .expect("could not start client selector controller"); + let pos_server = PoSFinalState::new( + final_state_local_config.pos_config.clone(), + "", + &rolls_path, + server_selector_controller.clone(), + db_server.clone(), + ); + // setup final states let final_state_server = Arc::new(RwLock::new(get_random_final_state_bootstrap( - PoSFinalState::new( - final_state_local_config.pos_config.clone(), - "", - &rolls_path, - server_selector_controller.clone(), - Hash::from_bytes(&[0; HASH_SIZE_BYTES]), - ) - .unwrap(), + pos_server.unwrap(), final_state_local_config.clone(), + db_server.clone(), ))); + + let mut current_slot: Slot = Slot::new(0, thread_count - 1); + + for _ in 0..10 { + std::thread::sleep(Duration::from_millis(500)); + + let mut final_write = final_state_server.write(); + + let changes = StateChanges { + pos_changes: get_random_pos_changes(10), + ledger_changes: get_random_ledger_changes(10), + async_pool_changes: get_random_async_pool_changes(10, thread_count), + executed_ops_changes: get_random_executed_ops_changes(10), + executed_denunciations_changes: get_random_executed_de_changes(10), + }; + + let next = current_slot.get_next_slot(thread_count).unwrap(); + + let mut batch = DBBatch::new(); + + final_write + .pos_state + .apply_changes_to_batch(changes.pos_changes.clone(), next, false, &mut batch) + .unwrap(); + final_write + .ledger + .apply_changes_to_batch(changes.ledger_changes.clone(), &mut batch); + final_write + .async_pool + .apply_changes_to_batch(&changes.async_pool_changes, &mut batch); + final_write.executed_ops.apply_changes_to_batch( + changes.executed_ops_changes.clone(), + next, + &mut batch, + ); + final_write.executed_denunciations.apply_changes_to_batch( + changes.executed_denunciations_changes.clone(), + next, + &mut batch, + ); + + final_write + .db + .write() + .write_batch(batch, Default::default(), Some(next)); + + let final_state_hash = final_write.db.read().get_db_hash(); + let cycle = next.get_cycle(final_state_local_config.periods_per_cycle.clone()); + final_write + .pos_state + .feed_cycle_state_hash(cycle, final_state_hash); + + current_slot = next; + } + let final_state_client = Arc::new(RwLock::new(FinalState::create_final_state( PoSFinalState::new( final_state_local_config.pos_config.clone(), "", &rolls_path, client_selector_controller.clone(), - Hash::from_bytes(&[0; HASH_SIZE_BYTES]), + db_client.clone(), ) .unwrap(), - final_state_local_config, + final_state_local_config.clone(), + db_client.clone(), ))); // setup final state mocks. 
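// [editor's note - illustrative sketch, not part of the patch] The setup above
// exercises the new batched-write flow this PR introduces: each module stages its
// changes into a shared DBBatch, the batch is committed atomically to the
// RocksDB-backed MassaDB, and the state hash is then read back from the db itself
// (replacing the old `changes_history` push). A minimal sketch of one update,
// assuming the MassaDB/DBBatch API used throughout this diff; `final_state`,
// `changes`, `slot` and `periods_per_cycle` stand in for the test's local values:
//
//     let mut batch = DBBatch::new();
//     final_state.pos_state
//         .apply_changes_to_batch(changes.pos_changes.clone(), slot, false, &mut batch)
//         .unwrap();
//     final_state.ledger
//         .apply_changes_to_batch(changes.ledger_changes.clone(), &mut batch);
//     // one atomic write commits every module's staged changes for `slot`
//     final_state.db.write().write_batch(batch, Default::default(), Some(slot));
//     // the global state hash is now derived from the db contents
//     let state_hash = final_state.db.read().get_db_hash();
//     final_state.pos_state
//         .feed_cycle_state_hash(slot.get_cycle(periods_per_cycle), state_hash);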
@@ -329,25 +372,25 @@ fn test_bootstrap_server() { let sent_graph_clone = sent_graph.clone(); stream_mock3 .expect_get_bootstrap_part() - .times(10) + .times(2) .in_sequence(&mut seq) - .returning(move |_, slot| { - if StreamingStep::Ongoing(Slot::new(1, 1)) == slot { - Ok(( + .returning( + move |last_consensus_step, _slot| match last_consensus_step { + StreamingStep::Started => Ok(( sent_graph_clone.clone(), PreHashSet::default(), - StreamingStep::Started, - )) - } else { - Ok(( + StreamingStep::Ongoing(PreHashSet::default()), + )), + _ => Ok(( BootstrapableGraph { final_blocks: vec![], }, PreHashSet::default(), StreamingStep::Finished(None), - )) - } - }); + )), + }, + ); + stream_mock2 .expect_clone_box() .return_once(move || stream_mock3); @@ -355,8 +398,6 @@ fn test_bootstrap_server() { .expect_clone_box() .return_once(move || stream_mock2); - let cloned_store = mip_store.clone(); - // Start the bootstrap server thread let bootstrap_manager_thread = std::thread::Builder::new() .name("bootstrap_thread".to_string()) @@ -369,7 +410,6 @@ fn test_bootstrap_server() { bootstrap_config.clone(), keypair.clone(), Version::from_str("TEST.1.10").unwrap(), - cloned_store, ) .unwrap() }) @@ -381,23 +421,59 @@ fn test_bootstrap_server() { let mod_thread = std::thread::Builder::new() .name("modifier thread".to_string()) .spawn(move || { + let mut current_slot = Slot::new(5, 1); + for _ in 0..10 { std::thread::sleep(Duration::from_millis(500)); let mut final_write = final_state_server_clone2.write(); - let next = final_write.slot.get_next_slot(thread_count).unwrap(); - final_write.slot = next; + let next = current_slot.get_next_slot(thread_count).unwrap(); + let changes = StateChanges { pos_changes: get_random_pos_changes(10), ledger_changes: get_random_ledger_changes(10), - async_pool_changes: get_random_async_pool_changes(10), + async_pool_changes: get_random_async_pool_changes(10, thread_count), executed_ops_changes: get_random_executed_ops_changes(10), executed_denunciations_changes: get_random_executed_de_changes(10), }; + + let mut batch = DBBatch::new(); + + final_write + .pos_state + .apply_changes_to_batch(changes.pos_changes.clone(), next, false, &mut batch) + .unwrap(); + final_write + .ledger + .apply_changes_to_batch(changes.ledger_changes.clone(), &mut batch); + final_write + .async_pool + .apply_changes_to_batch(&changes.async_pool_changes, &mut batch); + final_write.executed_ops.apply_changes_to_batch( + changes.executed_ops_changes.clone(), + next, + &mut batch, + ); + final_write.executed_denunciations.apply_changes_to_batch( + changes.executed_denunciations_changes.clone(), + next, + &mut batch, + ); + final_write - .changes_history - .push_back((next, changes.clone())); + .db + .write() + .write_batch(batch, Default::default(), Some(next)); + + let final_state_hash = final_write.db.read().get_db_hash(); + let cycle = next.get_cycle(final_state_local_config.periods_per_cycle.clone()); + final_write + .pos_state + .feed_cycle_state_hash(cycle, final_state_hash); + let mut list_changes_write = list_changes_clone.write(); list_changes_write.push((next, changes)); + + current_slot = next; } }) .unwrap(); @@ -408,38 +484,29 @@ fn test_bootstrap_server() { final_state_client_clone, mock_remote_connector, Version::from_str("TEST.1.10").unwrap(), - MassaTime::now().unwrap().saturating_sub(1000.into()), + MassaTime::now() + .unwrap() + .saturating_sub(MassaTime::from_millis(1000)), None, None, Arc::new((Mutex::new(false), Condvar::new())), ) .unwrap(); - // apply the changes to the server 
state before matching with the client - { - let mut final_state_server_write = final_state_server.write(); - let list_changes_read = list_changes.read().clone(); - // note: skip the first change to match the update loop behaviour - for (slot, change) in list_changes_read.iter().skip(1) { - final_state_server_write - .pos_state - .apply_changes(change.pos_changes.clone(), *slot, false) - .unwrap(); - final_state_server_write.ledger.apply_changes( - change.ledger_changes.clone(), - *slot, - None, - ); - final_state_server_write - .async_pool - .apply_changes_unchecked(&change.async_pool_changes); - final_state_server_write - .executed_ops - .apply_changes(change.executed_ops_changes.clone(), *slot); - } - } + // Make sure the modifier thread has done its job mod_thread.join().unwrap(); + { + let mut final_state_client_write = final_state_client.write(); + + assert!( + final_state_client_write.is_db_valid(), + "Client's DB is not valid after bootstrap" + ); + + final_state_client_write.recompute_caches(); + } + // check final states assert_eq_final_state(&final_state_server.read(), &final_state_client.read()); assert_eq_final_state_hash(&final_state_server.read(), &final_state_client.read()); @@ -447,7 +514,6 @@ fn test_bootstrap_server() { // compute initial draws final_state_server.write().compute_initial_draws().unwrap(); final_state_client.write().compute_initial_draws().unwrap(); - // check selection draw let server_selection = server_selector_controller.get_entire_selection(); let client_selection = client_selector_controller.get_entire_selection(); @@ -463,10 +529,12 @@ fn test_bootstrap_server() { // check graphs assert_eq_bootstrap_graph(&sent_graph, &bootstrap_res.graph.unwrap()); + /* // check mip store let mip_raw_orig = mip_store.0.read().to_owned(); let mip_raw_received = bootstrap_res.mip_store.unwrap().0.read().to_owned(); assert_eq!(mip_raw_orig, mip_raw_received); + */ // stop bootstrap server bootstrap_manager_thread diff --git a/massa-bootstrap/src/tests/tools.rs b/massa-bootstrap/src/tests/tools.rs index 6b4be8e4c3d..086e298b0f6 100644 --- a/massa-bootstrap/src/tests/tools.rs +++ b/massa-bootstrap/src/tests/tools.rs @@ -2,14 +2,15 @@ use crate::settings::{BootstrapConfig, IpType}; use bitvec::vec::BitVec; -use massa_async_pool::test_exports::{create_async_pool, get_random_message}; -use massa_async_pool::{AsyncPoolChanges, Change}; +use massa_async_pool::AsyncPoolChanges; +use massa_async_pool::{test_exports::get_random_message, AsyncPool}; use massa_consensus_exports::{ bootstrapable_graph::{ BootstrapableGraph, BootstrapableGraphDeserializer, BootstrapableGraphSerializer, }, export_active_block::{ExportActiveBlock, ExportActiveBlockSerializer}, }; +use massa_db::{DBBatch, MassaDB}; use massa_executed_ops::{ ExecutedDenunciations, ExecutedDenunciationsChanges, ExecutedDenunciationsConfig, ExecutedOps, ExecutedOpsConfig, @@ -24,8 +25,8 @@ use massa_models::bytecode::Bytecode; use massa_models::config::{ BOOTSTRAP_RANDOMNESS_SIZE_BYTES, CONSENSUS_BOOTSTRAP_PART_SIZE, ENDORSEMENT_COUNT, MAX_ADVERTISE_LENGTH, MAX_ASYNC_MESSAGE_DATA, MAX_ASYNC_POOL_LENGTH, - MAX_BOOTSTRAP_ASYNC_POOL_CHANGES, MAX_BOOTSTRAP_BLOCKS, MAX_BOOTSTRAP_ERROR_LENGTH, - MAX_BOOTSTRAP_FINAL_STATE_PARTS_SIZE, MAX_CONSENSUS_BLOCKS_IDS, MAX_DATASTORE_ENTRY_COUNT, + MAX_BOOTSTRAPPED_NEW_ELEMENTS, MAX_BOOTSTRAP_ASYNC_POOL_CHANGES, MAX_BOOTSTRAP_BLOCKS, + MAX_BOOTSTRAP_ERROR_LENGTH, MAX_CONSENSUS_BLOCKS_IDS, MAX_DATASTORE_ENTRY_COUNT, MAX_DATASTORE_KEY_LENGTH, MAX_DATASTORE_VALUE_LENGTH, 
MAX_DEFERRED_CREDITS_LENGTH, MAX_DENUNCIATIONS_PER_BLOCK_HEADER, MAX_DENUNCIATION_CHANGES_LENGTH, MAX_EXECUTED_OPS_CHANGES_LENGTH, MAX_EXECUTED_OPS_LENGTH, MAX_FUNCTION_NAME_LENGTH, @@ -51,14 +52,17 @@ use massa_models::{ secure_share::SecureShareContent, slot::Slot, }; -use massa_pos_exports::{CycleInfo, DeferredCredits, PoSChanges, PoSFinalState, ProductionStats}; +use massa_pos_exports::{DeferredCredits, PoSChanges, PoSFinalState, ProductionStats}; use massa_protocol_exports::{BootstrapPeers, PeerId, TransportType}; use massa_serialization::{DeserializeError, Deserializer, Serializer}; use massa_signature::KeyPair; use massa_time::MassaTime; +use massa_versioning::versioning::{MipStatsConfig, MipStore}; +use parking_lot::RwLock; use rand::Rng; -use std::collections::{HashMap, HashSet, VecDeque}; +use std::collections::{HashMap, HashSet}; use std::str::FromStr; +use std::sync::Arc; use std::{ collections::BTreeMap, net::{IpAddr, Ipv4Addr, SocketAddr}, @@ -113,7 +117,6 @@ pub fn get_random_ledger_changes(r_limit: u64) -> LedgerChanges { /// generates random PoS cycles info fn get_random_pos_cycles_info( r_limit: u64, - opt_seed: bool, ) -> ( BTreeMap, PreHashMap, @@ -134,10 +137,6 @@ fn get_random_pos_cycles_info( }, ); } - // note: extra seed is used in the changes test to compensate for the update loop skipping the first change - if opt_seed { - rng_seed.push(rng.gen_range(0..2) == 1); - } rng_seed.push(rng.gen_range(0..2) == 1); (roll_counts, production_stats, rng_seed) } @@ -163,25 +162,39 @@ fn get_random_deferred_credits(r_limit: u64) -> DeferredCredits { } /// generates a random PoS final state -fn get_random_pos_state(r_limit: u64, pos: PoSFinalState) -> PoSFinalState { - let mut cycle_history = VecDeque::new(); - let (roll_counts, production_stats, rng_seed) = get_random_pos_cycles_info(r_limit, true); - let mut cycle = CycleInfo::new_with_hash(0, false, roll_counts, rng_seed, production_stats); - cycle.final_state_hash_snapshot = Some(Hash::from_bytes(&[0; 32])); - cycle_history.push_back(cycle); +fn get_random_pos_state(r_limit: u64, mut pos: PoSFinalState) -> PoSFinalState { + let (roll_counts, production_stats, _rng_seed) = get_random_pos_cycles_info(r_limit); let mut deferred_credits = DeferredCredits::new_with_hash(); deferred_credits.extend(get_random_deferred_credits(r_limit)); - PoSFinalState { - cycle_history, + + // Do not add seed_bits to changes, as we create the initial cycle just after + let changes = PoSChanges { + seed_bits: Default::default(), + roll_changes: roll_counts.into_iter().collect(), + production_stats, deferred_credits, - ..pos - } + }; + + let mut batch = DBBatch::new(); + + pos.create_initial_cycle(&mut batch); + + pos.db.write().write_batch(batch, Default::default(), None); + + let mut batch = DBBatch::new(); + + pos.apply_changes_to_batch(changes, Slot::new(0, 0), false, &mut batch) + .expect("Critical: Error while applying changes to pos_state"); + + pos.db.write().write_batch(batch, Default::default(), None); + + pos } /// generates random PoS changes pub fn get_random_pos_changes(r_limit: u64) -> PoSChanges { let deferred_credits = get_random_deferred_credits(r_limit); - let (roll_counts, production_stats, seed_bits) = get_random_pos_cycles_info(r_limit, false); + let (roll_counts, production_stats, seed_bits) = get_random_pos_cycles_info(r_limit); PoSChanges { seed_bits, roll_changes: roll_counts.into_iter().collect(), @@ -190,15 +203,20 @@ pub fn get_random_pos_changes(r_limit: u64) -> PoSChanges { } } -pub fn 
get_random_async_pool_changes(r_limit: u64) -> AsyncPoolChanges { +pub fn get_random_async_pool_changes(r_limit: u64, thread_count: u8) -> AsyncPoolChanges { let mut changes = AsyncPoolChanges::default(); for _ in 0..(r_limit / 2) { - let message = get_random_message(Some(Amount::from_str("10").unwrap())); - changes.0.push(Change::Add(message.compute_id(), message)); + let message = get_random_message(Some(Amount::from_str("10").unwrap()), thread_count); + changes + .0 + .insert(message.compute_id(), SetUpdateOrDelete::Set(message)); } for _ in (r_limit / 2)..r_limit { - let message = get_random_message(Some(Amount::from_str("1_000_000").unwrap())); - changes.0.push(Change::Add(message.compute_id(), message)); + let message = + get_random_message(Some(Amount::from_str("1_000_000").unwrap()), thread_count); + changes + .0 + .insert(message.compute_id(), SetUpdateOrDelete::Set(message)); } changes } @@ -207,9 +225,12 @@ pub fn get_random_executed_ops( _r_limit: u64, slot: Slot, config: ExecutedOpsConfig, + db: Arc>, ) -> ExecutedOps { - let mut executed_ops = ExecutedOps::new(config.clone()); - executed_ops.apply_changes(get_random_executed_ops_changes(10), slot); + let mut executed_ops = ExecutedOps::new(config.clone(), db.clone()); + let mut batch = DBBatch::new(); + executed_ops.apply_changes_to_batch(get_random_executed_ops_changes(10), slot, &mut batch); + db.write().write_batch(batch, Default::default(), None); executed_ops } @@ -234,9 +255,17 @@ pub fn get_random_executed_de( _r_limit: u64, slot: Slot, config: ExecutedDenunciationsConfig, + db: Arc>, ) -> ExecutedDenunciations { - let mut executed_de = ExecutedDenunciations::new(config); - executed_de.apply_changes(get_random_executed_de_changes(10), slot); + let mut executed_de = ExecutedDenunciations::new(config, db); + let mut batch = DBBatch::new(); + executed_de.apply_changes_to_batch(get_random_executed_de_changes(10), slot, &mut batch); + + executed_de + .db + .write() + .write_batch(batch, Default::default(), None); + executed_de } @@ -263,42 +292,68 @@ pub fn get_random_executed_de_changes(r_limit: u64) -> ExecutedDenunciationsChan pub fn get_random_final_state_bootstrap( pos: PoSFinalState, config: FinalStateConfig, + db: Arc>, ) -> FinalState { let r_limit: u64 = 50; let mut sorted_ledger = HashMap::new(); let mut messages = AsyncPoolChanges::default(); for _ in 0..r_limit { - let message = get_random_message(None); - messages.0.push(Change::Add(message.compute_id(), message)); + let message = get_random_message(None, config.thread_count); + messages + .0 + .insert(message.compute_id(), SetUpdateOrDelete::Set(message)); } for _ in 0..r_limit { sorted_ledger.insert(get_random_address(), get_random_ledger_entry()); } - // insert the last possible address to prevent the last cursor to move when testing the changes - // The magic number at idx 0 is to account for address variant leader. At time of writing, - // the highest value for encoding this variant in serialized form is `1`. 
- let mut bytes = [255; 33]; - bytes[0] = 1; - sorted_ledger.insert( - Address::from_prefixed_bytes(&bytes).unwrap(), - get_random_ledger_entry(), + let slot = Slot::new(0, 0); + let final_ledger = create_final_ledger(db.clone(), config.ledger_config.clone(), sorted_ledger); + + let mut async_pool = AsyncPool::new(config.async_pool_config.clone(), db.clone()); + let mut batch = DBBatch::new(); + let versioning_batch = DBBatch::new(); + + async_pool.apply_changes_to_batch(&messages, &mut batch); + async_pool + .db + .write() + .write_batch(batch, versioning_batch, None); + + let executed_ops = get_random_executed_ops( + r_limit, + slot, + config.executed_ops_config.clone(), + db.clone(), ); - let slot = Slot::new(0, 0); - let final_ledger = create_final_ledger(config.ledger_config.clone(), sorted_ledger); - let mut async_pool = create_async_pool(config.async_pool_config.clone(), BTreeMap::new()); - async_pool.apply_changes_unchecked(&messages); + let executed_denunciations = get_random_executed_de( + r_limit, + slot, + config.executed_denunciations_config.clone(), + db.clone(), + ); + + let pos_state = get_random_pos_state(r_limit, pos); + + let mip_store = MipStore::try_from(( + [], + MipStatsConfig { + block_count_considered: 10, + counters_max: 10, + }, + )) + .unwrap(); create_final_state( - config.clone(), - slot, + config, Box::new(final_ledger), async_pool, - VecDeque::new(), - get_random_pos_state(r_limit, pos), - get_random_executed_ops(r_limit, slot, config.executed_ops_config), - get_random_executed_de(r_limit, slot, config.executed_denunciations_config), + pos_state, + executed_ops, + executed_denunciations, + mip_store, + db, ) } @@ -307,7 +362,7 @@ pub fn get_dummy_block_id(s: &str) -> BlockId { } pub fn get_random_address() -> Address { - let priv_key = KeyPair::generate(); + let priv_key = KeyPair::generate(0).unwrap(); Address::from_public_key(&priv_key.get_public_key()) } @@ -315,14 +370,14 @@ pub fn get_bootstrap_config(bootstrap_public_key: NodeId) -> BootstrapConfig { BootstrapConfig { listen_addr: Some("0.0.0.0:31244".parse().unwrap()), bootstrap_protocol: IpType::Both, - bootstrap_timeout: 120000.into(), - connect_timeout: 200.into(), - retry_delay: 200.into(), + bootstrap_timeout: MassaTime::from_millis(120000), + connect_timeout: MassaTime::from_millis(200), + retry_delay: MassaTime::from_millis(200), max_ping: MassaTime::from_millis(500), - read_timeout: 1000.into(), - write_timeout: 1000.into(), - read_error_timeout: 200.into(), - write_error_timeout: 200.into(), + read_timeout: MassaTime::from_millis(1000), + write_timeout: MassaTime::from_millis(1000), + read_error_timeout: MassaTime::from_millis(200), + write_error_timeout: MassaTime::from_millis(200), max_listeners_per_peer: 100, bootstrap_list: vec![( SocketAddr::new(BASE_BOOTSTRAP_IP, 8069), @@ -336,10 +391,10 @@ pub fn get_bootstrap_config(bootstrap_public_key: NodeId) -> BootstrapConfig { "../massa-node/base_config/bootstrap_blacklist.json", ), max_clock_delta: MassaTime::from_millis(1000), - cache_duration: 10000.into(), + cache_duration: MassaTime::from_millis(10000), max_simultaneous_bootstraps: 2, ip_list_max_size: 10, - per_ip_min_interval: 10000.into(), + per_ip_min_interval: MassaTime::from_millis(10000), max_bytes_read_write: std::f64::INFINITY, max_datastore_key_length: MAX_DATASTORE_KEY_LENGTH, randomness_size_bytes: BOOTSTRAP_RANDOMNESS_SIZE_BYTES, @@ -349,7 +404,7 @@ pub fn get_bootstrap_config(bootstrap_public_key: NodeId) -> BootstrapConfig { max_advertise_length: MAX_ADVERTISE_LENGTH, 
max_bootstrap_blocks_length: MAX_BOOTSTRAP_BLOCKS, max_bootstrap_error_length: MAX_BOOTSTRAP_ERROR_LENGTH, - max_bootstrap_final_state_parts_size: MAX_BOOTSTRAP_FINAL_STATE_PARTS_SIZE, + max_new_elements: MAX_BOOTSTRAPPED_NEW_ELEMENTS, max_async_pool_changes: MAX_BOOTSTRAP_ASYNC_POOL_CHANGES, max_async_pool_length: MAX_ASYNC_POOL_LENGTH, max_async_message_data: MAX_ASYNC_MESSAGE_DATA, @@ -395,12 +450,14 @@ pub fn assert_eq_bootstrap_graph(v1: &BootstrapableGraph, v2: &BootstrapableGrap } pub fn get_boot_state() -> BootstrapableGraph { - let keypair = KeyPair::generate(); + let keypair = KeyPair::generate(0).unwrap(); let block = Block::new_verifiable( Block { header: BlockHeader::new_verifiable( BlockHeader { + current_version: 0, + announced_version: 0, // associated slot // all header endorsements are supposed to point towards this one slot: Slot::new(1, 0), @@ -484,11 +541,11 @@ pub fn get_peers(keypair: &KeyPair) -> BootstrapPeers { listeners2.insert("82.220.123.78:8080".parse().unwrap(), TransportType::Tcp); BootstrapPeers(vec![ ( - PeerId::from_bytes(keypair.get_public_key().to_bytes()).unwrap(), + PeerId::from_public_key(keypair.get_public_key()), listeners1, ), ( - PeerId::from_bytes(keypair.get_public_key().to_bytes()).unwrap(), + PeerId::from_public_key(keypair.get_public_key()), listeners2, ), ]) diff --git a/massa-cipher/Cargo.toml b/massa-cipher/Cargo.toml index 06c83221097..3fb56a32537 100644 --- a/massa-cipher/Cargo.toml +++ b/massa-cipher/Cargo.toml @@ -1,20 +1,16 @@ [package] name = "massa_cipher" -version = "0.1.0" +version = "0.23.0" edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] displaydoc = "0.2" -serde = { version = "1.0", features = ["derive"] } -serde_json = "1.0" -serde_qs = "0.11" thiserror = "1.0" aes-gcm = "0.10" pbkdf2 = "0.11" rand = "0.8" -rand_core = { version = "0.6", features = ["std"] } # custom modules massa_serialization = { path = "../massa-serialization" } diff --git a/massa-client/Cargo.toml b/massa-client/Cargo.toml index b948be05d0e..8226b5c4dee 100644 --- a/massa-client/Cargo.toml +++ b/massa-client/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "massa-client" -version = "0.1.0" +version = "0.23.0" edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html @@ -28,6 +28,7 @@ massa_signature = { path = "../massa-signature" } massa_time = { path = "../massa-time" } massa_sdk = { path = "../massa-sdk" } massa_wallet = { path = "../massa-wallet" } +massa_proto = { path = "../massa-proto" } [dev-dependencies] toml_edit = "0.19" diff --git a/massa-client/base_config/config.toml b/massa-client/base_config/config.toml index 586465305c7..7de719c6990 100644 --- a/massa-client/base_config/config.toml +++ b/massa-client/base_config/config.toml @@ -7,6 +7,7 @@ timeout = 1000 ip = "127.0.0.1" private_port = 33034 public_port = 33035 +grpc_port = 33037 [client] # maximum size in bytes of a request diff --git a/massa-client/src/cmds.rs b/massa-client/src/cmds.rs index dc253754526..c13d829d7f5 100644 --- a/massa-client/src/cmds.rs +++ b/massa-client/src/cmds.rs @@ -1,7 +1,7 @@ // Copyright (c) 2022 MASSA LABS use crate::display::Output; -use crate::{client_warning, rpc_error}; +use crate::{client_warning, grpc_error, rpc_error}; use anyhow::{anyhow, bail, Result}; use console::style; use massa_api_exports::{ @@ -22,10 +22,12 @@ use massa_models::{ operation::{Operation, OperationId, OperationType}, slot::Slot, }; +use 
massa_proto::massa::api::v1 as grpc; use massa_sdk::Client; use massa_signature::KeyPair; use massa_time::MassaTime; use massa_wallet::Wallet; + use serde::Serialize; use std::collections::{BTreeMap, HashMap, HashSet}; use std::fmt::Write as _; @@ -439,7 +441,7 @@ impl Command { /// it means that we don't want to print anything we just want the json output pub(crate) async fn run( &self, - client: &Client, + client: &mut Client, wallet_opt: &mut Option, parameters: &[String], json: bool, @@ -769,16 +771,61 @@ impl Command { Command::wallet_generate_secret_key => { let wallet = wallet_opt.as_mut().unwrap(); - let key = KeyPair::generate(); - let ad = wallet.add_keypairs(vec![key])?[0]; - if json { - Ok(Box::new(ad.to_string())) + // In order to generate a KeyPair we need to get the MIP statuses and use the latest + // active version + let req = grpc::GetMipStatusRequest { id: "".to_string() }; + + if let Some(ref mut grpc) = client.grpc { + let versioning_status = match grpc.get_mip_status(req).await { + Ok(resp_) => { + let resp = resp_.into_inner(); + resp.entry + } + Err(e) => { + grpc_error!(e) + } + }; + + // Note: this is a bit duplicated with KeyPairFactory code but it avoids + // sending the whole mip store through the API + let keypair_version = versioning_status + .into_iter() + .rev() + .find_map(|entry| { + let is_about_keypair = match entry.mip_info { + None => false, + Some(mip_info) => mip_info.components.iter().any(|st_entry| { + grpc::MipComponent::from_i32(st_entry.kind) + .unwrap_or(grpc::MipComponent::Unspecified) + == grpc::MipComponent::Keypair + }), + }; + + let state = grpc::ComponentStateId::from_i32(entry.state_id) + .unwrap_or(grpc::ComponentStateId::Error); + + if is_about_keypair && state == grpc::ComponentStateId::Active { + Some(entry.state_id) + } else { + None + } + }) + .unwrap_or(0); + let key = KeyPair::generate(keypair_version as u64) + .expect("Unable to generate key pair"); + + let ad = wallet.add_keypairs(vec![key])?[0]; + if json { + Ok(Box::new(ad.to_string())) + } else { + println!("Generated {} address and added it to the wallet", ad); + println!( + "Type `wallet_info` to show wallet info (keys, addresses, balances ...) and/or `node_add_staking_secret_keys ` to start staking with this key.\n" + ); + Ok(Box::new(())) + } } else { - println!("Generated {} address and added it to the wallet", ad); - println!( - "Type `node_start_staking
` to start staking with this address.\n" - ); - Ok(Box::new(())) + bail!("GRPC is not enabled"); } } diff --git a/massa-client/src/display.rs b/massa-client/src/display.rs index 230936b9457..47b1bc462a5 100644 --- a/massa-client/src/display.rs +++ b/massa-client/src/display.rs @@ -41,6 +41,14 @@ macro_rules! rpc_error { }; } +#[macro_export] +/// bail a shiny gRPC error +macro_rules! grpc_error { + ($e:expr) => { + bail!("check if your node is running and the grpc api is enabled: {}", $e) + }; +} + #[macro_export] /// print a yellow warning macro_rules! client_warning { @@ -223,7 +231,7 @@ impl Output for NodeStatus { self.config.pretty_print(); println!(); - println!("Current time: {}", self.current_time.to_utc_string()); + println!("Current time: {}", self.current_time.format_instant()); println!( "Current cycle: {}", Style::Protocol.style(self.current_cycle) @@ -272,11 +280,11 @@ impl Output for ExecutionStats { println!("Execution stats:"); println!( "\tStart stats timespan time: {}", - Style::Time.style(self.time_window_start.to_utc_string()) + Style::Time.style(self.time_window_start.format_instant()) ); println!( "\tEnd stats timespan time: {}", - Style::Time.style(self.time_window_end.to_utc_string()) + Style::Time.style(self.time_window_end.format_instant()) ); println!( "\tFinal executed block count: {}", @@ -324,10 +332,10 @@ impl Output for CompactConfig { println!("Config:"); println!( "\tGenesis time: {}", - Style::Time.style(self.genesis_timestamp.to_utc_string()) + Style::Time.style(self.genesis_timestamp.format_instant()) ); if let Some(end) = self.end_timestamp { - println!("\tEnd time: {}", Style::Time.style(end.to_utc_string())); + println!("\tEnd time: {}", Style::Time.style(end.format_instant())); } println!( "\tThread count: {}", @@ -344,10 +352,6 @@ impl Output for CompactConfig { Style::Protocol.style(self.periods_per_cycle) ); println!("\tBlock reward: {}", Style::Coins.style(self.block_reward)); - println!( - "\tPeriods per cycle: {}", - Style::Protocol.style(self.periods_per_cycle) - ); println!("\tRoll price: {}", Style::Coins.style(self.roll_price)); println!( "\tMax block size (in bytes): {}", @@ -361,11 +365,11 @@ impl Output for ConsensusStats { println!("Consensus stats:"); println!( "\tStart stats timespan time: {}", - Style::Time.style(self.start_timespan.to_utc_string()) + Style::Time.style(self.start_timespan.format_instant()) ); println!( "\tEnd stats timespan time: {}", - Style::Time.style(self.end_timespan.to_utc_string()) + Style::Time.style(self.end_timespan.format_instant()) ); println!( "\tFinal block count: {}", diff --git a/massa-client/src/main.rs b/massa-client/src/main.rs index d5949e9c604..4bd72599f33 100644 --- a/massa-client/src/main.rs +++ b/massa-client/src/main.rs @@ -33,6 +33,9 @@ struct Args { /// Port to listen on (Massa private API). #[structopt(long)] private_port: Option, + /// Port to listen on (Massa GRPC API).
+ #[structopt(long)] + grpc_port: Option, /// Address to listen on #[structopt(long)] ip: Option, @@ -126,6 +129,10 @@ async fn run(args: Args) -> Result<()> { Some(private_port) => private_port, None => settings.default_node.private_port, }; + let grpc_port = match args.grpc_port { + Some(grpc_port) => grpc_port, + None => settings.default_node.grpc_port, + }; // Setup panic handlers, // and when a panic occurs, @@ -137,10 +144,12 @@ async fn run(args: Args) -> Result<()> { std::process::exit(1); })); - let client = Client::new(address, public_port, private_port, &http_config).await; + // Note: grpc handler requires a mut handler + let mut client = + Client::new(address, public_port, private_port, grpc_port, &http_config).await?; if atty::is(Stream::Stdout) && args.command == Command::help && !args.json { // Interactive mode - repl::run(&client, &args.wallet, args.password).await?; + repl::run(&mut client, &args.wallet, args.password).await?; } else { // Non-Interactive mode @@ -161,7 +170,7 @@ async fn run(args: Args) -> Result<()> { match args .command - .run(&client, &mut wallet_opt, &args.parameters, args.json) + .run(&mut client, &mut wallet_opt, &args.parameters, args.json) .await { Ok(output) => { diff --git a/massa-client/src/repl.rs b/massa-client/src/repl.rs index 816e36074b7..bebcb98ce4f 100644 --- a/massa-client/src/repl.rs +++ b/massa-client/src/repl.rs @@ -77,7 +77,7 @@ struct MyHelper { } pub(crate) async fn run( - client: &Client, + client: &mut Client, wallet_path: &Path, args_password: Option, ) -> Result<()> { diff --git a/massa-client/src/settings.rs b/massa-client/src/settings.rs index c35f7d9bc6f..53c8fbe9ee9 100644 --- a/massa-client/src/settings.rs +++ b/massa-client/src/settings.rs @@ -24,6 +24,7 @@ pub struct DefaultNode { pub ip: IpAddr, pub private_port: u16, pub public_port: u16, + pub grpc_port: u16, } /// Client settings diff --git a/massa-consensus-exports/Cargo.toml b/massa-consensus-exports/Cargo.toml index 617bec0edae..95030cfbd44 100644 --- a/massa-consensus-exports/Cargo.toml +++ b/massa-consensus-exports/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "massa_consensus_exports" -version = "0.1.0" +version = "0.23.0" authors = ["Massa Labs "] edition = "2021" @@ -9,7 +9,7 @@ edition = "2021" [dependencies] crossbeam-channel = "0.5.6" displaydoc = "0.2" -nom = "7.1" +nom = "=7.1" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" thiserror = "1.0" diff --git a/massa-consensus-exports/src/export_active_block.rs b/massa-consensus-exports/src/export_active_block.rs index 9cad74f2a53..e36dc43df19 100644 --- a/massa-consensus-exports/src/export_active_block.rs +++ b/massa-consensus-exports/src/export_active_block.rs @@ -166,7 +166,7 @@ impl Deserializer for ExportActiveBlockDeserializer { /// use massa_signature::KeyPair; /// use massa_serialization::{Serializer, Deserializer, DeserializeError}; /// - /// let keypair = KeyPair::generate(); + /// let keypair = KeyPair::generate(0).unwrap(); /// let parents = (0..THREAD_COUNT) /// .map(|i| BlockId(Hash::compute_from(&[i]))) /// .collect(); @@ -174,6 +174,8 @@ impl Deserializer for ExportActiveBlockDeserializer { /// // create block header /// let orig_header = BlockHeader::new_verifiable( /// BlockHeader { + /// current_version: 0, + /// announced_version: 0, /// slot: Slot::new(1, 1), /// parents, /// operation_merkle_root: Hash::compute_from("mno".as_bytes()), diff --git a/massa-consensus-exports/src/settings.rs b/massa-consensus-exports/src/settings.rs index de87d4b2a00..cb7a8421baa 100644 --- 
a/massa-consensus-exports/src/settings.rs +++ b/massa-consensus-exports/src/settings.rs @@ -14,18 +14,12 @@ pub struct ConsensusConfig { pub genesis_key: KeyPair, /// Maximum number of blocks allowed in discarded blocks. pub max_discarded_blocks: usize, - /// If a block `is future_block_processing_max_periods` periods in the future, it is just discarded. - pub future_block_processing_max_periods: u64, /// Maximum number of blocks allowed in `FutureIncomingBlocks`. pub max_future_processing_blocks: usize, /// Maximum number of blocks allowed in `DependencyWaitingBlocks`. pub max_dependency_blocks: usize, - /// max event send wait - pub max_send_wait: MassaTime, /// old blocks are pruned every `block_db_prune_interval` pub block_db_prune_interval: MassaTime, - /// max number of items returned while querying - pub max_item_return_count: usize, /// Max gas per block for the execution configuration pub max_gas_per_block: u64, /// Threshold for fitness. @@ -36,6 +30,8 @@ pub struct ConsensusConfig { pub periods_per_cycle: u64, /// force keep at least this number of final periods in RAM for each thread pub force_keep_final_periods: u64, + /// force keep at least this number of final blocks without ops in RAM for each thread + pub force_keep_final_periods_without_ops: u64, /// target number of endorsement per block pub endorsement_count: u32, /// TESTNET: time when the blockclique is ended. diff --git a/massa-consensus-exports/src/test_exports/config.rs b/massa-consensus-exports/src/test_exports/config.rs index b2a9bad75c2..7d596a3cbbe 100644 --- a/massa-consensus-exports/src/test_exports/config.rs +++ b/massa-consensus-exports/src/test_exports/config.rs @@ -17,17 +17,15 @@ impl Default for ConsensusConfig { thread_count: THREAD_COUNT, genesis_key: GENESIS_KEY.clone(), max_discarded_blocks: 10000, - future_block_processing_max_periods: 100, max_future_processing_blocks: 100, max_dependency_blocks: 2048, - max_send_wait: MassaTime::from_millis(100), block_db_prune_interval: MassaTime::from_millis(5000), - max_item_return_count: 100, max_gas_per_block: MAX_GAS_PER_BLOCK, delta_f0: DELTA_F0, operation_validity_periods: OPERATION_VALIDITY_PERIODS, periods_per_cycle: PERIODS_PER_CYCLE, force_keep_final_periods: 20, + force_keep_final_periods_without_ops: 128, endorsement_count: ENDORSEMENT_COUNT, end_timestamp: None, stats_timespan: MassaTime::from_millis(60000), diff --git a/massa-consensus-worker/Cargo.toml b/massa-consensus-worker/Cargo.toml index a1cc2dd58ed..9cc532807a8 100644 --- a/massa-consensus-worker/Cargo.toml +++ b/massa-consensus-worker/Cargo.toml @@ -1,17 +1,14 @@ [package] name = "massa_consensus_worker" -version = "0.1.0" +version = "0.23.0" authors = ["Massa Labs "] edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -displaydoc = "0.2" num = { version = "0.4", features = ["serde"] } -tracing = "0.1" -serde = { version = "1.0", features = ["derive"] } -serde_json = "1.0" +tracing = { version = "0.1", features = ["log"] } parking_lot = { version = "0.12", features = ["deadlock_detection"] } #custom modules massa_consensus_exports = { path = "../massa-consensus-exports" } @@ -22,6 +19,10 @@ massa_time = { path = "../massa-time" } massa_hash = { path = "../massa-hash" } massa_logging = { path = "../massa-logging" } -[features] +[dev-dependencies] +rand= "0.8" +itertools = "0.10" -sandbox = [] \ No newline at end of file +[features] +sandbox = [] +bootstrap_server = [] diff --git 
a/massa-consensus-worker/src/state/clique_computation.rs b/massa-consensus-worker/src/state/clique_computation.rs new file mode 100644 index 00000000000..3064ba10080 --- /dev/null +++ b/massa-consensus-worker/src/state/clique_computation.rs @@ -0,0 +1,157 @@ +// Copyright (c) 2022 MASSA LABS + +//! This file is responsible for clique computation + +use massa_models::{ + block_id::BlockId, + prehash::{PreHashMap, PreHashSet}, +}; + +/// Computes max cliques of compatible blocks +pub fn compute_max_cliques( + gi_head: &PreHashMap>, +) -> Vec> { + let mut max_cliques: Vec> = Vec::new(); + + // algorithm adapted from IK_GPX as summarized in: + // Cazals et al., "A note on the problem of reporting maximal cliques" + // Theoretical Computer Science, 2008 + // https://doi.org/10.1016/j.tcs.2008.05.010 + + // stack: r, p, x + let mut stack: Vec<( + PreHashSet, + PreHashSet, + PreHashSet, + )> = vec![( + PreHashSet::::default(), + gi_head.keys().cloned().collect(), + PreHashSet::::default(), + )]; + while let Some((r, mut p, mut x)) = stack.pop() { + if p.is_empty() && x.is_empty() { + max_cliques.push(r); + continue; + } + // choose the pivot vertex following the GPX scheme: + // u_p = node from (p \/ x) that maximizes the cardinality of (P \ Neighbors(u_p, GI)) + let &u_p = p + .union(&x) + .max_by_key(|&u| { + p.difference(&(&gi_head[u] | &vec![*u].into_iter().collect())) + .count() + }) + .unwrap(); // p was checked to be non-empty before + + // iterate over u_set = (p /\ Neighbors(u_p, GI)) + let u_set: PreHashSet = &p & &(&gi_head[&u_p] | &vec![u_p].into_iter().collect()); + for u_i in u_set.into_iter() { + p.remove(&u_i); + let u_i_set: PreHashSet = vec![u_i].into_iter().collect(); + let comp_n_u_i: PreHashSet = &gi_head[&u_i] | &u_i_set; + stack.push((&r | &u_i_set, &p - &comp_n_u_i, &x - &comp_n_u_i)); + x.insert(u_i); + } + } + max_cliques +} + +/// Tests + +#[cfg(test)] +mod tests { + use crate::state::clique_computation::compute_max_cliques; + use itertools::Itertools; + use massa_models::{ + block_id::BlockId, + prehash::{PreHashMap, PreHashSet}, + }; + use rand::Rng; + + #[test] + fn test_compute_max_cliques() { + // Define the maximum size of the graph and the number of iterations + const MAX_SIZE: usize = 10; + const ITERATIONS: usize = 1000; + + // Generate random test cases and run the algorithm + let mut rng = rand::thread_rng(); + + for _ in 0..ITERATIONS { + // Generate a random graph size + let size = rng.gen_range(0..=MAX_SIZE); + + // Generate random incompatibility relationships + let mut gi_head = PreHashMap::default(); + for i in 0..size { + gi_head.insert( + BlockId::from_bytes( + massa_hash::Hash::compute_from(&i.to_be_bytes()).to_bytes(), + ), + PreHashSet::default(), + ); + } + for i in 0..size.saturating_sub(1) { + for j in (i + 1)..size { + // Generate a random compatibility relationship + let is_compatible = rng.gen_bool(0.5); + + if !is_compatible { + let i_id = BlockId::from_bytes( + massa_hash::Hash::compute_from(&i.to_be_bytes()).to_bytes(), + ); + let j_id = BlockId::from_bytes( + massa_hash::Hash::compute_from(&j.to_be_bytes()).to_bytes(), + ); + // Add the incompatibility relationship to gi_head + gi_head.entry(i_id).or_default().insert(j_id); + gi_head.entry(j_id).or_default().insert(i_id); + } + } + } + + // Check cliques + assert_cliques_valid(&gi_head, &compute_max_cliques(&gi_head)); + } + } + + /// Assert that a set of cliques is valid + fn assert_cliques_valid( + gi_head: &PreHashMap>, + max_cliques: &Vec>, + ) { + // Check that there is at least one 
clique + if max_cliques.is_empty() { + panic!("max_cliques is empty"); + } + + // Check that all cliques are unique + for (i, clique1) in max_cliques.iter().enumerate() { + for (j, clique2) in max_cliques.iter().enumerate() { + if i != j && clique1 == clique2 { + panic!("two of the cliques are identical"); + } + } + } + + for clique in max_cliques { + // Check that all pairs of vertices in the clique are compatible + for (v1, v2) in clique.iter().tuple_combinations() { + if gi_head[&v1].contains(&v2) || gi_head[&v2].contains(&v1) { + panic!("incompatible vertices found within the same clique"); + } + } + + // Check that the clique is maximal + for v in gi_head.keys() { + if !clique.contains(v) { + if clique.iter().all(|c| !gi_head[&v].contains(&c)) { + panic!("a clique is non-maximal"); + } + } + } + } + + // All cliques are valid, unique and maximal + } +} diff --git a/massa-consensus-worker/src/state/graph.rs b/massa-consensus-worker/src/state/graph.rs index b9fa9085d43..8ea8af090fe 100644 --- a/massa-consensus-worker/src/state/graph.rs +++ b/massa-consensus-worker/src/state/graph.rs @@ -241,58 +241,6 @@ impl ConsensusState { Ok(final_blocks) } - /// Computes max cliques of compatible blocks - pub fn compute_max_cliques(&self) -> Vec> { - let mut max_cliques: Vec> = Vec::new(); - - // algorithm adapted from IK_GPX as summarized in: - // Cazals et al., "A note on the problem of reporting maximal cliques" - // Theoretical Computer Science, 2008 - // https://doi.org/10.1016/j.tcs.2008.05.010 - - // stack: r, p, x - let mut stack: Vec<( - PreHashSet, - PreHashSet, - PreHashSet, - )> = vec![( - PreHashSet::::default(), - self.gi_head.keys().cloned().collect(), - PreHashSet::::default(), - )]; - while let Some((r, mut p, mut x)) = stack.pop() { - if p.is_empty() && x.is_empty() { - max_cliques.push(r); - continue; - } - // choose the pivot vertex following the GPX scheme: - // u_p = node from (p \/ x) that maximizes the cardinality of (P \ Neighbors(u_p, GI)) - let &u_p = p - .union(&x) - .max_by_key(|&u| { - p.difference(&(&self.gi_head[u] | &vec![*u].into_iter().collect())) - .count() - }) - .unwrap(); // p was checked to be non-empty before - - // iterate over u_set = (p /\ Neighbors(u_p, GI)) - let u_set: PreHashSet = - &p & &(&self.gi_head[&u_p] | &vec![u_p].into_iter().collect()); - for u_i in u_set.into_iter() { - p.remove(&u_i); - let u_i_set: PreHashSet = vec![u_i].into_iter().collect(); - let comp_n_u_i: PreHashSet = &self.gi_head[&u_i] | &u_i_set; - stack.push((&r | &u_i_set, &p - &comp_n_u_i, &x - &comp_n_u_i)); - x.insert(u_i); - } - } - if max_cliques.is_empty() { - // make sure at least one clique remains - max_cliques = vec![PreHashSet::::default()]; - } - max_cliques - } - /// get the clique of higher fitness pub fn get_blockclique(&self) -> PreHashSet { self.max_cliques diff --git a/massa-consensus-worker/src/state/mod.rs b/massa-consensus-worker/src/state/mod.rs index 3254f42db3c..51538f6e0b5 100644 --- a/massa-consensus-worker/src/state/mod.rs +++ b/massa-consensus-worker/src/state/mod.rs @@ -23,6 +23,7 @@ use massa_storage::Storage; use massa_time::MassaTime; use tracing::debug; +mod clique_computation; mod graph; mod process; mod process_commands; @@ -48,6 +49,8 @@ pub struct ConsensusState { pub max_cliques: Vec, /// ids of active blocks pub active_index: PreHashSet, + /// ids of active blocks without ops + pub active_index_without_ops: PreHashSet, /// Save of latest periods pub save_final_periods: Vec, /// One (block id, period) per thread @@ -107,7 +110,7 @@ impl 
ConsensusState { /// Get a full active block /// /// Returns an error if it was not found - pub fn try_get_full_active_block( + fn try_get_full_active_block( &self, block_id: &BlockId, ) -> Result<(&ActiveBlock, &Storage), ConsensusError> { diff --git a/massa-consensus-worker/src/state/process.rs b/massa-consensus-worker/src/state/process.rs index 00ff6ed1459..98c6eec3932 100644 --- a/massa-consensus-worker/src/state/process.rs +++ b/massa-consensus-worker/src/state/process.rs @@ -22,6 +22,8 @@ use massa_storage::Storage; use massa_time::MassaTime; use tracing::log::{debug, info}; +use crate::state::clique_computation::compute_max_cliques; + use super::ConsensusState; /// All informations necessary to add a block to the graph @@ -74,7 +76,7 @@ impl ConsensusState { /// /// # Returns: /// A list of items to re-ack and process or an error if the process of an item failed - pub fn process( + fn process( &mut self, block_id: BlockId, current_slot: Option, @@ -398,8 +400,7 @@ impl ConsensusState { { "hash": add_block_id } ); let before = self.max_cliques.len(); - self.max_cliques = self - .compute_max_cliques() + self.max_cliques = compute_max_cliques(&self.gi_head) .into_iter() .map(|c| Clique { block_ids: c, diff --git a/massa-consensus-worker/src/state/process_commands.rs b/massa-consensus-worker/src/state/process_commands.rs index 5fec3eebfbc..636b7a50ccd 100644 --- a/massa-consensus-worker/src/state/process_commands.rs +++ b/massa-consensus-worker/src/state/process_commands.rs @@ -165,7 +165,22 @@ impl ConsensusState { let reason = DiscardReason::Invalid("invalid".to_string()); self.maybe_note_attack_attempt(&reason, block_id); massa_trace!("consensus.block_graph.process.invalid_block", {"block_id": block_id, "reason": reason}); - + match self.block_statuses.get(block_id) { + Some(BlockStatus::WaitingForDependencies { .. }) => { + self.waiting_for_dependencies_index.remove(block_id); + } + Some(BlockStatus::WaitingForSlot(_)) => { + self.waiting_for_slot_index.remove(block_id); + } + Some(BlockStatus::Incoming(_)) => { + self.incoming_index.remove(block_id); + } + Some(BlockStatus::Active { .. }) => { + self.active_index.remove(block_id); + } + Some(BlockStatus::Discarded { .. }) => {} + None => {} + }; // add to discard self.block_statuses.insert( *block_id, diff --git a/massa-consensus-worker/src/state/prune.rs b/massa-consensus-worker/src/state/prune.rs index da2b8fe1b25..bcce202cf0c 100644 --- a/massa-consensus-worker/src/state/prune.rs +++ b/massa-consensus-worker/src/state/prune.rs @@ -24,15 +24,26 @@ impl ConsensusState { for a_block in self.active_index.iter() { if let Some(BlockStatus::Active { a_block: active_block, - .. 
- }) = self.block_statuses.get(a_block) + storage, + }) = self.block_statuses.get_mut(a_block) { let (_b_id, latest_final_period) = self.latest_final_blocks_periods[active_block.slot.thread as usize]; + if active_block.slot.period - >= latest_final_period.saturating_sub(self.config.force_keep_final_periods) + >= latest_final_period + .saturating_sub(self.config.force_keep_final_periods_without_ops) { retain_active.insert(*a_block); + if active_block.slot.period + < latest_final_period.saturating_sub(self.config.force_keep_final_periods) + && !self.active_index_without_ops.contains(a_block) + { + self.active_index_without_ops.insert(*a_block); + storage.drop_operation_refs(&storage.get_op_refs().clone()); + } + } else { + self.active_index_without_ops.remove(a_block); } } } diff --git a/massa-consensus-worker/src/state/stats.rs b/massa-consensus-worker/src/state/stats.rs index c41db519538..53d51c1889b 100644 --- a/massa-consensus-worker/src/state/stats.rs +++ b/massa-consensus-worker/src/state/stats.rs @@ -82,7 +82,7 @@ impl ConsensusState { } /// Remove old stats from consensus storage - pub fn prune_stats(&mut self) -> Result<(), ConsensusError> { + fn prune_stats(&mut self) -> Result<(), ConsensusError> { let start_time = MassaTime::now()?.saturating_sub(self.stats_history_timespan); while let Some((t, _, _)) = self.final_block_stats.front() { if t < &start_time { diff --git a/massa-consensus-worker/src/state/tick.rs b/massa-consensus-worker/src/state/tick.rs index f82be21626a..1cf6a597f9e 100644 --- a/massa-consensus-worker/src/state/tick.rs +++ b/massa-consensus-worker/src/state/tick.rs @@ -47,6 +47,33 @@ impl ConsensusState { // take care of block db changes self.block_db_changed()?; + // Simulate downtime + use massa_models::config::constants::{ + DOWNTIME_END_TIMESTAMP, DOWNTIME_END_TIMESTAMP_BOOTSTRAP, DOWNTIME_START_TIMESTAMP, + }; + + let now = massa_time::MassaTime::now().expect("could not get now time"); + + // last_start_period should be set to trigger after the DOWNTIME_END_TIMESTAMP + let start_time = DOWNTIME_START_TIMESTAMP; + let end_time = if cfg!(feature = "bootstrap_server") { + DOWNTIME_END_TIMESTAMP_BOOTSTRAP + } else { + DOWNTIME_END_TIMESTAMP + }; + + if now >= start_time && now <= end_time { + let (days, hours, mins, secs) = DOWNTIME_END_TIMESTAMP + .saturating_sub(now) + .days_hours_mins_secs() + .unwrap(); + + panic!( + "We are in downtime! {} days, {} hours, {} minutes, {} seconds remaining to the end of the downtime", + days, hours, mins, secs, + ); + } + Ok(()) } } diff --git a/massa-consensus-worker/src/state/verifications.rs b/massa-consensus-worker/src/state/verifications.rs index fd5b254bfb5..c89f9c90ce5 100644 --- a/massa-consensus-worker/src/state/verifications.rs +++ b/massa-consensus-worker/src/state/verifications.rs @@ -49,7 +49,7 @@ impl ConsensusState { // Verify that we haven't already received 2 blocks for this slot // If the block isn't already present two times we save it and return false // If the block is already present two times we return true - pub(crate) fn detect_multistake(&mut self, header: &SecuredHeader) -> bool { + fn detect_multistake(&mut self, header: &SecuredHeader) -> bool { let entry = self .nonfinal_active_blocks_per_slot .entry(header.content.slot) @@ -292,10 +292,7 @@ impl ConsensusState { /// - Slot above 0. /// - Valid thread. /// - Check that the block is older than the latest final one in thread. 
- /// - Check that the block slot is not too much into the future, - /// as determined by the configuration `future_block_processing_max_periods`. /// - Check if it was the creator's turn to create this block. - /// - TODO: check for double staking. /// - Check parents are present. /// - Check the topological consistency of the parents. /// - Check endorsements. @@ -303,7 +300,7 @@ impl ConsensusState { /// - Check grandpa incompatibility test. /// - Check if the block is incompatible with a parent. /// - Check if the block is incompatible with a final block. - pub fn check_header( + fn check_header( &self, block_id: &BlockId, header: &SecuredHeader, @@ -326,17 +323,6 @@ impl ConsensusState { return Ok(HeaderCheckOutcome::Discard(DiscardReason::Stale)); } - // check if block slot is too much in the future - if let Some(cur_slot) = current_slot { - if header.content.slot.period - > cur_slot - .period - .saturating_add(self.config.future_block_processing_max_periods) - { - return Ok(HeaderCheckOutcome::WaitForSlot); - } - } - // check if it was the creator's turn to create this block // (step 1 in consensus/pos.md) let slot_draw_address = match self diff --git a/massa-consensus-worker/src/worker/init.rs b/massa-consensus-worker/src/worker/init.rs index 45bc2bf92ea..ab381e5277f 100644 --- a/massa-consensus-worker/src/worker/init.rs +++ b/massa-consensus-worker/src/worker/init.rs @@ -41,7 +41,10 @@ pub fn create_genesis_block( ) -> Result { let keypair = &cfg.genesis_key; let header = BlockHeader::new_verifiable( + // VERSIONNING TODO: what to implement here in case of restart? BlockHeader { + current_version: 0, + announced_version: 0, slot: Slot::new(cfg.last_start_period, thread_number), parents: Vec::new(), operation_merkle_root: Hash::compute_from(&Vec::new()), @@ -132,7 +135,7 @@ impl ConsensusWorker { info!( "Started node at time {}, cycle {}, period {}, thread {}", - now.to_utc_string(), + now.format_instant(), next_slot.get_cycle(config.periods_per_cycle), next_slot.period, next_slot.thread, diff --git a/massa-consensus-worker/src/worker/mod.rs b/massa-consensus-worker/src/worker/mod.rs index 281da8c0e60..44615054c88 100644 --- a/massa-consensus-worker/src/worker/mod.rs +++ b/massa-consensus-worker/src/worker/mod.rs @@ -79,6 +79,7 @@ pub fn start_consensus_worker( new_stale_blocks: Default::default(), incoming_index: Default::default(), active_index: Default::default(), + active_index_without_ops: Default::default(), save_final_periods: Default::default(), latest_final_blocks_periods: Default::default(), best_parents: Default::default(), diff --git a/massa-db/Cargo.toml b/massa-db/Cargo.toml new file mode 100644 index 00000000000..b06c258172a --- /dev/null +++ b/massa-db/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "massa_db" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +parking_lot = { version = "0.12", features = ["deadlock_detection"] } +rocksdb = "0.20" +displaydoc = "0.2" +thiserror = "1.0" +lsmtree = "=0.1.1" + +# Custom modules +massa_hash = { path = "../massa-hash" } +massa_models = { path = "../massa-models" } +massa_serialization = { path = "../massa-serialization" } diff --git a/massa-db/src/constants.rs b/massa-db/src/constants.rs new file mode 100644 index 00000000000..d74b41e186c --- /dev/null +++ b/massa-db/src/constants.rs @@ -0,0 +1,85 @@ +use massa_hash::HASH_SIZE_BYTES; + +// Commons +pub const LSMTREE_NODES_CF: &str = "lsmtree_nodes"; +pub const 
LSMTREE_VALUES_CF: &str = "lsmtree_values"; +pub const METADATA_CF: &str = "metadata"; +pub const STATE_CF: &str = "state"; +pub const VERSIONING_CF: &str = "versioning"; + +pub const STATE_HASH_KEY: &[u8; 1] = b"h"; +pub const STATE_HASH_INITIAL_BYTES: &[u8; 32] = &[0; HASH_SIZE_BYTES]; +pub const CHANGE_ID_KEY: &[u8; 1] = b"c"; + +pub const CHANGE_ID_DESER_ERROR: &str = "critical: change_id deserialization failed"; +pub const CHANGE_ID_SER_ERROR: &str = "critical: change_id serialization failed"; + +// Errors +pub const CF_ERROR: &str = "critical: rocksdb column family operation failed"; +pub const OPEN_ERROR: &str = "critical: rocksdb open operation failed"; +pub const LSMTREE_ERROR: &str = "critical: lsmtree insert / remove open operation failed"; +pub const CRUD_ERROR: &str = "critical: rocksdb crud operation failed"; +pub const STATE_HASH_ERROR: &str = "critical: saved state hash is corrupted"; + +// Prefixes +pub const CYCLE_HISTORY_PREFIX: &str = "cycle_history/"; +pub const DEFERRED_CREDITS_PREFIX: &str = "deferred_credits/"; +pub const ASYNC_POOL_PREFIX: &str = "async_pool/"; +pub const EXECUTED_OPS_PREFIX: &str = "executed_ops/"; +pub const EXECUTED_DENUNCIATIONS_PREFIX: &str = "executed_denunciations/"; +pub const LEDGER_PREFIX: &str = "ledger/"; +pub const MIP_STORE_PREFIX: &str = "versioning/"; +pub const MIP_STORE_STATS_PREFIX: &str = "versioning_stats/"; + +// Async Pool +pub const ASYNC_POOL_HASH_ERROR: &str = "critical: saved async pool hash is corrupted"; +pub const ASYNC_POOL_HASH_KEY: &[u8; 4] = b"ap_h"; +pub const ASYNC_POOL_HASH_INITIAL_BYTES: &[u8; 32] = &[0; HASH_SIZE_BYTES]; + +pub const MESSAGE_DESER_ERROR: &str = "critical: message deserialization failed"; +pub const MESSAGE_SER_ERROR: &str = "critical: message serialization failed"; +pub const MESSAGE_ID_DESER_ERROR: &str = "critical: message_id deserialization failed"; +pub const MESSAGE_ID_SER_ERROR: &str = "critical: message_id serialization failed"; + +// PosState +pub const CYCLE_HISTORY_HASH_ERROR: &str = "critical: saved cycle_history hash is corrupted"; +pub const CYCLE_HISTORY_HASH_KEY: &[u8; 4] = b"ch_h"; +pub const CYCLE_HISTORY_HASH_INITIAL_BYTES: &[u8; 32] = &[0; HASH_SIZE_BYTES]; + +pub const CYCLE_HISTORY_DESER_ERROR: &str = "critical: cycle_history deserialization failed"; +pub const CYCLE_HISTORY_SER_ERROR: &str = "critical: cycle_history serialization failed"; + +pub const DEFERRED_CREDITS_HASH_ERROR: &str = "critical: saved deferred_credits hash is corrupted"; +pub const DEFERRED_CREDITS_HASH_KEY: &[u8; 4] = b"dc_h"; +pub const DEFERRED_CREDITS_HASH_INITIAL_BYTES: &[u8; 32] = &[0; HASH_SIZE_BYTES]; + +pub const DEFERRED_CREDITS_DESER_ERROR: &str = "critical: deferred_credits deserialization failed"; +pub const DEFERRED_CREDITS_SER_ERROR: &str = "critical: deferred_credits serialization failed"; + +// Executed Ops +pub const EXECUTED_OPS_HASH_ERROR: &str = "critical: saved executed_ops hash is corrupted"; +pub const EXECUTED_OPS_HASH_KEY: &[u8; 4] = b"eo_h"; +pub const EXECUTED_OPS_HASH_INITIAL_BYTES: &[u8; 32] = &[0; HASH_SIZE_BYTES]; + +pub const EXECUTED_OPS_ID_DESER_ERROR: &str = "critical: executed_ops_id deserialization failed"; +pub const EXECUTED_OPS_ID_SER_ERROR: &str = "critical: executed_ops_id serialization failed"; + +// Executed Denunciations +pub const EXECUTED_DENUNCIATIONS_HASH_ERROR: &str = + "critical: saved executed_denunciations hash is corrupted"; +pub const EXECUTED_DENUNCIATIONS_HASH_KEY: &[u8; 4] = b"ed_h"; +pub const EXECUTED_DENUNCIATIONS_HASH_INITIAL_BYTES: &[u8; 
32] = &[0; HASH_SIZE_BYTES];
+
+pub const EXECUTED_DENUNCIATIONS_INDEX_DESER_ERROR: &str =
+    "critical: executed_denunciations_index deserialization failed";
+pub const EXECUTED_DENUNCIATIONS_INDEX_SER_ERROR: &str =
+    "critical: executed_denunciations_index serialization failed";
+
+// Ledger
+pub const LEDGER_HASH_ERROR: &str = "critical: saved ledger hash is corrupted";
+pub const LEDGER_HASH_KEY: &[u8; 3] = b"l_h";
+pub const LEDGER_HASH_INITIAL_BYTES: &[u8; 32] = &[0; HASH_SIZE_BYTES];
+
+pub const KEY_DESER_ERROR: &str = "critical: key deserialization failed";
+pub const KEY_SER_ERROR: &str = "critical: key serialization failed";
+pub const KEY_LEN_SER_ERROR: &str = "critical: key length serialization failed";
diff --git a/massa-db/src/error.rs b/massa-db/src/error.rs
new file mode 100644
index 00000000000..c65b1b76cef
--- /dev/null
+++ b/massa-db/src/error.rs
@@ -0,0 +1,20 @@
+//! Copyright (c) 2022 MASSA LABS
+
+//! This file defines all error types for Massa DB management
+
+use displaydoc::Display;
+use thiserror::Error;
+
+/// Massa DB error
+#[non_exhaustive]
+#[derive(Display, Error, Debug)]
+pub enum MassaDBError {
+    /// invalid ChangeID: {0}
+    InvalidChangeID(String),
+    /// time error: {0}
+    TimeError(String),
+    /// rocks db error: {0}
+    RocksDBError(String),
+    /// hash error: {0}
+    HashError(String),
+}
diff --git a/massa-db/src/lib.rs b/massa-db/src/lib.rs
new file mode 100644
index 00000000000..eabb86d170b
--- /dev/null
+++ b/massa-db/src/lib.rs
@@ -0,0 +1,9 @@
+#![feature(btree_cursors)]
+
+mod constants;
+mod error;
+mod massa_db;
+
+pub use crate::massa_db::*;
+pub use constants::*;
+pub use error::*;
diff --git a/massa-db/src/massa_db.rs b/massa-db/src/massa_db.rs
new file mode 100644
index 00000000000..f881bc0d45f
--- /dev/null
+++ b/massa-db/src/massa_db.rs
@@ -0,0 +1,748 @@
+use crate::{
+    MassaDBError, CF_ERROR, CHANGE_ID_DESER_ERROR, CHANGE_ID_KEY, CHANGE_ID_SER_ERROR, CRUD_ERROR,
+    LSMTREE_ERROR, LSMTREE_NODES_CF, LSMTREE_VALUES_CF, METADATA_CF, OPEN_ERROR, STATE_CF,
+    STATE_HASH_ERROR, STATE_HASH_INITIAL_BYTES, STATE_HASH_KEY, VERSIONING_CF,
+};
+use lsmtree::{bytes::Bytes, BadProof, KVStore, SparseMerkleTree};
+use massa_hash::{Hash, SmtHasher};
+use massa_models::{
+    error::ModelsError,
+    slot::{Slot, SlotDeserializer, SlotSerializer},
+    streaming_step::StreamingStep,
+};
+use massa_serialization::{DeserializeError, Deserializer, Serializer};
+use parking_lot::{Mutex, RwLock};
+use rocksdb::{
+    checkpoint::Checkpoint, ColumnFamilyDescriptor, Direction, IteratorMode, Options, WriteBatch,
+    DB,
+};
+use std::{
+    collections::{BTreeMap, HashMap},
+    format,
+    ops::Bound::{self, Excluded, Included},
+    path::PathBuf,
+    sync::Arc,
+};
+
+type Key = Vec<u8>;
+type Value = Vec<u8>;
+
+/// Wrapped RocksDB database
+///
+/// In our instance, we use Slot as the ChangeID
+pub type MassaDB = RawMassaDB<Slot, SlotSerializer, SlotDeserializer>;
+
+/// We use batching to reduce the number of writes to the database
+///
+/// Here, a DBBatch is a map from Key to Some(Value) for a new or updated value, or None for a deletion
+pub type DBBatch = BTreeMap<Key, Option<Value>>;
+
+/// Config structure for a `RawMassaDB`
+#[derive(Debug, Clone)]
+pub struct MassaDBConfig {
+    /// The path to the database, used in the wrapped RocksDB instance
+    pub path: PathBuf,
+    /// Change history to keep (indexed by ChangeID)
+    pub max_history_length: usize,
+    /// max_new_elements for bootstrap
+    pub max_new_elements: usize,
+    /// Thread count for slot serialization
+    pub thread_count: u8,
+}
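
To make the batch semantics concrete, here is a minimal sketch (not part of the diff itself) of how a caller could stage one upsert and one delete. It assumes a mutable `db: MassaDB` handle and uses illustrative keys; `Some(value)` means insert-or-update, while `None` is a delete marker:

    let mut batch: DBBatch = DBBatch::new();
    // Some(value): insert or update this key
    batch.insert(b"example/key_a".to_vec(), Some(b"value_a".to_vec()));
    // None: delete this key
    batch.insert(b"example/key_b".to_vec(), None);
    // both changes (plus an empty versioning batch) land in one RocksDB write,
    // via the write_batch helper defined further down in this file
    db.write_batch(batch, DBBatch::new(), None);
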
+/// A batch of elements from the database, used by a bootstrap server.
+#[derive(Debug, Clone)]
+pub struct StreamBatch<ChangeID: PartialOrd + Ord + PartialEq + Eq + Clone + std::fmt::Debug> {
+    /// New elements to be streamed to the client.
+    pub new_elements: BTreeMap<Vec<u8>, Vec<u8>>,
+    /// The changes made to previously streamed keys. Note that a None value deletes a given key.
+    pub updates_on_previous_elements: BTreeMap<Vec<u8>, Option<Vec<u8>>>,
+    /// The ChangeID associated with this batch, useful for syncing the changes not streamed yet to the client.
+    pub change_id: ChangeID,
+}
+
+impl<ChangeID: PartialOrd + Ord + PartialEq + Eq + Clone + std::fmt::Debug> StreamBatch<ChangeID> {
+    /// Helper function used to know if the main bootstrap state step is finished.
+    ///
+    /// Note: even after having an empty StreamBatch, we still need to send the updates on previous elements while bootstrap has not finished.
+    pub fn is_empty(&self) -> bool {
+        self.updates_on_previous_elements.is_empty() && self.new_elements.is_empty()
+    }
+}
+
+/// A generic wrapped RocksDB database.
+///
+/// The added features are:
+/// - Hash tracking with Lsm-tree, a Sparse Merkle Tree implementation
+/// - Streaming the database while it is being actively updated
+#[derive()]
+pub struct RawMassaDB<
+    ChangeID: PartialOrd + Ord + PartialEq + Eq + Clone + std::fmt::Debug,
+    ChangeIDSerializer: Serializer<ChangeID>,
+    ChangeIDDeserializer: Deserializer<ChangeID>,
+> {
+    /// The rocksdb instance
+    pub db: Arc<DB>,
+    /// configuration for the `RawMassaDB`
+    config: MassaDBConfig,
+    /// In change_history, we keep the latest changes made to the database, useful for streaming them to a client.
+    pub change_history: BTreeMap<ChangeID, BTreeMap<Key, Option<Value>>>,
+    /// same as change_history but for versioning
+    pub change_history_versioning: BTreeMap<ChangeID, BTreeMap<Key, Option<Value>>>,
+    /// A serializer for the ChangeID type
+    change_id_serializer: ChangeIDSerializer,
+    /// A deserializer for the ChangeID type
+    change_id_deserializer: ChangeIDDeserializer,
+    /// The Sparse Merkle Tree instance used to keep track of the global hash of the database
+    lsmtree: SparseMerkleTree<MassaDbLsmtree>,
+    /// The current RocksDB batch of the database, in a Mutex to share it with lsmtree
+    current_batch: Arc<Mutex<WriteBatch>>,
+    /// The current RocksDB cache for this batch, useful for lsmtree
+    current_hashmap: SharedSmtCache,
+}
+
+type SharedSmtCache = Arc<RwLock<HashMap<[u8; 32], Option<Bytes>>>>;
+
+/// Wrapper for the Lsm-tree database type
+struct MassaDbLsmtree {
+    pub cf: &'static str,
+    pub db: Arc<DB>,
+    pub current_batch: Arc<Mutex<WriteBatch>>,
+    pub current_hashmap: SharedSmtCache,
+}
+
+impl MassaDbLsmtree {
+    /// Constructor for `MassaDbLsmtree`
+    pub fn new(
+        cf: &'static str,
+        db: Arc<DB>,
+        current_batch: Arc<Mutex<WriteBatch>>,
+        current_hashmap: SharedSmtCache,
+    ) -> Self {
+        Self {
+            cf,
+            db,
+            current_batch,
+            current_hashmap,
+        }
+    }
+}
+
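
Before the `KVStore` implementation that follows, a rough, self-contained sketch of the Sparse Merkle Tree pattern may help. It is not massa code: an in-memory store plays the role of `MassaDbLsmtree`, and `sha2::Sha256` is assumed as a stand-in for `SmtHasher`. Updating a leaf changes `root()`, the commitment to the whole state, and removing that leaf should restore the previous root, which is the property the executed-ops test at the end of this diff relies on:

    use lsmtree::{bytes::Bytes, BadProof, KVStore, SparseMerkleTree};
    use std::collections::HashMap;

    /// Toy in-memory node/value store with the same KVStore shape as MassaDbLsmtree,
    /// but backed by a HashMap instead of a RocksDB column family.
    #[derive(Default)]
    struct MemStore(HashMap<Vec<u8>, Bytes>);

    impl KVStore for MemStore {
        type Hasher = sha2::Sha256; // assumption: any 32-byte digest hasher
        type Error = BadProof; // same choice as the MassaDbLsmtree impl below

        fn get(&self, key: &[u8]) -> Result<Option<Bytes>, Self::Error> {
            Ok(self.0.get(key).cloned())
        }
        fn set(&mut self, key: Bytes, value: Bytes) -> Result<(), Self::Error> {
            self.0.insert(key.to_vec(), value);
            Ok(())
        }
        fn remove(&mut self, key: &[u8]) -> Result<Bytes, Self::Error> {
            Ok(self.0.remove(key).expect("removed key must exist"))
        }
        fn contains(&self, key: &[u8]) -> Result<bool, Self::Error> {
            Ok(self.0.contains_key(key))
        }
    }

    fn main() {
        let mut smt = SparseMerkleTree::new_with_stores(MemStore::default(), MemStore::default());
        let empty_root = smt.root();

        // mirrors write_changes below: the tree stores hash(key) -> hash(value) leaves
        smt.update(b"some_key_hash", Bytes::from("some_value_hash"))
            .expect("update failed");
        assert_ne!(smt.root(), empty_root); // the root now commits to the new state

        smt.remove(b"some_key_hash").expect("remove failed");
        assert_eq!(smt.root(), empty_root); // deleting the leaf restores the old root
    }
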
+/// Implementation of the Database trait of Lsm-tree for our wrapper.
+impl KVStore for MassaDbLsmtree {
+    type Hasher = SmtHasher;
+    type Error = BadProof;
+
+    /// Get a value from the database, prioritizing the cache
+    fn get(&self, key: &[u8]) -> Result<Option<Bytes>, Self::Error> {
+        let key: [u8; 32] = key.try_into().expect(LSMTREE_ERROR);
+        if let Some(val) = self.current_hashmap.read().get(&key) {
+            return Ok(val.clone());
+        }
+        let handle_lsmtree = self.db.cf_handle(self.cf).expect(CF_ERROR);
+        let value = self
+            .db
+            .get_cf(handle_lsmtree, key)
+            .expect(CRUD_ERROR)
+            .map(Bytes::from);
+        self.current_hashmap.write().insert(key, value.clone());
+        Ok(value)
+    }
+
+    /// Set a value in the database (in a batch), updating the cache
+    fn set(&mut self, key: Bytes, value: Bytes) -> Result<(), Self::Error> {
+        let key: [u8; 32] = key.to_vec().try_into().expect(LSMTREE_ERROR);
+        let handle_lsmtree = self.db.cf_handle(self.cf).expect(CF_ERROR);
+        self.current_batch
+            .lock()
+            .put_cf(handle_lsmtree, key, value.clone());
+        self.current_hashmap.write().insert(key, Some(value));
+        Ok(())
+    }
+
+    /// Remove a value from the database (in a batch), updating the cache
+    fn remove(&mut self, key: &[u8]) -> Result<Bytes, Self::Error> {
+        let key: [u8; 32] = key.to_vec().try_into().expect(LSMTREE_ERROR);
+        let handle_lsmtree = self.db.cf_handle(self.cf).expect(CF_ERROR);
+        let val = self.get(&key)?.expect(LSMTREE_ERROR);
+        self.current_batch.lock().delete_cf(handle_lsmtree, key);
+        self.current_hashmap.write().insert(key, None);
+        Ok(val)
+    }
+
+    /// Check if a key is in the database
+    fn contains(&self, key: &[u8]) -> Result<bool, Self::Error> {
+        let key: [u8; 32] = key.try_into().expect(LSMTREE_ERROR);
+        Ok(self.get(&key)?.is_some())
+    }
+}
+
+impl<ChangeID, ChangeIDSerializer, ChangeIDDeserializer> std::fmt::Debug
+    for RawMassaDB<ChangeID, ChangeIDSerializer, ChangeIDDeserializer>
+where
+    ChangeID: PartialOrd + Ord + PartialEq + Eq + Clone + std::fmt::Debug,
+    ChangeIDSerializer: Serializer<ChangeID>,
+    ChangeIDDeserializer: Deserializer<ChangeID>,
+{
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("RawMassaDB")
+            .field("db", &self.db)
+            .field("config", &self.config)
+            .field("change_history", &self.change_history)
+            .finish()
+    }
+}
+
+impl<ChangeID, ChangeIDSerializer, ChangeIDDeserializer>
+    RawMassaDB<ChangeID, ChangeIDSerializer, ChangeIDDeserializer>
+where
+    ChangeID: PartialOrd + Ord + PartialEq + Eq + Clone + std::fmt::Debug,
+    ChangeIDSerializer: Serializer<ChangeID>,
+    ChangeIDDeserializer: Deserializer<ChangeID>,
+{
+    /// Used for bootstrap servers (get a new batch to stream to the client)
+    ///
+    /// Returns a StreamBatch<ChangeID>
+    pub fn get_batch_to_stream(
+        &self,
+        last_obtained: Option<(Vec<u8>, ChangeID)>,
+    ) -> Result<StreamBatch<ChangeID>, MassaDBError> {
+        let (updates_on_previous_elements, max_key) = if let Some((max_key, last_change_id)) =
+            last_obtained
+        {
+            match last_change_id.cmp(&self.get_change_id().expect(CHANGE_ID_DESER_ERROR)) {
+                std::cmp::Ordering::Greater => {
+                    return Err(MassaDBError::TimeError(String::from(
+                        "we don't have this change yet on this node (it's in the future for us)",
+                    )));
+                }
+                std::cmp::Ordering::Equal => {
+                    (BTreeMap::new(), Some(max_key)) // no new updates
+                }
+                std::cmp::Ordering::Less => {
+                    // We should send all the new updates since last_change_id
+
+                    let cursor = self
+                        .change_history
+                        .lower_bound(Bound::Excluded(&last_change_id));
+
+                    if cursor.peek_prev().is_none() {
+                        return Err(MassaDBError::TimeError(String::from(
+                            "all our changes are strictly after last_change_id, we can't be sure we did not miss any",
+                        )));
+                    }
+
+                    match cursor.key() {
+                        Some(cursor_change_id) => {
+                            // We have to send all the updates since cursor_change_id
+                            // TODO_PR: check if / how we want to limit the number of updates we send. It may be needed but tricky to implement.
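+                            // Fold all change sets recorded since the cursor into a single
+                            // map, restricted to keys <= max_key: keys above max_key have
+                            // not been streamed yet, so the client will receive them as
+                            // new_elements rather than as updates.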
+ let mut updates: BTreeMap, Option>> = BTreeMap::new(); + let iter = self + .change_history + .range((Bound::Included(cursor_change_id), Bound::Unbounded)); + for (_change_id, changes) in iter { + updates.extend( + changes + .range(( + Bound::>::Unbounded, + Included(max_key.clone()), + )) + .map(|(k, v)| (k.clone(), v.clone())), + ); + } + (updates, Some(max_key)) + } + None => (BTreeMap::new(), Some(max_key)), // no new updates + } + } + } + } else { + (BTreeMap::new(), None) // we start from the beginning, so no updates on previous elements + }; + + let mut new_elements = BTreeMap::new(); + let handle = self.db.cf_handle(STATE_CF).expect(CF_ERROR); + + // Creates an iterator from the next element after the last if defined, otherwise initialize it at the first key. + let db_iterator = match max_key { + None => self.db.iterator_cf(handle, IteratorMode::Start), + Some(max_key) => { + let mut iter = self + .db + .iterator_cf(handle, IteratorMode::From(&max_key, Direction::Forward)); + iter.next(); + iter + } + }; + + for (serialized_key, serialized_value) in db_iterator.flatten() { + if new_elements.len() < self.config.max_new_elements { + new_elements.insert(serialized_key.to_vec(), serialized_value.to_vec()); + } else { + break; + } + } + + Ok(StreamBatch { + new_elements, + updates_on_previous_elements, + change_id: self.get_change_id().expect(CHANGE_ID_DESER_ERROR), + }) + } + + /// Used for bootstrap servers (get a new batch to stream to the client) + /// + /// Returns a StreamBatch + pub fn get_versioning_batch_to_stream( + &self, + last_obtained: Option<(Vec, ChangeID)>, + ) -> Result, MassaDBError> { + let (updates_on_previous_elements, max_key) = if let Some((max_key, last_change_id)) = + last_obtained + { + match last_change_id.cmp(&self.get_change_id().expect(CHANGE_ID_DESER_ERROR)) { + std::cmp::Ordering::Greater => { + return Err(MassaDBError::TimeError(String::from( + "we don't have this change yet on this node (it's in the future for us)", + ))); + } + std::cmp::Ordering::Equal => { + (BTreeMap::new(), Some(max_key)) // no new updates + } + std::cmp::Ordering::Less => { + // We should send all the new updates since last_change_id + + let cursor = self + .change_history + .lower_bound(Bound::Excluded(&last_change_id)); + + if cursor.peek_prev().is_none() { + return Err(MassaDBError::TimeError(String::from( + "all our changes are strictly after last_change_id, we can't be sure we did not miss any", + ))); + } + + match cursor.key() { + Some(cursor_change_id) => { + // We have to send all the updates since cursor_change_id + // TODO_PR: check if / how we want to limit the number of updates we send. It may be needed but tricky to implement. + let mut updates: BTreeMap, Option>> = BTreeMap::new(); + let iter = self + .change_history + .range((Bound::Included(cursor_change_id), Bound::Unbounded)); + for (_change_id, changes) in iter { + updates.extend( + changes + .range(( + Bound::>::Unbounded, + Included(max_key.clone()), + )) + .map(|(k, v)| (k.clone(), v.clone())), + ); + } + (updates, Some(max_key)) + } + None => (BTreeMap::new(), Some(max_key)), // no new updates + } + } + } + } else { + (BTreeMap::new(), None) // we start from the beginning, so no updates on previous elements + }; + + let mut new_elements = BTreeMap::new(); + let handle = self.db.cf_handle(VERSIONING_CF).expect(CF_ERROR); + + // Creates an iterator from the next element after the last if defined, otherwise initialize it at the first key. 
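+        // IteratorMode::From positions the cursor on `max_key` itself (the last key
+        // already sent), so the `iter.next()` below skips it and streaming resumes at
+        // the first strictly greater key.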
+ let db_iterator = match max_key { + None => self.db.iterator_cf(handle, IteratorMode::Start), + Some(max_key) => { + let mut iter = self + .db + .iterator_cf(handle, IteratorMode::From(&max_key, Direction::Forward)); + iter.next(); + iter + } + }; + + for (serialized_key, serialized_value) in db_iterator.flatten() { + if new_elements.len() < self.config.max_new_elements { + new_elements.insert(serialized_key.to_vec(), serialized_value.to_vec()); + } else { + break; + } + } + + Ok(StreamBatch { + new_elements, + updates_on_previous_elements, + change_id: self.get_change_id().expect(CHANGE_ID_DESER_ERROR), + }) + } + + /// Used for: + /// - Bootstrap clients, to write on disk a new received Stream (reset_history: true) + /// - Normal operations, to write changes associated to a given change_id (reset_history: false) + /// + pub fn write_changes( + &mut self, + changes: BTreeMap>, + versioning_changes: BTreeMap>, + change_id: Option, + reset_history: bool, + ) -> Result<(), MassaDBError> { + if let Some(change_id) = change_id.clone() { + if change_id < self.get_change_id().expect(CHANGE_ID_DESER_ERROR) { + return Err(MassaDBError::InvalidChangeID(String::from( + "change_id should monotonically increase after every write", + ))); + } + } + + let handle_state = self.db.cf_handle(STATE_CF).expect(CF_ERROR); + let handle_metadata = self.db.cf_handle(METADATA_CF).expect(CF_ERROR); + let handle_versioning = self.db.cf_handle(VERSIONING_CF).expect(CF_ERROR); + + *self.current_batch.lock() = WriteBatch::default(); + + for (key, value) in changes.iter() { + if let Some(value) = value { + self.current_batch.lock().put_cf(handle_state, key, value); + let key_hash = Hash::compute_from(key); + let value_hash = Hash::compute_from(value); + + self.lsmtree + .update( + key_hash.to_bytes(), + Bytes::from(value_hash.to_bytes().to_vec()), + ) + .expect(LSMTREE_ERROR); + } else { + self.current_batch.lock().delete_cf(handle_state, key); + let key_hash = Hash::compute_from(key); + + self.lsmtree + .remove(key_hash.to_bytes()) + .expect(LSMTREE_ERROR); + } + } + + // in versioning_changes, we have the data that we do not want to include in hash + // e.g everything that is not in 'Active' state (so hashes remain compatibles) + for (key, value) in versioning_changes.iter() { + if let Some(value) = value { + self.current_batch + .lock() + .put_cf(handle_versioning, key, value); + } else { + self.current_batch.lock().delete_cf(handle_versioning, key); + } + } + + if let Some(change_id) = change_id { + self.set_change_id_to_batch(change_id); + } + + self.current_batch + .lock() + .put_cf(handle_metadata, STATE_HASH_KEY, self.lsmtree.root()); + + { + let mut current_batch_guard = self.current_batch.lock(); + let batch = WriteBatch::from_data(current_batch_guard.data()); + current_batch_guard.clear(); + + self.db.write(batch).map_err(|e| { + MassaDBError::RocksDBError(format!("Can't write batch to disk: {}", e)) + })?; + } + + self.current_hashmap.write().clear(); + + self.change_history + .entry(self.get_change_id().expect(CHANGE_ID_DESER_ERROR)) + .and_modify(|map| map.extend(changes.clone().into_iter())) + .or_insert(changes); + + self.change_history_versioning + .entry(self.get_change_id().expect(CHANGE_ID_DESER_ERROR)) + .and_modify(|map| map.extend(versioning_changes.clone().into_iter())) + .or_insert(versioning_changes); + + if reset_history { + self.change_history.clear(); + } + + while self.change_history.len() > self.config.max_history_length { + self.change_history.pop_first(); + } + + Ok(()) + } + + /// Get 
the current change_id attached to the database.
+    pub fn get_change_id(&self) -> Result<ChangeID, ModelsError> {
+        let db = &self.db;
+        let handle = db.cf_handle(METADATA_CF).expect(CF_ERROR);
+
+        let Ok(Some(change_id_bytes)) = db.get_pinned_cf(handle, CHANGE_ID_KEY) else {
+            return Err(ModelsError::BufferError(String::from("Could not recover change_id in database")));
+        };
+
+        let (_rest, change_id) = self
+            .change_id_deserializer
+            .deserialize::<DeserializeError>(&change_id_bytes)
+            .expect(CHANGE_ID_DESER_ERROR);
+
+        Ok(change_id)
+    }
+
+    /// Set the initial change_id. This function should only be called at startup/reset, as it does not batch this set with other changes.
+    pub fn set_initial_change_id(&self, change_id: ChangeID) {
+        self.current_batch.lock().clear();
+
+        self.set_change_id_to_batch(change_id);
+
+        let batch;
+        {
+            let mut current_batch_guard = self.current_batch.lock();
+            batch = WriteBatch::from_data(current_batch_guard.data());
+            current_batch_guard.clear();
+
+            self.db.write(batch).expect(CRUD_ERROR);
+        }
+    }
+
+    /// Set the current change_id in the batch
+    pub fn set_change_id_to_batch(&self, change_id: ChangeID) {
+        let handle_metadata = self.db.cf_handle(METADATA_CF).expect(CF_ERROR);
+
+        let mut change_id_bytes = Vec::new();
+        self.change_id_serializer
+            .serialize(&change_id, &mut change_id_bytes)
+            .expect(CHANGE_ID_SER_ERROR);
+
+        self.current_batch
+            .lock()
+            .put_cf(handle_metadata, CHANGE_ID_KEY, &change_id_bytes);
+    }
+
+    /// Write a stream_batch of database entries received from a bootstrap server
+    pub fn write_batch_bootstrap_client(
+        &mut self,
+        stream_changes: StreamBatch<ChangeID>,
+        stream_changes_versioning: StreamBatch<ChangeID>,
+    ) -> Result<(StreamingStep<Vec<u8>>, StreamingStep<Vec<u8>>), MassaDBError> {
+        let mut changes = BTreeMap::new();
+
+        let new_cursor = match stream_changes.new_elements.last_key_value() {
+            Some((k, _)) => StreamingStep::Ongoing(k.clone()),
+            None => {
+                let handle = self.db.cf_handle(STATE_CF).expect(CF_ERROR);
+                match self.db.iterator_cf(handle, IteratorMode::End).next() {
+                    Some(Ok((serialized_key, _value))) => {
+                        StreamingStep::Finished(Some(serialized_key.to_vec()))
+                    }
+                    _ => StreamingStep::Finished(None),
+                }
+            }
+        };
+
+        changes.extend(stream_changes.updates_on_previous_elements);
+        changes.extend(
+            stream_changes
+                .new_elements
+                .iter()
+                .map(|(k, v)| (k.clone(), Some(v.clone()))),
+        );
+
+        let mut versioning_changes = BTreeMap::new();
+
+        let new_cursor_versioning = match stream_changes_versioning.new_elements.last_key_value() {
+            Some((k, _)) => StreamingStep::Ongoing(k.clone()),
+            None => {
+                let handle = self.db.cf_handle(VERSIONING_CF).expect(CF_ERROR);
+                match self.db.iterator_cf(handle, IteratorMode::End).next() {
+                    Some(Ok((serialized_key, _value))) => {
+                        StreamingStep::Finished(Some(serialized_key.to_vec()))
+                    }
+                    _ => StreamingStep::Finished(None),
+                }
+            }
+        };
+
+        versioning_changes.extend(stream_changes_versioning.updates_on_previous_elements);
+        versioning_changes.extend(
+            stream_changes_versioning
+                .new_elements
+                .iter()
+                .map(|(k, v)| (k.clone(), Some(v.clone()))),
+        );
+
+        self.write_changes(
+            changes,
+            versioning_changes,
+            Some(stream_changes.change_id),
+            true,
+        )?;
+
+        Ok((new_cursor, new_cursor_versioning))
+    }
+
+    /// Get the current state hash of the database
+    pub fn get_db_hash(&self) -> Hash {
+        self.get_db_hash_opt()
+            .unwrap_or(Hash::from_bytes(STATE_HASH_INITIAL_BYTES))
+    }
+
+    /// Get the current state hash of the database
+    fn get_db_hash_opt(&self) -> Option<Hash> {
+        let db = &self.db;
+        let handle = db.cf_handle(METADATA_CF).expect(CF_ERROR);
+
+
db.get_cf(handle, STATE_HASH_KEY) + .expect(CRUD_ERROR) + .as_deref() + .map(|state_hash_bytes| { + Hash::from_bytes(state_hash_bytes.try_into().expect(STATE_HASH_ERROR)) + }) + } +} + +impl RawMassaDB { + /// Returns a new `MassaDB` instance + pub fn new(config: MassaDBConfig) -> Self { + let mut db_opts = Options::default(); + db_opts.create_if_missing(true); + db_opts.create_missing_column_families(true); + + let db = DB::open_cf_descriptors( + &db_opts, + &config.path, + vec![ + ColumnFamilyDescriptor::new(STATE_CF, Options::default()), + ColumnFamilyDescriptor::new(METADATA_CF, Options::default()), + ColumnFamilyDescriptor::new(LSMTREE_NODES_CF, Options::default()), + ColumnFamilyDescriptor::new(LSMTREE_VALUES_CF, Options::default()), + ColumnFamilyDescriptor::new(VERSIONING_CF, Options::default()), + ], + ) + .expect(OPEN_ERROR); + + let db = Arc::new(db); + let current_batch = Arc::new(Mutex::new(WriteBatch::default())); + let current_hashmap = Arc::new(RwLock::new(HashMap::new())); + + let change_id_deserializer = SlotDeserializer::new( + (Included(u64::MIN), Included(u64::MAX)), + (Included(0), Excluded(config.thread_count)), + ); + + let nodes_store = MassaDbLsmtree::new( + LSMTREE_NODES_CF, + db.clone(), + current_batch.clone(), + current_hashmap.clone(), + ); + let values_store = MassaDbLsmtree::new( + LSMTREE_VALUES_CF, + db.clone(), + current_batch.clone(), + current_hashmap.clone(), + ); + + let handle_metadata = db.cf_handle(METADATA_CF).expect(CF_ERROR); + let lsmtree = match db + .get_cf(handle_metadata, STATE_HASH_KEY) + .expect(CRUD_ERROR) + { + Some(hash_bytes) => SparseMerkleTree::import(nodes_store, values_store, hash_bytes), + _ => SparseMerkleTree::new_with_stores(nodes_store, values_store), + }; + + let massa_db = Self { + db, + config, + change_history: BTreeMap::new(), + change_history_versioning: BTreeMap::new(), + change_id_serializer: SlotSerializer::new(), + change_id_deserializer, + lsmtree, + current_batch, + current_hashmap, + }; + + if massa_db.get_change_id().is_err() { + massa_db.set_initial_change_id(Slot { + period: 0, + thread: 0, + }); + } + + massa_db + } + + /// Creates a new hard copy of the DB, for the given slot + pub fn backup_db(&self, slot: Slot) { + let db = &self.db; + + let subpath = format!("backup_{}_{}", slot.period, slot.thread); + + Checkpoint::new(db) + .expect("Cannot init checkpoint") + .create_checkpoint(db.path().join(subpath)) + .expect("Failed to create checkpoint"); + } + + /// Writes the batch to the DB + pub fn write_batch( + &mut self, + batch: DBBatch, + versioning_batch: DBBatch, + change_id: Option, + ) { + self.write_changes(batch, versioning_batch, change_id, false) + .expect(CRUD_ERROR); + } + + /// Utility function to put / update a key & value in the batch + pub fn put_or_update_entry_value(&self, batch: &mut DBBatch, key: Vec, value: &[u8]) { + batch.insert(key, Some(value.to_vec())); + } + + /// Utility function to delete a key & value in the batch + pub fn delete_key(&self, batch: &mut DBBatch, key: Vec) { + batch.insert(key, None); + } + + /// Utility function to delete all keys in a prefix + pub fn delete_prefix(&mut self, prefix: &str, handle_str: &str, change_id: Option) { + let db = &self.db; + + let handle = db.cf_handle(handle_str).expect(CF_ERROR); + let mut batch = DBBatch::new(); + for (serialized_key, _) in db.prefix_iterator_cf(handle, prefix).flatten() { + if !serialized_key.starts_with(prefix.as_bytes()) { + break; + } + + self.delete_key(&mut batch, serialized_key.to_vec()); + } + + match 
handle_str { + STATE_CF => { + self.write_batch(batch, DBBatch::new(), change_id); + } + VERSIONING_CF => { + self.write_batch(DBBatch::new(), batch, change_id); + } + _ => {} + } + } + + /// Reset the database, and attach it to the given slot. + pub fn reset(&mut self, slot: Slot) { + self.set_initial_change_id(slot); + self.change_history.clear(); + self.current_hashmap.write().clear(); + } +} diff --git a/massa-executed-ops/Cargo.toml b/massa-executed-ops/Cargo.toml index 162d0d7f34c..e42d0e7347b 100644 --- a/massa-executed-ops/Cargo.toml +++ b/massa-executed-ops/Cargo.toml @@ -1,11 +1,18 @@ [package] name = "massa_executed_ops" -version = "0.1.0" +version = "0.23.0" authors = ["Massa Labs "] edition = "2021" [dependencies] -nom = "7.1" +nom = "=7.1" +rocksdb = "0.20" +parking_lot = { version = "0.12", features = ["deadlock_detection"] } massa_models = { path = "../massa-models" } massa_hash = { path = "../massa-hash" } massa_serialization = { path = "../massa-serialization" } +massa_db = { path = "../massa-db" } + +[dev-dependencies] +tempfile = "3.3" +massa_ledger_worker = { path = "../massa-ledger-worker" } \ No newline at end of file diff --git a/massa-executed-ops/src/config.rs b/massa-executed-ops/src/config.rs index 9714067887b..73bd4772c4f 100644 --- a/massa-executed-ops/src/config.rs +++ b/massa-executed-ops/src/config.rs @@ -4,14 +4,14 @@ pub struct ExecutedOpsConfig { /// Number of threads pub thread_count: u8, - /// Maximum size of a bootstrap part - pub bootstrap_part_size: u64, } #[derive(Debug, Clone)] pub struct ExecutedDenunciationsConfig { /// Period delta for denunciation to expire pub denunciation_expire_periods: u64, - /// Maximum size of a bootstrap part - pub bootstrap_part_size: u64, + /// Number of threads + pub thread_count: u8, + /// Number of endorsements + pub endorsement_count: u32, } diff --git a/massa-executed-ops/src/executed_denunciations.rs b/massa-executed-ops/src/executed_denunciations.rs index a06c0d7dc33..70e9db07f0f 100644 --- a/massa-executed-ops/src/executed_denunciations.rs +++ b/massa-executed-ops/src/executed_denunciations.rs @@ -4,7 +4,8 @@ //! Used to detect denunciation reuse. use std::collections::{BTreeMap, HashSet}; -use std::ops::Bound::{Excluded, Included, Unbounded}; +use std::ops::Bound::{Excluded, Included}; +use std::sync::Arc; use nom::{ error::{context, ContextError, ParseError}, @@ -12,103 +13,149 @@ use nom::{ sequence::tuple, IResult, Parser, }; +use parking_lot::RwLock; use crate::{ExecutedDenunciationsChanges, ExecutedDenunciationsConfig}; - -use massa_hash::{Hash, HASH_SIZE_BYTES}; +use massa_db::{ + DBBatch, MassaDB, CF_ERROR, CRUD_ERROR, EXECUTED_DENUNCIATIONS_INDEX_DESER_ERROR, + EXECUTED_DENUNCIATIONS_INDEX_SER_ERROR, EXECUTED_DENUNCIATIONS_PREFIX, STATE_CF, +}; use massa_models::denunciation::Denunciation; -use massa_models::streaming_step::StreamingStep; use massa_models::{ denunciation::{DenunciationIndex, DenunciationIndexDeserializer, DenunciationIndexSerializer}, slot::{Slot, SlotDeserializer, SlotSerializer}, }; use massa_serialization::{ - Deserializer, SerializeError, Serializer, U64VarIntDeserializer, U64VarIntSerializer, + DeserializeError, Deserializer, SerializeError, Serializer, U64VarIntDeserializer, + U64VarIntSerializer, }; -const EXECUTED_DENUNCIATIONS_HASH_INITIAL_BYTES: &[u8; 32] = &[0; HASH_SIZE_BYTES]; +/// Denunciation index key formatting macro +#[macro_export] +macro_rules! 
denunciation_index_key { + ($id:expr) => { + [&EXECUTED_DENUNCIATIONS_PREFIX.as_bytes(), &$id[..]].concat() + }; +} /// A structure to list and prune previously executed denunciations -#[derive(Debug, Clone)] +#[derive(Clone)] pub struct ExecutedDenunciations { /// Executed denunciations configuration config: ExecutedDenunciationsConfig, + /// Access to the RocksDB database + pub db: Arc>, /// for better pruning complexity pub sorted_denunciations: BTreeMap>, - /// for better insertion complexity - pub denunciations: HashSet, - /// Accumulated hash of the executed denunciations - pub hash: Hash, + /// for rocksdb serialization + denunciation_index_serializer: DenunciationIndexSerializer, + /// for rocksdb deserialization + denunciation_index_deserializer: DenunciationIndexDeserializer, } impl ExecutedDenunciations { /// Create a new `ExecutedDenunciations` - pub fn new(config: ExecutedDenunciationsConfig) -> Self { + pub fn new(config: ExecutedDenunciationsConfig, db: Arc>) -> Self { + let denunciation_index_deserializer = + DenunciationIndexDeserializer::new(config.thread_count, config.endorsement_count); Self { config, + db, sorted_denunciations: Default::default(), - denunciations: Default::default(), - hash: Hash::from_bytes(EXECUTED_DENUNCIATIONS_HASH_INITIAL_BYTES), + denunciation_index_serializer: DenunciationIndexSerializer::new(), + denunciation_index_deserializer, } } - /// Reset the executed operations - /// - /// USED FOR BOOTSTRAP ONLY - pub fn reset(&mut self) { + /// Recomputes the local caches after bootstrap or loading the state from disk + pub fn recompute_sorted_denunciations(&mut self) { self.sorted_denunciations.clear(); - self.denunciations.clear(); - self.hash = Hash::from_bytes(EXECUTED_DENUNCIATIONS_HASH_INITIAL_BYTES); - } - /// Returns the number of executed operations - pub fn len(&self) -> usize { - self.denunciations.len() + let db = self.db.read(); + let handle = db.db.cf_handle(STATE_CF).expect(CF_ERROR); + + for (serialized_de_idx, _) in db + .db + .prefix_iterator_cf(handle, EXECUTED_DENUNCIATIONS_PREFIX) + .flatten() + { + if !serialized_de_idx.starts_with(EXECUTED_DENUNCIATIONS_PREFIX.as_bytes()) { + break; + } + let (_, de_idx) = self + .denunciation_index_deserializer + .deserialize::( + &serialized_de_idx[EXECUTED_DENUNCIATIONS_PREFIX.len()..], + ) + .expect(EXECUTED_DENUNCIATIONS_INDEX_DESER_ERROR); + + self.sorted_denunciations + .entry(*de_idx.get_slot()) + .and_modify(|ids| { + ids.insert(de_idx); + }) + .or_insert_with(|| { + let mut new = HashSet::default(); + new.insert(de_idx); + new + }); + } } - /// Check executed ops emptiness - pub fn is_empty(&self) -> bool { - self.denunciations.is_empty() + /// Reset the executed denunciations + /// + /// USED FOR BOOTSTRAP ONLY + pub fn reset(&mut self) { + { + let mut db = self.db.write(); + db.delete_prefix(EXECUTED_DENUNCIATIONS_PREFIX, STATE_CF, None); + } + + self.recompute_sorted_denunciations(); } /// Check if a denunciation (e.g. 
a denunciation index) was executed pub fn contains(&self, de_idx: &DenunciationIndex) -> bool { - self.denunciations.contains(de_idx) - } + let db = self.db.read(); + let handle = db.db.cf_handle(STATE_CF).expect(CF_ERROR); - /// Internal function used to insert the values of an operation id iter and update the object hash - fn extend_and_compute_hash<'a, I>(&mut self, values: I) - where - I: Iterator, - { - for de_idx in values { - if self.denunciations.insert((*de_idx).clone()) { - self.hash ^= de_idx.get_hash(); - } - } + let mut serialized_de_idx = Vec::new(); + self.denunciation_index_serializer + .serialize(de_idx, &mut serialized_de_idx) + .expect(EXECUTED_DENUNCIATIONS_INDEX_SER_ERROR); + + db.db + .get_cf(handle, denunciation_index_key!(serialized_de_idx)) + .expect(CRUD_ERROR) + .is_some() } /// Apply speculative operations changes to the final executed denunciations state - pub fn apply_changes(&mut self, changes: ExecutedDenunciationsChanges, slot: Slot) { - self.extend_and_compute_hash(changes.iter()); + pub fn apply_changes_to_batch( + &mut self, + changes: ExecutedDenunciationsChanges, + slot: Slot, + batch: &mut DBBatch, + ) { for de_idx in changes { + self.put_entry(&de_idx, batch); self.sorted_denunciations .entry(*de_idx.get_slot()) .and_modify(|ids| { - ids.insert(de_idx.clone()); + ids.insert(de_idx); }) .or_insert_with(|| { let mut new = HashSet::default(); - new.insert(de_idx.clone()); + new.insert(de_idx); new }); } - self.prune(slot); + self.prune_to_batch(slot, batch); } /// Prune all denunciations that have expired, assuming the given slot is final - fn prune(&mut self, slot: Slot) { + fn prune_to_batch(&mut self, slot: Slot, batch: &mut DBBatch) { let drained: Vec<(Slot, HashSet)> = self .sorted_denunciations .drain_filter(|de_idx_slot, _| { @@ -122,67 +169,61 @@ impl ExecutedDenunciations { for (_slot, de_indexes) in drained { for de_idx in de_indexes { - self.denunciations.remove(&de_idx); - self.hash ^= de_idx.get_hash(); + self.delete_entry(&de_idx, batch) } } } - /// Get a part of the executed denunciations. - /// Used exclusively by the bootstrap server. + /// Add a denunciation_index to the DB /// - /// # Returns - /// A tuple containing the data and the next executed de streaming step - pub fn get_executed_de_part( - &self, - cursor: StreamingStep, - ) -> ( - BTreeMap>, - StreamingStep, - ) { - let mut de_part = BTreeMap::new(); - let left_bound = match cursor { - StreamingStep::Started => Unbounded, - StreamingStep::Ongoing(slot) => Excluded(slot), - StreamingStep::Finished(_) => return (de_part, cursor), - }; - let mut de_part_last_slot: Option = None; - for (slot, ids) in self.sorted_denunciations.range((left_bound, Unbounded)) { - if de_part.len() < self.config.bootstrap_part_size as usize { - de_part.insert(*slot, ids.clone()); - de_part_last_slot = Some(*slot); - } else { - break; - } - } - if let Some(last_slot) = de_part_last_slot { - (de_part, StreamingStep::Ongoing(last_slot)) - } else { - (de_part, StreamingStep::Finished(None)) - } + /// # Arguments + /// * `de_idx` + /// * `batch`: the given operation batch to update + fn put_entry(&self, de_idx: &DenunciationIndex, batch: &mut DBBatch) { + let db = self.db.read(); + + let mut serialized_de_idx = Vec::new(); + self.denunciation_index_serializer + .serialize(de_idx, &mut serialized_de_idx) + .expect(EXECUTED_DENUNCIATIONS_INDEX_SER_ERROR); + + db.put_or_update_entry_value(batch, denunciation_index_key!(serialized_de_idx), b""); } - /// Set a part of the executed denunciations. 
- /// Used exclusively by the bootstrap client. - /// Takes the data returned from `get_executed_de_part` as input. + /// Remove a denunciation_index from the DB /// - /// # Returns - /// The next executed de streaming step - pub fn set_executed_de_part( - &mut self, - part: BTreeMap>, - ) -> StreamingStep { - self.sorted_denunciations.extend(part.clone()); - self.extend_and_compute_hash(part.iter().flat_map(|(_, ids)| ids)); - if let Some(slot) = self - .sorted_denunciations - .last_key_value() - .map(|(slot, _)| slot) - { - StreamingStep::Ongoing(*slot) - } else { - StreamingStep::Finished(None) + /// # Arguments + /// * `de_idx`: the denunciation index to remove + /// * batch: the given operation batch to update + fn delete_entry(&self, de_idx: &DenunciationIndex, batch: &mut DBBatch) { + let db = self.db.read(); + + let mut serialized_de_idx = Vec::new(); + self.denunciation_index_serializer + .serialize(de_idx, &mut serialized_de_idx) + .expect(EXECUTED_DENUNCIATIONS_INDEX_SER_ERROR); + + db.delete_key(batch, denunciation_index_key!(serialized_de_idx)); + } + + /// Deserializes the key and value, useful after bootstrap + pub fn is_key_value_valid(&self, serialized_key: &[u8], serialized_value: &[u8]) -> bool { + if !serialized_key.starts_with(EXECUTED_DENUNCIATIONS_PREFIX.as_bytes()) { + return false; + } + + let Ok((rest, _idx)) = self.denunciation_index_deserializer.deserialize::(&serialized_key[EXECUTED_DENUNCIATIONS_PREFIX.len()..]) else { + return false; + }; + if !rest.is_empty() { + return false; } + + if !serialized_value.is_empty() { + return false; + } + + true } } diff --git a/massa-executed-ops/src/executed_ops.rs b/massa-executed-ops/src/executed_ops.rs index f5550068da2..cb5384d8fa8 100644 --- a/massa-executed-ops/src/executed_ops.rs +++ b/massa-executed-ops/src/executed_ops.rs @@ -4,16 +4,18 @@ //! Used to detect operation reuse. use crate::{ops_changes::ExecutedOpsChanges, ExecutedOpsConfig}; -use massa_hash::{Hash, HASH_SIZE_BYTES}; +use massa_db::{ + DBBatch, MassaDB, CF_ERROR, CRUD_ERROR, EXECUTED_OPS_ID_DESER_ERROR, EXECUTED_OPS_ID_SER_ERROR, + EXECUTED_OPS_PREFIX, STATE_CF, +}; use massa_models::{ - operation::{OperationId, OperationIdDeserializer}, + operation::{OperationId, OperationIdDeserializer, OperationIdSerializer}, prehash::PreHashSet, - secure_share::Id, slot::{Slot, SlotDeserializer, SlotSerializer}, - streaming_step::StreamingStep, }; use massa_serialization::{ - Deserializer, SerializeError, Serializer, U64VarIntDeserializer, U64VarIntSerializer, + BoolDeserializer, BoolSerializer, DeserializeError, Deserializer, SerializeError, Serializer, + U64VarIntDeserializer, U64VarIntSerializer, }; use nom::{ error::{context, ContextError, ParseError}, @@ -21,63 +23,103 @@ use nom::{ sequence::tuple, IResult, Parser, }; +use parking_lot::RwLock; use std::{ - collections::{BTreeMap, HashMap}, - ops::Bound::{Excluded, Included, Unbounded}, + collections::{BTreeMap, HashMap, HashSet}, + ops::Bound::{Excluded, Included}, + sync::Arc, }; -const EXECUTED_OPS_HASH_INITIAL_BYTES: &[u8; 32] = &[0; HASH_SIZE_BYTES]; +/// Op id key formatting macro +#[macro_export] +macro_rules! 
op_id_key { + ($id:expr) => { + [&EXECUTED_OPS_PREFIX.as_bytes(), &$id[..]].concat() + }; +} /// A structure to list and prune previously executed operations -#[derive(Debug, Clone)] +#[derive(Clone)] pub struct ExecutedOps { /// Executed operations configuration - config: ExecutedOpsConfig, + _config: ExecutedOpsConfig, + /// RocksDB Instance + pub db: Arc>, /// Executed operations btreemap with slot as index for better pruning complexity pub sorted_ops: BTreeMap>, - /// Executed operations only for better insertion complexity - pub ops: PreHashSet, - /// Accumulated hash of the executed operations - pub hash: Hash, /// execution status of operations (true: success, false: fail) pub op_exec_status: HashMap, + operation_id_deserializer: OperationIdDeserializer, + operation_id_serializer: OperationIdSerializer, + bool_deserializer: BoolDeserializer, + bool_serializer: BoolSerializer, + slot_deserializer: SlotDeserializer, + slot_serializer: SlotSerializer, } impl ExecutedOps { /// Creates a new `ExecutedOps` - pub fn new(config: ExecutedOpsConfig) -> Self { + pub fn new(config: ExecutedOpsConfig, db: Arc>) -> Self { + let slot_deserializer = SlotDeserializer::new( + (Included(u64::MIN), Included(u64::MAX)), + (Included(0), Excluded(config.thread_count)), + ); Self { - config, + _config: config, + db, sorted_ops: BTreeMap::new(), - ops: PreHashSet::default(), - hash: Hash::from_bytes(EXECUTED_OPS_HASH_INITIAL_BYTES), op_exec_status: HashMap::new(), + operation_id_deserializer: OperationIdDeserializer::new(), + operation_id_serializer: OperationIdSerializer::new(), + bool_deserializer: BoolDeserializer::new(), + bool_serializer: BoolSerializer::new(), + slot_deserializer, + slot_serializer: SlotSerializer::new(), } } - /// Creates a new `ExecutedOps` and computes the hash - /// Useful when restarting from a snapshot - pub fn new_with_hash( - config: ExecutedOpsConfig, - sorted_ops: BTreeMap>, - ) -> Self { - let mut hash = Hash::from_bytes(EXECUTED_OPS_HASH_INITIAL_BYTES); - let mut ops = PreHashSet::default(); - for (_, op_ids) in sorted_ops.clone() { - for op_id in op_ids { - if ops.insert(op_id) { - // This let's us compute the accumulated hash of all op_ids in the struct. - // We XOR the hash to allow reversibility if we remove an op_id. 
- hash ^= *op_id.get_hash(); - } + /// Recomputes the local caches after bootstrap or loading the state from disk + pub fn recompute_sorted_ops_and_op_exec_status(&mut self) { + self.sorted_ops.clear(); + self.op_exec_status.clear(); + + let db = self.db.read(); + let handle = db.db.cf_handle(STATE_CF).expect(CF_ERROR); + + for (serialized_op_id, serialized_value) in db + .db + .prefix_iterator_cf(handle, EXECUTED_OPS_PREFIX) + .flatten() + { + if !serialized_op_id.starts_with(EXECUTED_OPS_PREFIX.as_bytes()) { + break; } - } - Self { - config, - sorted_ops, - ops, - hash, - op_exec_status: HashMap::new(), + + let (_, op_id) = self + .operation_id_deserializer + .deserialize::(&serialized_op_id[EXECUTED_OPS_PREFIX.len()..]) + .expect(EXECUTED_OPS_ID_DESER_ERROR); + + let (rest, op_exec_status) = self + .bool_deserializer + .deserialize::(&serialized_value) + .expect(EXECUTED_OPS_ID_DESER_ERROR); + let (_, slot) = self + .slot_deserializer + .deserialize::(rest) + .expect(EXECUTED_OPS_ID_DESER_ERROR); + + self.sorted_ops + .entry(slot) + .and_modify(|ids| { + ids.insert(op_id); + }) + .or_insert_with(|| { + let mut new = HashSet::default(); + new.insert(op_id); + new + }); + self.op_exec_status.insert(op_id, op_exec_status); } } @@ -85,36 +127,24 @@ impl ExecutedOps { /// /// USED FOR BOOTSTRAP ONLY pub fn reset(&mut self) { - self.sorted_ops.clear(); - self.ops.clear(); - self.hash = Hash::from_bytes(EXECUTED_OPS_HASH_INITIAL_BYTES); - } - - /// Returns the number of executed operations - pub fn len(&self) -> usize { - self.ops.len() - } + self.db + .write() + .delete_prefix(EXECUTED_OPS_PREFIX, STATE_CF, None); - /// Check executed ops emptiness - pub fn is_empty(&self) -> bool { - self.ops.is_empty() + self.recompute_sorted_ops_and_op_exec_status(); } - /// Internal function used to insert the values of an operation id iter and update the object hash - fn extend_and_compute_hash<'a, I>(&mut self, values: I) - where - I: Iterator, - { - for op_id in values { - if self.ops.insert(*op_id) { - self.hash ^= *op_id.get_hash(); - } + /// Apply speculative operations changes to the final executed operations state + pub fn apply_changes_to_batch( + &mut self, + changes: ExecutedOpsChanges, + slot: Slot, + batch: &mut DBBatch, + ) { + for (id, value) in changes.iter() { + self.put_entry(id, value, batch); } - } - /// Apply speculative operations changes to the final executed operations state - pub fn apply_changes(&mut self, changes: ExecutedOpsChanges, slot: Slot) { - self.extend_and_compute_hash(changes.keys()); for (op_id, (op_exec_success, slot)) in changes { self.sorted_ops .entry(slot) @@ -126,97 +156,138 @@ impl ExecutedOps { new.insert(op_id); new }); - self.op_exec_status.insert(op_id, op_exec_success); } - self.prune(slot); + self.prune_to_batch(slot, batch); } /// Check if an operation was executed pub fn contains(&self, op_id: &OperationId) -> bool { - self.ops.contains(op_id) + let db = self.db.read(); + let handle = db.db.cf_handle(STATE_CF).expect(CF_ERROR); + + let mut serialized_op_id = Vec::new(); + self.operation_id_serializer + .serialize(op_id, &mut serialized_op_id) + .expect(EXECUTED_OPS_ID_SER_ERROR); + + db.db + .get_cf(handle, op_id_key!(serialized_op_id)) + .expect(CRUD_ERROR) + .is_some() } /// Prune all operations that expire strictly before `slot` - fn prune(&mut self, slot: Slot) { + fn prune_to_batch(&mut self, slot: Slot, batch: &mut DBBatch) { let kept = self.sorted_ops.split_off(&slot); let removed = std::mem::take(&mut self.sorted_ops); for (_, ids) in removed { 
for op_id in ids { - self.ops.remove(&op_id); self.op_exec_status.remove(&op_id); - self.hash ^= *op_id.get_hash(); + self.delete_entry(&op_id, batch); } } self.sorted_ops = kept; } - /// Get a part of the executed operations. - /// Used exclusively by the bootstrap server. + /// Add an executed_op to the DB /// - /// # Returns - /// A tuple containing the data and the next executed ops streaming step - pub fn get_executed_ops_part( - &self, - cursor: StreamingStep, - ) -> (BTreeMap>, StreamingStep) { - let mut ops_part = BTreeMap::new(); - let left_bound = match cursor { - StreamingStep::Started => Unbounded, - StreamingStep::Ongoing(slot) => Excluded(slot), - StreamingStep::Finished(_) => return (ops_part, cursor), - }; - let mut ops_part_last_slot: Option = None; - for (slot, ids) in self.sorted_ops.range((left_bound, Unbounded)) { - if ops_part.len() < self.config.bootstrap_part_size as usize { - ops_part.insert(*slot, ids.clone()); - ops_part_last_slot = Some(*slot); - } else { - break; - } - } - if let Some(last_slot) = ops_part_last_slot { - (ops_part, StreamingStep::Ongoing(last_slot)) - } else { - (ops_part, StreamingStep::Finished(None)) - } + /// # Arguments + /// * `op_id` + /// * `value`: execution status and validity slot + /// * `batch`: the given operation batch to update + fn put_entry(&self, op_id: &OperationId, value: &(bool, Slot), batch: &mut DBBatch) { + let db = self.db.read(); + + let mut serialized_op_id = Vec::new(); + self.operation_id_serializer + .serialize(op_id, &mut serialized_op_id) + .expect(EXECUTED_OPS_ID_SER_ERROR); + + let mut serialized_op_value = Vec::new(); + self.bool_serializer + .serialize(&value.0, &mut serialized_op_value) + .expect(EXECUTED_OPS_ID_SER_ERROR); + self.slot_serializer + .serialize(&value.1, &mut serialized_op_value) + .expect(EXECUTED_OPS_ID_SER_ERROR); + + db.put_or_update_entry_value(batch, op_id_key!(serialized_op_id), &serialized_op_value); } - /// Set a part of the executed operations. - /// Used exclusively by the bootstrap client. - /// Takes the data returned from `get_executed_ops_part` as input. 
+ /// Remove a op_id from the DB /// - /// # Returns - /// The next executed ops streaming step - pub fn set_executed_ops_part( - &mut self, - part: BTreeMap>, - ) -> StreamingStep { - self.sorted_ops.extend(part.clone()); - self.extend_and_compute_hash(part.iter().flat_map(|(_, ids)| ids)); - if let Some(slot) = self.sorted_ops.last_key_value().map(|(slot, _)| slot) { - StreamingStep::Ongoing(*slot) - } else { - StreamingStep::Finished(None) + /// # Arguments + /// * batch: the given operation batch to update + fn delete_entry(&self, op_id: &OperationId, batch: &mut DBBatch) { + let db = self.db.read(); + + let mut serialized_op_id = Vec::new(); + self.operation_id_serializer + .serialize(op_id, &mut serialized_op_id) + .expect(EXECUTED_OPS_ID_SER_ERROR); + + db.delete_key(batch, op_id_key!(serialized_op_id)); + } + + /// Deserializes the key and value, useful after bootstrap + pub fn is_key_value_valid(&self, serialized_key: &[u8], serialized_value: &[u8]) -> bool { + if !serialized_key.starts_with(EXECUTED_OPS_PREFIX.as_bytes()) { + return false; } + + let Ok((rest, _id)) = self.operation_id_deserializer.deserialize::(&serialized_key[EXECUTED_OPS_PREFIX.len()..]) else { + return false; + }; + if !rest.is_empty() { + return false; + } + + let Ok((rest, _bool)) = self.bool_deserializer.deserialize::(serialized_value) else { + return false; + }; + let Ok((rest, _slot)) = self.slot_deserializer.deserialize::(rest) else { + return false; + }; + if !rest.is_empty() { + return false; + } + + true } } #[test] -fn test_executed_ops_xor_computing() { +fn test_executed_ops_hash_computing() { + use massa_db::{MassaDB, MassaDBConfig, STATE_HASH_INITIAL_BYTES}; + use massa_hash::Hash; use massa_models::prehash::PreHashMap; use massa_models::secure_share::Id; + use tempfile::TempDir; // initialize the executed ops config - let config = ExecutedOpsConfig { - thread_count: 2, - bootstrap_part_size: 10, + let thread_count = 2; + let config = ExecutedOpsConfig { thread_count }; + let tempdir_a = TempDir::new().expect("cannot create temp directory"); + let tempdir_c = TempDir::new().expect("cannot create temp directory"); + let db_a_config = MassaDBConfig { + path: tempdir_a.path().to_path_buf(), + max_history_length: 10, + max_new_elements: 100, + thread_count, }; - + let db_c_config = MassaDBConfig { + path: tempdir_c.path().to_path_buf(), + max_history_length: 10, + max_new_elements: 100, + thread_count, + }; + let db_a = Arc::new(RwLock::new(MassaDB::new(db_a_config))); + let db_c = Arc::new(RwLock::new(MassaDB::new(db_c_config))); // initialize the executed ops and executed ops changes - let mut a = ExecutedOps::new(config.clone()); - let mut c = ExecutedOps::new(config); + let mut a = ExecutedOps::new(config.clone(), db_a.clone()); + let mut c = ExecutedOps::new(config, db_c.clone()); let mut change_a = PreHashMap::default(); let mut change_b = PreHashMap::default(); let mut change_c = PreHashMap::default(); @@ -248,25 +319,43 @@ fn test_executed_ops_xor_computing() { period: 0, thread: 0, }; - a.apply_changes(change_a, apply_slot); - a.apply_changes(change_b, apply_slot); - c.apply_changes(change_c, apply_slot); + + let mut batch_a = DBBatch::new(); + a.apply_changes_to_batch(change_a, apply_slot, &mut batch_a); + db_a.write().write_batch(batch_a, Default::default(), None); + + let mut batch_b = DBBatch::new(); + a.apply_changes_to_batch(change_b, apply_slot, &mut batch_b); + db_a.write().write_batch(batch_b, Default::default(), None); + + let mut batch_c = DBBatch::new(); + 
c.apply_changes_to_batch(change_c, apply_slot, &mut batch_c); + db_c.write().write_batch(batch_c, Default::default(), None); // check that 'a' (after change_a and change_b) and 'c' end up with the same DB hash - assert_eq!(a.hash, c.hash, "'a' and 'c' hashes are not equal"); + assert_ne!( + db_a.read().get_db_hash(), + Hash::from_bytes(STATE_HASH_INITIAL_BYTES) + ); + assert_eq!( + db_a.read().get_db_hash(), + db_c.read().get_db_hash(), + "'a' and 'c' hashes are not equal" + ); // prune every element let prune_slot = Slot { period: 20, thread: 0, }; - a.apply_changes(PreHashMap::default(), prune_slot); - a.prune(prune_slot); + let mut batch_a = DBBatch::new(); + a.prune_to_batch(prune_slot, &mut batch_a); + db_a.write().write_batch(batch_a, Default::default(), None); - // at this point the hash should have been XORed with itself + // at this point the hash should have been reset to its original value assert_eq!( - a.hash, - Hash::from_bytes(EXECUTED_OPS_HASH_INITIAL_BYTES), + db_a.read().get_db_hash(), + Hash::from_bytes(STATE_HASH_INITIAL_BYTES), "'a' was not reset to its initial value" ); } diff --git a/massa-execution-exports/Cargo.toml b/massa-execution-exports/Cargo.toml index 05b2395e5d0..8140b8f8c69 100644 --- a/massa-execution-exports/Cargo.toml +++ b/massa-execution-exports/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "massa_execution_exports" -version = "0.1.0" +version = "0.23.0" authors = ["Massa Labs <info@massa.net>"] edition = "2021" @@ -10,9 +10,13 @@ edition = "2021" displaydoc = "0.2" thiserror = "1.0" num = { version = "0.4", features = ["serde"] } -parking_lot = { version = "0.12", features = ["deadlock_detection"], optional = true } +parking_lot = { version = "0.12", features = [ + "deadlock_detection", +], optional = true } tempfile = { version = "3.3", optional = true } tokio = { version = "1.23", features = ["sync"] } +mockall = { version = "0.11.4", optional = true } + # custom modules massa_proto = { path = "../massa-proto" } massa_hash = { path = "../massa-hash" } @@ -22,10 +26,14 @@ massa_storage = { path = "../massa-storage" } massa_final_state = { path = "../massa-final-state" } massa_ledger_exports = { path = "../massa-ledger-exports", optional = true } massa_module_cache = { path = "../massa-module-cache" } -massa-sc-runtime = { git = "https://github.com/massalabs/massa-sc-runtime", branch="main" } +massa_versioning = { path = "../massa-versioning" } +massa-sc-runtime = { git = "https://github.com/massalabs/massa-sc-runtime", branch = "main" } + +[dev-dependencies] +mockall = "0.11.4" # for more information on what are the following features used for, see the cargo.toml at workspace level [features] gas_calibration = ["massa_ledger_exports/testing", "parking_lot", "tempfile"] -testing = ["massa_models/testing", "massa_ledger_exports/testing", "parking_lot", "tempfile"] +testing = ["massa_models/testing", "massa_ledger_exports/testing", "parking_lot", "tempfile", "mockall"] diff --git a/massa-execution-exports/src/controller_traits.rs b/massa-execution-exports/src/controller_traits.rs index 4a5af96650e..d719739fb0a 100644 --- a/massa-execution-exports/src/controller_traits.rs +++ b/massa-execution-exports/src/controller_traits.rs @@ -20,6 +20,7 @@ use massa_storage::Storage; use std::collections::BTreeMap; use std::collections::HashMap; +#[cfg_attr(any(test, feature = "testing"), mockall::automock)] /// interface that communicates with the execution worker thread pub trait ExecutionController: Send + Sync { /// Updates blockclique status by signaling newly finalized blocks and the latest blockclique.
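For context on the `mockall::automock` attribute added to `ExecutionController` above: mockall generates a `MockExecutionController` type whose methods are scripted per test with `expect_*` builders (this generated mock is what `lib.rs` re-exports below for `test`/`testing` builds). Since the full trait signature is not shown in this hunk, here is a minimal illustrative sketch on a hypothetical stand-in trait; `StatusSource` and its method are placeholders, not part of this PR:

use mockall::automock;

// Hypothetical stand-in for a controller-style trait.
#[cfg_attr(test, automock)]
pub trait StatusSource: Send + Sync {
    fn is_operation_executed(&self, op_id: &str) -> bool;
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn scripted_mock() {
        // `automock` generated `MockStatusSource` from the trait above.
        let mut mock = MockStatusSource::new();
        // Expect exactly one call and script its return value.
        mock.expect_is_operation_executed()
            .times(1)
            .returning(|_| true);
        assert!(mock.is_operation_executed("some_op_id"));
    }
}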
diff --git a/massa-execution-exports/src/error.rs b/massa-execution-exports/src/error.rs index aec0db11c2f..e73de84efe4 100644 --- a/massa-execution-exports/src/error.rs +++ b/massa-execution-exports/src/error.rs @@ -5,6 +5,7 @@ use displaydoc::Display; use massa_module_cache::error::CacheError; use massa_sc_runtime::VMError; +use massa_versioning::versioning_factory::FactoryError; use thiserror::Error; /// Errors of the execution component. @@ -69,4 +70,7 @@ pub enum ExecutionError { /// Cache error: {0} CacheError(#[from] CacheError), + + /// Factory error: {0} + FactoryError(#[from] FactoryError), } diff --git a/massa-execution-exports/src/lib.rs b/massa-execution-exports/src/lib.rs index 26aeaea2e9d..b56d2ab01c3 100644 --- a/massa-execution-exports/src/lib.rs +++ b/massa-execution-exports/src/lib.rs @@ -53,6 +53,8 @@ mod settings; mod types; pub use channels::ExecutionChannels; +#[cfg(any(test, feature = "testing"))] +pub use controller_traits::MockExecutionController; pub use controller_traits::{ExecutionController, ExecutionManager}; pub use error::ExecutionError; pub use event_store::EventStore; diff --git a/massa-execution-exports/src/test_exports/config.rs b/massa-execution-exports/src/test_exports/config.rs index f99947724e9..0b85a37d204 100644 --- a/massa-execution-exports/src/test_exports/config.rs +++ b/massa-execution-exports/src/test_exports/config.rs @@ -14,9 +14,7 @@ impl Default for ExecutionConfig { fn default() -> Self { let storage_costs_constants = StorageCostsConstants { ledger_cost_per_byte: LEDGER_COST_PER_BYTE, - ledger_entry_base_cost: LEDGER_COST_PER_BYTE - .checked_mul_u64(LEDGER_ENTRY_BASE_SIZE as u64) - .expect("Overflow when creating constant ledger_entry_base_cost"), + ledger_entry_base_cost: LEDGER_ENTRY_BASE_COST, ledger_entry_datastore_base_cost: LEDGER_COST_PER_BYTE .checked_mul_u64(LEDGER_ENTRY_DATASTORE_BASE_SIZE as u64) .expect("Overflow when creating constant ledger_entry_datastore_base_size"), @@ -36,7 +34,7 @@ impl Default for ExecutionConfig { periods_per_cycle: PERIODS_PER_CYCLE, // reset genesis timestamp because we are in test mode that can take a while to process genesis_timestamp: MassaTime::now().expect("Impossible to reset the timestamp in test"), - t0: 64.into(), + t0: MassaTime::from_millis(64), stats_time_window_duration: MassaTime::from_millis(30000), max_miss_ratio: *POS_MISS_RATE_DEACTIVATION_THRESHOLD, max_datastore_key_length: MAX_DATASTORE_KEY_LENGTH, diff --git a/massa-execution-exports/src/test_exports/mock.rs b/massa-execution-exports/src/test_exports/mock.rs index a731dc5cd5e..05070850e1e 100644 --- a/massa-execution-exports/src/test_exports/mock.rs +++ b/massa-execution-exports/src/test_exports/mock.rs @@ -233,7 +233,7 @@ impl ExecutionController for MockExecutionController { self.0 .lock() .send(MockExecutionControllerMessage::IsDenunciationExecuted { - de_idx: denunciation_index.clone(), + de_idx: *denunciation_index, response_tx, }) { diff --git a/massa-execution-worker/Cargo.toml b/massa-execution-worker/Cargo.toml index 0c05e4cb490..6bca608e667 100644 --- a/massa-execution-worker/Cargo.toml +++ b/massa-execution-worker/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "massa_execution_worker" -version = "0.1.0" +version = "0.23.0" authors = ["Massa Labs "] edition = "2021" @@ -34,7 +34,8 @@ massa_time = { path = "../massa-time" } massa_ledger_exports = { path = "../massa-ledger-exports" } massa_pos_exports = { path = "../massa-pos-exports" } massa_final_state = { path = "../massa-final-state" } -massa_versioning_worker = { 
path = "../massa-versioning-worker" } +massa_versioning = { path = "../massa-versioning" } +massa_db = { path = "../massa-db" } [dev-dependencies] tokio = { version = "1.23", features = ["sync"] } diff --git a/massa-execution-worker/src/active_history.rs b/massa-execution-worker/src/active_history.rs index 7a47a4a677a..3c1188e1012 100644 --- a/massa-execution-worker/src/active_history.rs +++ b/massa-execution-worker/src/active_history.rs @@ -1,6 +1,7 @@ +use massa_async_pool::{AsyncMessage, AsyncMessageId, AsyncMessageUpdate}; use massa_execution_exports::ExecutionOutput; use massa_ledger_exports::{ - LedgerEntry, LedgerEntryUpdate, SetOrDelete, SetOrKeep, SetUpdateOrDelete, + Applicable, LedgerEntry, LedgerEntryUpdate, SetOrDelete, SetOrKeep, SetUpdateOrDelete, }; use massa_models::denunciation::DenunciationIndex; use massa_models::{ @@ -60,6 +61,37 @@ impl ActiveHistory { HistorySearchResult::NoInfo } + /// Lazily query (from end to beginning) a message based on its id + /// + /// Returns a `HistorySearchResult`. + pub fn fetch_message( + &self, + message_id: &AsyncMessageId, + mut current_updates: AsyncMessageUpdate, + ) -> HistorySearchResult> { + for history_element in self.0.iter().rev() { + match history_element + .state_changes + .async_pool_changes + .0 + .get(message_id) + { + Some(SetUpdateOrDelete::Set(msg)) => { + let mut msg = msg.clone(); + msg.apply(current_updates); + return HistorySearchResult::Present(SetUpdateOrDelete::Set(msg)); + } + Some(SetUpdateOrDelete::Update(msg_update)) => { + current_updates.apply(msg_update.clone()); + } + Some(SetUpdateOrDelete::Delete) => return HistorySearchResult::Absent, + _ => (), + } + } + + HistorySearchResult::Present(SetUpdateOrDelete::Update(current_updates)) + } + /// Lazily query (from end to beginning) the active list of executed denunciations. /// /// Returns a `HistorySearchResult`. 
diff --git a/massa-execution-worker/src/context.rs b/massa-execution-worker/src/context.rs index 2253ca0a3fd..70b81a45611 100644 --- a/massa-execution-worker/src/context.rs +++ b/massa-execution-worker/src/context.rs @@ -13,16 +13,18 @@ use crate::speculative_executed_ops::SpeculativeExecutedOps; use crate::speculative_ledger::SpeculativeLedger; use crate::vesting_manager::VestingManager; use crate::{active_history::ActiveHistory, speculative_roll_state::SpeculativeRollState}; -use massa_async_pool::{AsyncMessage, AsyncMessageId}; +use massa_async_pool::{AsyncMessage, AsyncPoolChanges}; use massa_executed_ops::{ExecutedDenunciationsChanges, ExecutedOpsChanges}; use massa_execution_exports::{ EventStore, ExecutionConfig, ExecutionError, ExecutionOutput, ExecutionStackElement, }; use massa_final_state::{FinalState, StateChanges}; +use massa_hash::Hash; use massa_ledger_exports::LedgerChanges; -use massa_models::address::{ExecutionAddressCycleInfo, SCAddress}; +use massa_models::address::ExecutionAddressCycleInfo; use massa_models::bytecode::Bytecode; use massa_models::denunciation::DenunciationIndex; +use massa_models::timeslots::get_block_slot_timestamp; use massa_models::{ address::Address, amount::Amount, @@ -33,6 +35,9 @@ use massa_models::{ }; use massa_module_cache::controller::ModuleCache; use massa_pos_exports::PoSChanges; +use massa_versioning::address_factory::{AddressArgs, AddressFactory}; +use massa_versioning::versioning::MipStore; +use massa_versioning::versioning_factory::{FactoryStrategy, VersioningFactory}; use parking_lot::RwLock; use rand::SeedableRng; use rand_xoshiro::Xoshiro256PlusPlus; @@ -47,7 +52,7 @@ pub struct ExecutionContextSnapshot { pub ledger_changes: LedgerChanges, /// speculative asynchronous pool messages emitted so far in the context - pub async_pool_changes: Vec<(AsyncMessageId, AsyncMessage)>, + pub async_pool_changes: AsyncPoolChanges, /// speculative list of operations executed pub executed_ops: ExecutedOpsChanges, @@ -154,6 +159,9 @@ pub struct ExecutionContext { // Vesting Manager pub vesting_manager: Arc, + + // Address factory + pub address_factory: AddressFactory, } impl ExecutionContext { @@ -173,6 +181,7 @@ impl ExecutionContext { active_history: Arc>, module_cache: Arc>, vesting_manager: Arc, + mip_store: MipStore, ) -> Self { ExecutionContext { speculative_ledger: SpeculativeLedger::new( @@ -215,6 +224,7 @@ impl ExecutionContext { module_cache, config, vesting_manager, + address_factory: AddressFactory { mip_store }, } } @@ -293,6 +303,7 @@ impl ExecutionContext { active_history: Arc>, module_cache: Arc>, vesting_manager: Arc, + mip_store: MipStore, ) -> Self { // Deterministically seed the unsafe RNG to allow the bytecode to use it. // Note that consecutive read-only calls for the same slot will get the same random seed. @@ -322,6 +333,7 @@ impl ExecutionContext { active_history, module_cache, vesting_manager, + mip_store, ) } } @@ -356,6 +368,7 @@ impl ExecutionContext { /// /// # returns /// A `ExecutionContext` instance + #[allow(clippy::too_many_arguments)] pub(crate) fn active_slot( config: ExecutionConfig, slot: Slot, @@ -364,6 +377,7 @@ impl ExecutionContext { active_history: Arc>, module_cache: Arc>, vesting_manager: Arc, + mip_store: MipStore, ) -> Self { // Deterministically seed the unsafe RNG to allow the bytecode to use it. 
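A side note on the snapshot change above: `ExecutionContextSnapshot.async_pool_changes` is now the full `AsyncPoolChanges` set/update/delete map rather than a flat `Vec` of emitted messages, so restoring a snapshot also rolls back speculative updates and deletions, not just additions. A minimal sketch of that snapshot/rollback pattern with toy types (`PoolChange` and `SpeculativePool` are illustrative stand-ins, not massa types):

use std::collections::BTreeMap;

#[derive(Clone, Debug, PartialEq)]
enum PoolChange {
    Set(String), // add or replace a message
    Delete,      // speculatively remove a message
}

#[derive(Default)]
struct SpeculativePool {
    changes: BTreeMap<u64, PoolChange>,
}

impl SpeculativePool {
    fn get_snapshot(&self) -> BTreeMap<u64, PoolChange> {
        self.changes.clone()
    }
    fn reset_to_snapshot(&mut self, snapshot: BTreeMap<u64, PoolChange>) {
        // Drop everything recorded since the snapshot, deletions included.
        self.changes = snapshot;
    }
}

fn main() {
    let mut pool = SpeculativePool::default();
    pool.changes.insert(1, PoolChange::Set("msg".into()));
    let snap = pool.get_snapshot();
    pool.changes.insert(1, PoolChange::Delete); // speculative deletion
    pool.reset_to_snapshot(snap); // rollback restores the pending Set
    assert_eq!(pool.changes.get(&1), Some(&PoolChange::Set("msg".into())));
}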
@@ -391,6 +405,7 @@ impl ExecutionContext { active_history, module_cache, vesting_manager, + mip_store, ) } } @@ -449,6 +464,13 @@ impl ExecutionContext { // https://github.com/massalabs/massa/issues/2331 // deterministically generate a new unique smart contract address + let slot_timestamp = get_block_slot_timestamp( + self.config.thread_count, + self.config.t0, + self.config.genesis_timestamp, + self.slot, + ) + .expect("could not compute current slot timestamp"); // create a seed from the current slot let mut data: Vec = self.slot.to_bytes_key().to_vec(); @@ -462,7 +484,11 @@ impl ExecutionContext { data.push(1u8); } // hash the seed to get a unique address - let address = Address::SC(SCAddress(massa_hash::Hash::compute_from(&data))); + let hash = Hash::compute_from(&data); + let address = self.address_factory.create( + &AddressArgs::SC { hash }, + FactoryStrategy::At(slot_timestamp), + )?; // add this address with its bytecode to the speculative ledger self.speculative_ledger.create_new_sc_address( @@ -989,7 +1015,7 @@ impl ExecutionContext { /// pub fn insert_executed_denunciation(&mut self, denunciation_idx: &DenunciationIndex) { self.speculative_executed_denunciations - .insert_executed_denunciation(denunciation_idx.clone()); + .insert_executed_denunciation(*denunciation_idx); } /// gets the cycle information for an address diff --git a/massa-execution-worker/src/execution.rs b/massa-execution-worker/src/execution.rs index 56a161f19b5..9f8cac79e5c 100644 --- a/massa-execution-worker/src/execution.rs +++ b/massa-execution-worker/src/execution.rs @@ -14,6 +14,7 @@ use crate::interface_impl::InterfaceImpl; use crate::stats::ExecutionStatsCounter; use crate::vesting_manager::VestingManager; use massa_async_pool::AsyncMessage; +use massa_db::DBBatch; use massa_execution_exports::{ EventStore, ExecutionChannels, ExecutionConfig, ExecutionError, ExecutionOutput, ExecutionStackElement, ReadOnlyExecutionOutput, ReadOnlyExecutionRequest, @@ -40,7 +41,7 @@ use massa_module_cache::controller::ModuleCache; use massa_pos_exports::SelectorController; use massa_sc_runtime::{Interface, Response, VMError}; use massa_storage::Storage; -use massa_versioning_worker::versioning::MipStore; +use massa_versioning::versioning::MipStore; use parking_lot::{Mutex, RwLock}; use std::collections::{BTreeMap, BTreeSet, HashMap}; use std::sync::Arc; @@ -108,7 +109,12 @@ impl ExecutionState { ) -> ExecutionState { // Get the slot at the output of which the final state is attached. // This should be among the latest final slots. 
- let last_final_slot = final_state.read().slot; + let last_final_slot = final_state + .read() + .db + .read() + .get_change_id() + .expect("Critical error: Final state has no slot attached"); // Create default active history let active_history: Arc> = Default::default(); @@ -144,6 +150,7 @@ impl ExecutionState { active_history.clone(), module_cache.clone(), vesting_manager.clone(), + mip_store.clone(), ))); // Instantiate the interface providing ABI access to the VM, share the execution context with it @@ -989,6 +996,7 @@ impl ExecutionState { self.active_history.clone(), self.module_cache.clone(), self.vesting_manager.clone(), + self.mip_store.clone(), ); // Get asynchronous messages to execute @@ -1259,7 +1267,10 @@ impl ExecutionState { // apply the cached output and return self.apply_final_execution_output(exec_out.clone()); - debug!("execute_final_slot: found in cache, applied cache"); + // update versioning stats + self.update_versioning_stats(exec_target, slot); + + debug!("execute_final_slot: found in cache, applied cache and updated versioning stats"); // Broadcast a final slot execution output to active channel subscribers. if self.config.broadcast_enabled { @@ -1372,6 +1383,7 @@ impl ExecutionState { self.active_history.clone(), self.module_cache.clone(), self.vesting_manager.clone(), + self.mip_store.clone(), ); // run the interpreter according to the target type @@ -1554,15 +1566,8 @@ impl ExecutionState { match cycle.checked_sub(3) { Some(lookback_cycle) => { - let lookback_cycle_index = - match final_state.pos_state.get_cycle_index(lookback_cycle) { - Some(v) => v, - None => Default::default(), - }; // get rolls - final_state.pos_state.cycle_history[lookback_cycle_index] - .roll_counts - .clone() + final_state.pos_state.get_all_roll_counts(lookback_cycle) } None => final_state.pos_state.initial_rolls.clone(), } @@ -1695,7 +1700,7 @@ impl ExecutionState { ) { // update versioning statistics if let Some((block_id, storage)) = exec_target { - if let Some(_block) = storage.read_blocks().get(block_id) { + if let Some(block) = storage.read_blocks().get(block_id) { let slot_ts_ = get_block_slot_timestamp( self.config.thread_count, self.config.t0, @@ -1703,10 +1708,44 @@ impl ExecutionState { *slot, ); + let current_version = block.content.header.content.current_version; + let announced_version = block.content.header.content.announced_version; if let Ok(slot_ts) = slot_ts_ { - // TODO - Next PR: use block header network versions - default to 0 for now + self.mip_store.update_network_version_stats( + slot_ts, + Some((current_version, announced_version)), + ); + + // Now write mip store changes to disk (if any) + let mut db_batch = DBBatch::new(); + let mut db_versioning_batch = DBBatch::new(); + // Unwrap/Expect because if something fails we can only panic here + let slot_prev_ts = get_block_slot_timestamp( + self.config.thread_count, + self.config.t0, + self.config.genesis_timestamp, + slot.get_prev_slot(self.config.thread_count).unwrap(), + ) + .unwrap(); + self.mip_store - .update_network_version_stats(slot_ts, Some((0, 0))); + .update_batches( + &mut db_batch, + &mut db_versioning_batch, + (&slot_prev_ts, &slot_ts), + ) + .unwrap_or_else(|e| { + panic!( + "Unable to get MIP store changes between {} and {}: {}", + slot_prev_ts, slot_ts, e + ) + }); + + self.final_state.write().db.write().write_batch( + db_batch, + db_versioning_batch, + None, + ); } else { warn!("Unable to get slot timestamp for slot: {} in order to update mip_store stats", slot); } diff --git 
a/massa-execution-worker/src/interface_impl.rs b/massa-execution-worker/src/interface_impl.rs index 9e6b618d519..94b82cd396d 100644 --- a/massa-execution-worker/src/interface_impl.rs +++ b/massa-execution-worker/src/interface_impl.rs @@ -69,7 +69,11 @@ impl InterfaceImpl { operation_datastore: Option, ) -> InterfaceImpl { use massa_ledger_exports::{LedgerEntry, SetUpdateOrDelete}; + use massa_models::config::{ + MIP_STORE_STATS_BLOCK_CONSIDERED, MIP_STORE_STATS_COUNTERS_MAX, + }; use massa_module_cache::{config::ModuleCacheConfig, controller::ModuleCache}; + use massa_versioning::versioning::{MipStatsConfig, MipStore}; use parking_lot::RwLock; let vesting_file = super::tests::get_initials_vesting(false); @@ -95,12 +99,21 @@ impl InterfaceImpl { .unwrap(), ); + // create an empty default store + let mip_stats_config = MipStatsConfig { + block_count_considered: MIP_STORE_STATS_BLOCK_CONSIDERED, + counters_max: MIP_STORE_STATS_COUNTERS_MAX, + }; + let mip_store = + MipStore::try_from(([], mip_stats_config)).expect("Cannot create an empty MIP store"); + let mut execution_context = ExecutionContext::new( config.clone(), final_state, Default::default(), module_cache, vesting_manager, + mip_store, ); execution_context.stack = vec![ExecutionStackElement { address: sender_addr, @@ -761,6 +774,7 @@ impl Interface for InterfaceImpl { }) }) .transpose()?, + None, )); execution_context.created_message_index += 1; Ok(()) diff --git a/massa-execution-worker/src/lib.rs b/massa-execution-worker/src/lib.rs index 4d6d5395f99..92c23964adc 100644 --- a/massa-execution-worker/src/lib.rs +++ b/massa-execution-worker/src/lib.rs @@ -82,6 +82,8 @@ #![feature(map_try_insert)] #![feature(let_chains)] #![feature(option_get_or_insert_default)] +#![feature(drain_filter)] +#![feature(btree_drain_filter)] mod active_history; mod context; @@ -108,7 +110,7 @@ pub use worker::start_execution_worker; ))] pub use interface_impl::InterfaceImpl; -#[cfg(any(feature = "benchmarking"))] +#[cfg(feature = "benchmarking")] use criterion as _; #[cfg(any( diff --git a/massa-execution-worker/src/speculative_async_pool.rs b/massa-execution-worker/src/speculative_async_pool.rs index 918013ebe9e..f09035737b4 100644 --- a/massa-execution-worker/src/speculative_async_pool.rs +++ b/massa-execution-worker/src/speculative_async_pool.rs @@ -3,27 +3,27 @@ //! The speculative asynchronous pool represents the state of //! the pool at an arbitrary execution slot. -use crate::active_history::ActiveHistory; -use massa_async_pool::{AsyncMessage, AsyncMessageId, AsyncPool, AsyncPoolChanges}; +use crate::active_history::{ActiveHistory, HistorySearchResult::Present}; +use massa_async_pool::{ + AsyncMessage, AsyncMessageId, AsyncMessageInfo, AsyncMessageTrigger, AsyncMessageUpdate, + AsyncPoolChanges, +}; use massa_final_state::FinalState; -use massa_ledger_exports::LedgerChanges; +use massa_ledger_exports::{Applicable, LedgerChanges, SetUpdateOrDelete}; use massa_models::slot::Slot; use parking_lot::RwLock; -use std::sync::Arc; +use std::{ + collections::{BTreeMap, HashMap}, + sync::Arc, +}; -/// The `SpeculativeAsyncPool` holds a copy of the final state asynchronous pool -/// to which it applies the previous changes. -/// The `SpeculativeAsyncPool` manipulates this copy to compute the full pool -/// while keeping track of all the newly added changes. 
pub(crate) struct SpeculativeAsyncPool { - /// Copy of the final asynchronous pool with the previous changes applied - async_pool: AsyncPool, - - /// List of newly emitted asynchronous messages - emitted: Vec<(AsyncMessageId, AsyncMessage)>, - - /// List of changes (additions/deletions/activation) to the pool after settling emitted messages - settled_changes: AsyncPoolChanges, + final_state: Arc<RwLock<FinalState>>, + active_history: Arc<RwLock<ActiveHistory>>, + // current speculative pool changes + pool_changes: AsyncPoolChanges, + // Used to know which messages we want to take + message_infos: BTreeMap<AsyncMessageId, AsyncMessageInfo>, } impl SpeculativeAsyncPool { @@ -34,16 +34,33 @@ impl SpeculativeAsyncPool { final_state: Arc<RwLock<FinalState>>, active_history: Arc<RwLock<ActiveHistory>>, ) -> Self { - // deduce speculative async pool from history - let mut async_pool = final_state.read().async_pool.clone(); + let mut message_infos = final_state.read().async_pool.message_info_cache.clone(); + for history_item in active_history.read().0.iter() { - async_pool.apply_changes_unchecked(&history_item.state_changes.async_pool_changes); + for change in history_item.state_changes.async_pool_changes.0.iter() { + match change { + (id, SetUpdateOrDelete::Set(message)) => { + message_infos.insert(*id, AsyncMessageInfo::from(message.clone())); + } + + (id, SetUpdateOrDelete::Update(message_update)) => { + message_infos.entry(*id).and_modify(|message_info| { + message_info.apply(message_update.clone()); + }); + } + + (id, SetUpdateOrDelete::Delete) => { + message_infos.remove(id); + } + } + } } SpeculativeAsyncPool { - async_pool, - emitted: Default::default(), - settled_changes: Default::default(), + final_state, + active_history, + pool_changes: Default::default(), + message_infos, } } @@ -51,22 +68,23 @@ impl SpeculativeAsyncPool { /// and resets their local value to nothing.
/// This must be called after `settle_emitted_messages()` pub fn take(&mut self) -> AsyncPoolChanges { - std::mem::take(&mut self.settled_changes) + std::mem::take(&mut self.pool_changes) } /// Takes a snapshot (clone) of the emitted messages - pub fn get_snapshot(&self) -> Vec<(AsyncMessageId, AsyncMessage)> { - self.emitted.clone() + pub fn get_snapshot(&self) -> AsyncPoolChanges { + self.pool_changes.clone() } /// Resets the `SpeculativeAsyncPool` emitted messages to a snapshot (see `get_snapshot` method) - pub fn reset_to_snapshot(&mut self, snapshot: Vec<(AsyncMessageId, AsyncMessage)>) { - self.emitted = snapshot; + pub fn reset_to_snapshot(&mut self, snapshot: AsyncPoolChanges) { + self.pool_changes = snapshot; } /// Add a new message to the list of changes of this `SpeculativeAsyncPool` pub fn push_new_message(&mut self, msg: AsyncMessage) { - self.emitted.push((msg.compute_id(), msg)); + self.pool_changes.push_add(msg.compute_id(), msg.clone()); + self.message_infos.insert(msg.compute_id(), msg.into()); } /// Takes a batch of asynchronous messages to execute, @@ -83,15 +101,34 @@ impl SpeculativeAsyncPool { slot: Slot, max_gas: u64, ) -> Vec<(AsyncMessageId, AsyncMessage)> { - // take a batch of messages, removing it from the async pool - let msgs = self.async_pool.take_batch_to_execute(slot, max_gas); + let mut available_gas = max_gas; + + // Choose which messages to take based on self.message_infos + // (all messages are considered: finals, in active_history and in speculative) - // settle deletions - for (msg_id, _msg) in &msgs { - self.settled_changes.push_delete(*msg_id); + let mut wanted_messages = Vec::new(); + + let message_infos = self.message_infos.clone(); + + for (message_id, message_info) in message_infos.iter() { + if available_gas >= message_info.max_gas + && slot >= message_info.validity_start + && slot < message_info.validity_end + && message_info.can_be_executed + { + available_gas -= message_info.max_gas; + + wanted_messages.push(message_id); + } } - msgs + let taken = self.fetch_msgs(wanted_messages, true); + + for (message_id, _) in taken.iter() { + self.message_infos.remove(message_id); + } + + taken } /// Settle a slot. @@ -108,18 +145,155 @@ impl SpeculativeAsyncPool { slot: &Slot, ledger_changes: &LedgerChanges, ) -> Vec<(AsyncMessageId, AsyncMessage)> { - let (deleted_messages, triggered_messages) = - self.async_pool - .settle_slot(slot, &mut self.emitted, ledger_changes); - for (msg_id, msg) in std::mem::take(&mut self.emitted) { - self.settled_changes.push_add(msg_id, msg); + // Update the messages_info: remove messages that should be removed + // Filter out all messages for which the validity end is expired. + // Note that the validity_end bound is NOT included in the validity interval of the message. 
+ + let mut eliminated_infos: Vec<_> = self + .message_infos + .drain_filter(|_k, v| *slot >= v.validity_end) + .collect(); + + let eliminated_new_messages: Vec<_> = self + .pool_changes + .0 + .drain_filter(|_k, v| match v { + SetUpdateOrDelete::Set(v) => *slot >= v.validity_end, + SetUpdateOrDelete::Update(_v) => false, + SetUpdateOrDelete::Delete => false, + }) + .collect(); + + eliminated_infos.extend(eliminated_new_messages.iter().filter_map(|(k, v)| match v { + SetUpdateOrDelete::Set(v) => Some((*k, AsyncMessageInfo::from(v.clone()))), + SetUpdateOrDelete::Update(_v) => None, + SetUpdateOrDelete::Delete => None, + })); + + // Truncate message pool to its max size, removing non-prioritary items + let excess_count = self + .message_infos + .len() + .saturating_sub(self.final_state.read().async_pool.config.max_length as usize); + + eliminated_infos.reserve_exact(excess_count); + for _ in 0..excess_count { + eliminated_infos.push(self.message_infos.pop_last().unwrap()); // will not panic (checked at excess_count computation) } - for (msg_id, _msg) in deleted_messages.iter() { - self.settled_changes.push_delete(*msg_id); + + // Activate the messages that can be activated (triggered) + let mut triggered_info = Vec::new(); + for (id, message_info) in self.message_infos.iter_mut() { + if let Some(filter) = &message_info.trigger /*&& !message_info.can_be_executed*/ && is_triggered(filter, ledger_changes) + { + message_info.can_be_executed = true; + triggered_info.push((*id, message_info.clone())); + } } - for (msg_id, _msg) in triggered_messages.iter() { - self.settled_changes.push_activate(*msg_id); + + // Query triggered messages + let triggered_msg = + self.fetch_msgs(triggered_info.iter().map(|(id, _)| id).collect(), false); + + for (msg_id, _msg) in triggered_msg.iter() { + self.pool_changes.push_activate(*msg_id); } - deleted_messages + + // Query eliminated messages + let eliminated_msg = + self.fetch_msgs(eliminated_infos.iter().map(|(id, _)| id).collect(), true); + + eliminated_msg + } + + fn fetch_msgs( + &mut self, + mut wanted_ids: Vec<&AsyncMessageId>, + delete_existing: bool, + ) -> Vec<(AsyncMessageId, AsyncMessage)> { + let mut msgs = Vec::new(); + + let mut current_changes = HashMap::new(); + for id in wanted_ids.iter() { + current_changes.insert(*id, AsyncMessageUpdate::default()); + } + + let pool_changes_clone = self.pool_changes.clone(); + + // First, look in speculative pool + wanted_ids.drain_filter( + |&mut message_id| match pool_changes_clone.0.get(message_id) { + Some(SetUpdateOrDelete::Set(msg)) => { + if delete_existing { + self.pool_changes.push_delete(*message_id); + } + msgs.push((*message_id, msg.clone())); + true + } + Some(SetUpdateOrDelete::Update(msg_update)) => { + current_changes.entry(message_id).and_modify(|e| { + e.apply(msg_update.clone()); + }); + false + } + Some(SetUpdateOrDelete::Delete) => false, + None => false, + }, + ); + + // Then, search the active history + wanted_ids.drain_filter(|&mut message_id| { + match self.active_history.read().fetch_message( + message_id, + current_changes.get(message_id).cloned().unwrap_or_default(), + ) { + Present(SetUpdateOrDelete::Set(mut msg)) => { + msg.apply(current_changes.get(message_id).cloned().unwrap_or_default()); + if delete_existing { + self.pool_changes.push_delete(*message_id); + } + msgs.push((*message_id, msg)); + return true; + } + Present(SetUpdateOrDelete::Update(msg_update)) => { + current_changes.entry(message_id).and_modify(|e| { + e.apply(msg_update.clone()); + }); + return false; + } + _ 
=> {} + } + false + }); + + // Then, fetch all the remaining messages from the final state + let fetched_msgs = self + .final_state + .read() + .async_pool + .fetch_messages(wanted_ids); + + for (message_id, message) in fetched_msgs { + if let Some(msg) = message { + let mut msg = msg.clone(); + msg.apply(current_changes.get(message_id).cloned().unwrap_or_default()); + if delete_existing { + self.pool_changes.push_delete(*message_id); + } + msgs.push((*message_id, msg)); + } + } + + msgs + } + + #[cfg(any(test, feature = "test"))] + pub fn get_message_infos(&self) -> BTreeMap { + self.message_infos.clone() } } + +/// Check in the ledger changes if a message trigger has been triggered +fn is_triggered(filter: &AsyncMessageTrigger, ledger_changes: &LedgerChanges) -> bool { + ledger_changes.has_changes(&filter.address, filter.datastore_key.clone()) +} diff --git a/massa-execution-worker/src/speculative_roll_state.rs b/massa-execution-worker/src/speculative_roll_state.rs index f08ec881abd..a7769c8a0b2 100644 --- a/massa-execution-worker/src/speculative_roll_state.rs +++ b/massa-execution-worker/src/speculative_roll_state.rs @@ -331,12 +331,11 @@ impl SpeculativeRollState { let final_state = self.final_state.read(); for (slot, addr_amount) in final_state .pos_state - .deferred_credits + .get_deferred_credits_range(min_slot..) .credits - .range(min_slot..) { if let Some(amount) = addr_amount.get(address) { - let _ = res.try_insert(*slot, *amount); + let _ = res.try_insert(slot, *amount); }; } } @@ -369,7 +368,6 @@ impl SpeculativeRollState { .final_state .read() .pos_state - .deferred_credits .get_address_credits_for_slot(addr, slot) { return Some(v); @@ -391,20 +389,27 @@ impl SpeculativeRollState { let final_state = self.final_state.read(); // add finals - final_state.pos_state.cycle_history.iter().for_each(|c| { - let mut cur_item = ExecutionAddressCycleInfo { - cycle: c.cycle, - is_final: c.complete, - ok_count: 0, - nok_count: 0, - active_rolls: None, // will be filled afterwards - }; - if let Some(prod_stats) = c.production_stats.get(address) { - cur_item.ok_count = prod_stats.block_success_count; - cur_item.nok_count = prod_stats.block_failure_count; - } - res.push(cur_item); - }); + final_state + .pos_state + .cycle_history_cache + .iter() + .for_each(|c| { + let mut cur_item = ExecutionAddressCycleInfo { + cycle: c.0, + is_final: c.1, + ok_count: 0, + nok_count: 0, + active_rolls: None, // will be filled afterwards + }; + if let Some(prod_stats) = final_state + .pos_state + .get_production_stats_for_address(c.0, *address) + { + cur_item.ok_count = prod_stats.block_success_count; + cur_item.nok_count = prod_stats.block_failure_count; + } + res.push(cur_item); + }); // add active history // note that a last cycle might overlap between final and active histories @@ -531,9 +536,9 @@ impl SpeculativeRollState { if let Some(final_stats) = final_state.pos_state.get_all_production_stats(cycle) { for (addr, stats) in final_stats { accumulated_stats - .entry(*addr) - .and_modify(|cur| cur.extend(stats)) - .or_insert_with(|| *stats); + .entry(addr) + .and_modify(|cur| cur.extend(&stats)) + .or_insert_with(|| stats); } underflow = false; } diff --git a/massa-execution-worker/src/tests/mock.rs b/massa-execution-worker/src/tests/mock.rs index 87dce0ffc61..eea9b1e81b2 100644 --- a/massa-execution-worker/src/tests/mock.rs +++ b/massa-execution-worker/src/tests/mock.rs @@ -1,9 +1,10 @@ +use massa_db::{DBBatch, MassaDB, MassaDBConfig}; use massa_execution_exports::ExecutionError; use 
massa_final_state::{FinalState, FinalStateConfig}; use massa_hash::Hash; use massa_ledger_exports::{LedgerConfig, LedgerController, LedgerEntry, LedgerError}; use massa_ledger_worker::FinalLedger; -use massa_models::config::ENDORSEMENT_COUNT; +use massa_models::config::{ENDORSEMENT_COUNT, GENESIS_TIMESTAMP, T0}; use massa_models::denunciation::Denunciation; use massa_models::execution::TempFileVestingRange; use massa_models::prehash::PreHashMap; @@ -21,6 +22,7 @@ use massa_pos_exports::SelectorConfig; use massa_pos_worker::start_selector_worker; use massa_signature::KeyPair; use massa_time::MassaTime; +use massa_versioning::versioning::{MipStatsConfig, MipStore}; use parking_lot::RwLock; use std::str::FromStr; use std::{ @@ -36,57 +38,24 @@ fn get_initials() -> (NamedTempFile, HashMap) { let mut rolls: BTreeMap = BTreeMap::new(); let mut ledger: HashMap = HashMap::new(); - // thread 0 / 31 - let keypair_0 = - KeyPair::from_str("S1JJeHiZv1C1zZN5GLFcbz6EXYiccmUPLkYuDFA3kayjxP39kFQ").unwrap(); - let addr_0 = Address::from_public_key(&keypair_0.get_public_key()); - rolls.insert(addr_0, 100); - ledger.insert( - addr_0, - LedgerEntry { - balance: Amount::from_str("300_000").unwrap(), - ..Default::default() - }, - ); - - // thread 1 / 31 - let keypair_1 = - KeyPair::from_str("S1kEBGgxHFBdsNC4HtRHhsZsB5irAtYHEmuAKATkfiomYmj58tm").unwrap(); - let addr_1 = Address::from_public_key(&keypair_1.get_public_key()); - rolls.insert(addr_1, 100); - ledger.insert( - addr_1, - LedgerEntry { - balance: Amount::from_str("300_000").unwrap(), - ..Default::default() - }, - ); - - // thread 2 / 31 - let keypair_2 = - KeyPair::from_str("S12APSAzMPsJjVGWzUJ61ZwwGFTNapA4YtArMKDyW4edLu6jHvCr").unwrap(); - let addr_2 = Address::from_public_key(&keypair_2.get_public_key()); - rolls.insert(addr_2, 100); - ledger.insert( - addr_2, - LedgerEntry { - balance: Amount::from_str("300_000").unwrap(), - ..Default::default() - }, - ); - - // thread 3 / 31 - let keypair_3 = - KeyPair::from_str("S12onbtxzgHcDSrVMp9bzP1cUjno8V5hZd4yYiqaMmC3nq4z7fSv").unwrap(); - let addr_3 = Address::from_public_key(&keypair_3.get_public_key()); - rolls.insert(addr_3, 100); - ledger.insert( - addr_3, - LedgerEntry { - balance: Amount::from_str("300_000").unwrap(), - ..Default::default() - }, - ); + let raw_keypairs = [ + "S18r2i8oJJyhF7Kprx98zwxAc3W4szf7RKuVMX6JydZz8zSxHeC", // thread 0 + "S1FpYC4ugG9ivZZbLVrTwWtF9diSRiAwwrVX5Gx1ANSRLfouUjq", // thread 1 + "S1LgXhWLEgAgCX3nm6y8PVPzpybmsYpi6yg6ZySwu5Z4ERnD7Bu", // thread 2 + ]; + + for s in raw_keypairs { + let keypair = KeyPair::from_str(s).unwrap(); + let addr = Address::from_public_key(&keypair.get_public_key()); + rolls.insert(addr, 100); + ledger.insert( + addr, + LedgerEntry { + balance: Amount::from_str("300_000").unwrap(), + ..Default::default() + }, + ); + } // write file serde_json::to_writer_pretty::<&File, BTreeMap>(file.as_file(), &rolls) @@ -102,7 +71,7 @@ fn get_initials() -> (NamedTempFile, HashMap) { /// to the address. 
#[allow(dead_code)] // to avoid warnings on gas_calibration feature pub fn get_random_address_full() -> (Address, KeyPair) { - let keypair = KeyPair::generate(); + let keypair = KeyPair::generate(0).unwrap(); (Address::from_public_key(&keypair.get_public_key()), keypair) } @@ -111,7 +80,15 @@ pub fn get_sample_state( ) -> Result<(Arc>, NamedTempFile, TempDir), LedgerError> { let (rolls_file, ledger) = get_initials(); let (ledger_config, tempfile, tempdir) = LedgerConfig::sample(&ledger); - let mut ledger = FinalLedger::new(ledger_config.clone(), false); + let db_config = MassaDBConfig { + path: tempdir.path().to_path_buf(), + max_history_length: 10, + max_new_elements: 100, + thread_count: THREAD_COUNT, + }; + let db = Arc::new(RwLock::new(MassaDB::new(db_config))); + + let mut ledger = FinalLedger::new(ledger_config.clone(), db.clone()); ledger.load_initial_ledger().unwrap(); let default_config = FinalStateConfig::default(); let cfg = FinalStateConfig { @@ -127,24 +104,49 @@ pub fn get_sample_state( max_executed_denunciations_length: 1000, initial_seed_string: "".to_string(), periods_per_cycle: 10, - max_denunciations_per_block_header: 0, + t0: T0, + genesis_timestamp: *GENESIS_TIMESTAMP, }; let (_, selector_controller) = start_selector_worker(SelectorConfig::default()) .expect("could not start selector controller"); + let mip_store = MipStore::try_from(( + [], + MipStatsConfig { + block_count_considered: 10, + counters_max: 10, + }, + )) + .unwrap(); + let mut final_state = if last_start_period > 0 { FinalState::new_derived_from_snapshot( + db.clone(), cfg, Box::new(ledger), selector_controller, + mip_store, last_start_period, ) .unwrap() } else { - FinalState::new(cfg, Box::new(ledger), selector_controller).unwrap() + FinalState::new( + db.clone(), + cfg, + Box::new(ledger), + selector_controller, + mip_store, + true, + ) + .unwrap() }; + let mut batch: BTreeMap, Option>> = DBBatch::new(); + final_state.pos_state.create_initial_cycle(&mut batch); + final_state + .db + .write() + .write_batch(batch, Default::default(), None); final_state.compute_initial_draws().unwrap(); - final_state.pos_state.create_initial_cycle(); Ok((Arc::new(RwLock::new(final_state)), tempfile, tempdir)) } @@ -167,6 +169,8 @@ pub fn create_block( let header = BlockHeader::new_verifiable( BlockHeader { + current_version: 0, + announced_version: 0, slot, parents: vec![], operation_merkle_root, @@ -200,24 +204,24 @@ pub fn get_initials_vesting(with_value: bool) -> NamedTempFile { let vec = vec![ TempFileVestingRange { - timestamp: MassaTime::from(SEC_TIMESTAMP), + timestamp: MassaTime::from_millis(SEC_TIMESTAMP), min_balance: Some(Amount::from_str("100000").unwrap()), max_rolls: Some(50), }, TempFileVestingRange { - timestamp: MassaTime::from(FUTURE_TIMESTAMP), + timestamp: MassaTime::from_millis(FUTURE_TIMESTAMP), min_balance: Some(Amount::from_str("80000").unwrap()), max_rolls: None, }, TempFileVestingRange { - timestamp: MassaTime::from(PAST_TIMESTAMP), + timestamp: MassaTime::from_millis(PAST_TIMESTAMP), min_balance: Some(Amount::from_str("150000").unwrap()), max_rolls: Some(30), }, ]; let keypair_0 = - KeyPair::from_str("S1JJeHiZv1C1zZN5GLFcbz6EXYiccmUPLkYuDFA3kayjxP39kFQ").unwrap(); + KeyPair::from_str("S18r2i8oJJyhF7Kprx98zwxAc3W4szf7RKuVMX6JydZz8zSxHeC").unwrap(); let addr_0 = Address::from_public_key(&keypair_0.get_public_key()); map.insert(addr_0, vec); diff --git a/massa-execution-worker/src/tests/scenarios_mandatories.rs b/massa-execution-worker/src/tests/scenarios_mandatories.rs index 
93f87c2872e..96fe17c6679 100644 --- a/massa-execution-worker/src/tests/scenarios_mandatories.rs +++ b/massa-execution-worker/src/tests/scenarios_mandatories.rs @@ -1,21 +1,30 @@ // Copyright (c) 2022 MASSA LABS #[cfg(test)] mod tests { + use crate::active_history::ActiveHistory; + use crate::speculative_async_pool::SpeculativeAsyncPool; use crate::start_execution_worker; use crate::tests::mock::{ create_block, get_initials_vesting, get_random_address_full, get_sample_state, }; + use massa_async_pool::AsyncMessage; + use massa_db::DBBatch; use massa_execution_exports::{ ExecutionChannels, ExecutionConfig, ExecutionController, ExecutionError, ReadOnlyExecutionRequest, ReadOnlyExecutionTarget, }; + use massa_hash::Hash; use massa_models::config::{ - LEDGER_ENTRY_BASE_SIZE, LEDGER_ENTRY_DATASTORE_BASE_SIZE, MIP_STORE_STATS_BLOCK_CONSIDERED, + LEDGER_ENTRY_BASE_COST, LEDGER_ENTRY_DATASTORE_BASE_SIZE, MIP_STORE_STATS_BLOCK_CONSIDERED, MIP_STORE_STATS_COUNTERS_MAX, }; use massa_models::prehash::PreHashMap; use massa_models::test_exports::gen_endorsements_for_denunciation; - use massa_models::{address::Address, amount::Amount, slot::Slot}; + use massa_models::{ + address::{Address, UserAddress, UserAddressV0}, + amount::Amount, + slot::Slot, + }; use massa_models::{ block_id::BlockId, datastore::Datastore, @@ -27,14 +36,20 @@ mod tests { use massa_signature::KeyPair; use massa_storage::Storage; use massa_time::MassaTime; - use massa_versioning_worker::versioning::{MipStatsConfig, MipStore}; + use massa_versioning::versioning::{MipStatsConfig, MipStore}; use num::rational::Ratio; + use parking_lot::RwLock; use serial_test::serial; + use std::sync::Arc; use std::{ cmp::Reverse, collections::BTreeMap, collections::HashMap, str::FromStr, time::Duration, }; use tokio::sync::broadcast; + const TEST_SK_1: &str = "S18r2i8oJJyhF7Kprx98zwxAc3W4szf7RKuVMX6JydZz8zSxHeC"; + const TEST_SK_2: &str = "S1FpYC4ugG9ivZZbLVrTwWtF9diSRiAwwrVX5Gx1ANSRLfouUjq"; + const TEST_SK_3: &str = "S1LgXhWLEgAgCX3nm6y8PVPzpybmsYpi6yg6ZySwu5Z4ERnD7Bu"; + #[test] #[serial] fn test_execution_shutdown() { @@ -110,8 +125,8 @@ mod tests { let vesting = get_initials_vesting(false); // setup the period duration let exec_cfg = ExecutionConfig { - t0: 100.into(), - cursor_delay: 0.into(), + t0: MassaTime::from_millis(100), + cursor_delay: MassaTime::from_millis(0), initial_vesting_path: vesting.path().to_path_buf(), ..ExecutionConfig::default() }; @@ -154,6 +169,7 @@ mod tests { is_final: true, }) .expect("readonly execution failed"); + assert_eq!(res.out.slot, Slot::new(1, 0)); assert!(res.gas_cost > 0); assert_eq!(res.out.events.take().len(), 1, "wrong number of events"); @@ -168,6 +184,7 @@ mod tests { is_final: false, }) .expect("readonly execution failed"); + assert!(res.out.slot.period > 8); manager.stop(); @@ -179,7 +196,7 @@ mod tests { storage: &Storage, execution_controller: Box, ) { - let genesis_keypair = KeyPair::generate(); + let genesis_keypair = KeyPair::generate(0).unwrap(); let mut finalized_blocks: HashMap = HashMap::new(); let mut block_storage: PreHashMap = PreHashMap::default(); for thread in 0..config.thread_count { @@ -211,8 +228,8 @@ mod tests { let vesting = get_initials_vesting(false); // setup the period duration let exec_cfg = ExecutionConfig { - t0: 100.into(), - cursor_delay: 0.into(), + t0: MassaTime::from_millis(100), + cursor_delay: MassaTime::from_millis(0), initial_vesting_path: vesting.path().to_path_buf(), ..ExecutionConfig::default() }; @@ -245,8 +262,7 @@ mod tests { init_execution_worker(&exec_cfg, 
&storage, controller.clone()); // get random keypair - let keypair = - KeyPair::from_str("S1JJeHiZv1C1zZN5GLFcbz6EXYiccmUPLkYuDFA3kayjxP39kFQ").unwrap(); + let keypair = KeyPair::from_str(TEST_SK_1).unwrap(); // load bytecodes // you can check the source code of the following wasm file in massa-unit-tests-src let bytecode = include_bytes!("./wasm/nested_call.wasm"); @@ -258,7 +274,7 @@ mod tests { let operation = create_execute_sc_operation(&keypair, bytecode, datastore).unwrap(); storage.store_operations(vec![operation.clone()]); let block = create_block( - KeyPair::generate(), + KeyPair::generate(0).unwrap(), vec![operation], vec![], Slot::new(1, 0), @@ -328,7 +344,7 @@ mod tests { let mut storage = Storage::create_root(); storage.store_operations(vec![operation.clone()]); let block = create_block( - KeyPair::generate(), + KeyPair::generate(0).unwrap(), vec![operation], vec![], Slot::new(2, 0), @@ -372,8 +388,8 @@ mod tests { let vesting = get_initials_vesting(false); // setup the period duration let exec_cfg = ExecutionConfig { - t0: 100.into(), - cursor_delay: 0.into(), + t0: MassaTime::from_millis(100), + cursor_delay: MassaTime::from_millis(0), initial_vesting_path: vesting.path().to_path_buf(), ..ExecutionConfig::default() }; @@ -406,8 +422,7 @@ mod tests { init_execution_worker(&exec_cfg, &storage, controller.clone()); // get random keypair - let keypair = - KeyPair::from_str("S1JJeHiZv1C1zZN5GLFcbz6EXYiccmUPLkYuDFA3kayjxP39kFQ").unwrap(); + let keypair = KeyPair::from_str(TEST_SK_1).unwrap(); // load bytecodes // you can check the source code of the following wasm file in massa-unit-tests-src let bytecode = include_bytes!("./wasm/get_call_coins_main.wasm"); @@ -419,7 +434,7 @@ mod tests { let operation = create_execute_sc_operation(&keypair, bytecode, datastore).unwrap(); storage.store_operations(vec![operation.clone()]); let block = create_block( - KeyPair::generate(), + KeyPair::generate(0).unwrap(), vec![operation], vec![], Slot::new(1, 0), @@ -467,7 +482,7 @@ mod tests { let mut storage = Storage::create_root(); storage.store_operations(vec![operation.clone()]); let block = create_block( - KeyPair::generate(), + KeyPair::generate(0).unwrap(), vec![operation], vec![], Slot::new(2, 0), @@ -524,9 +539,9 @@ mod tests { let vesting = get_initials_vesting(false); // setup the period duration and the maximum gas for asynchronous messages execution let exec_cfg = ExecutionConfig { - t0: 100.into(), + t0: MassaTime::from_millis(100), + cursor_delay: MassaTime::from_millis(0), max_async_gas: 100_000, - cursor_delay: 0.into(), initial_vesting_path: vesting.path().to_path_buf(), ..ExecutionConfig::default() }; @@ -559,8 +574,8 @@ mod tests { // initialize the execution system with genesis blocks init_execution_worker(&exec_cfg, &storage, controller.clone()); // keypair associated to thread 0 - let keypair = - KeyPair::from_str("S1JJeHiZv1C1zZN5GLFcbz6EXYiccmUPLkYuDFA3kayjxP39kFQ").unwrap(); + let keypair = KeyPair::from_str(TEST_SK_1).unwrap(); + // load bytecodes // you can check the source code of the following wasm file in massa-unit-tests-src let bytecode = include_bytes!("./wasm/send_message.wasm"); @@ -572,7 +587,7 @@ mod tests { let operation = create_execute_sc_operation(&keypair, bytecode, datastore).unwrap(); storage.store_operations(vec![operation.clone()]); let block = create_block( - KeyPair::generate(), + KeyPair::generate(0).unwrap(), vec![operation], vec![], Slot::new(1, 0), @@ -634,9 +649,9 @@ mod tests { let vesting = get_initials_vesting(false); // setup the period 
duration and the maximum gas for asynchronous messages execution let exec_cfg = ExecutionConfig { - t0: 100.into(), max_async_gas: 100_000, - cursor_delay: 0.into(), + t0: MassaTime::from_millis(100), + cursor_delay: MassaTime::from_millis(0), initial_vesting_path: vesting.path().to_path_buf(), ..ExecutionConfig::default() }; @@ -670,8 +685,7 @@ mod tests { // initialize the execution system with genesis blocks init_execution_worker(&exec_cfg, &storage, controller.clone()); // keypair associated to thread 0 - let keypair = - KeyPair::from_str("S1JJeHiZv1C1zZN5GLFcbz6EXYiccmUPLkYuDFA3kayjxP39kFQ").unwrap(); + let keypair = KeyPair::from_str(TEST_SK_1).unwrap(); // load bytecodes // you can check the source code of the following wasm file in massa-unit-tests-src let bytecode = include_bytes!("./wasm/send_message.wasm"); @@ -684,7 +698,7 @@ mod tests { let tested_op_id = operation.id.clone(); storage.store_operations(vec![operation.clone()]); let block = create_block( - KeyPair::generate(), + KeyPair::generate(0).unwrap(), vec![operation], vec![], Slot::new(1, 0), @@ -746,8 +760,8 @@ mod tests { let vesting = get_initials_vesting(false); // setup the period duration and cursor delay let exec_cfg = ExecutionConfig { - t0: 100.into(), - cursor_delay: 0.into(), + t0: MassaTime::from_millis(100), + cursor_delay: MassaTime::from_millis(0), initial_vesting_path: vesting.path().to_path_buf(), ..ExecutionConfig::default() }; @@ -781,8 +795,7 @@ mod tests { // initialize the execution system with genesis blocks init_execution_worker(&exec_cfg, &storage, controller.clone()); // keypair associated to thread 0 - let keypair = - KeyPair::from_str("S1JJeHiZv1C1zZN5GLFcbz6EXYiccmUPLkYuDFA3kayjxP39kFQ").unwrap(); + let keypair = KeyPair::from_str(TEST_SK_1).unwrap(); // load bytecodes // you can check the source code of the following wasm files in massa-unit-tests-src let exec_bytecode = include_bytes!("./wasm/local_execution.wasm"); @@ -798,7 +811,7 @@ mod tests { create_execute_sc_operation(&keypair, call_bytecode, datastore).unwrap(); storage.store_operations(vec![local_exec_op.clone(), local_call_op.clone()]); let block = create_block( - KeyPair::generate(), + KeyPair::generate(0).unwrap(), vec![local_exec_op.clone(), local_call_op.clone()], vec![], Slot::new(1, 0), @@ -834,7 +847,7 @@ mod tests { assert_eq!(events[1].context.call_stack.len(), 1); assert_eq!( events[1].context.call_stack.back().unwrap(), - &Address::from_str("AU12eS5qggxuvqviD5eQ72oM2QhGwnmNbT1BaxVXU4hqQ8rAYXFe").unwrap() + &Address::from_public_key(&keypair.get_public_key()) ); assert_eq!(events[2].data, "one local execution completed"); let amount = Amount::from_raw(events[5].data.parse().unwrap()); @@ -846,7 +859,7 @@ mod tests { assert_eq!(events[5].context.call_stack.len(), 1); assert_eq!( events[1].context.call_stack.back().unwrap(), - &Address::from_str("AU12eS5qggxuvqviD5eQ72oM2QhGwnmNbT1BaxVXU4hqQ8rAYXFe").unwrap() + &Address::from_public_key(&keypair.get_public_key()) ); assert_eq!(events[6].data, "one local call completed"); @@ -870,8 +883,8 @@ mod tests { let vesting = get_initials_vesting(false); // setup the period duration and cursor delay let exec_cfg = ExecutionConfig { - t0: 100.into(), - cursor_delay: 0.into(), + t0: MassaTime::from_millis(100), + cursor_delay: MassaTime::from_millis(0), initial_vesting_path: vesting.path().to_path_buf(), ..ExecutionConfig::default() }; @@ -905,8 +918,7 @@ mod tests { // initialize the execution system with genesis blocks init_execution_worker(&exec_cfg, &storage, controller.clone()); 
// keypair associated to thread 0 - let keypair = - KeyPair::from_str("S1JJeHiZv1C1zZN5GLFcbz6EXYiccmUPLkYuDFA3kayjxP39kFQ").unwrap(); + let keypair = KeyPair::from_str(TEST_SK_1).unwrap(); // load bytecodes // you can check the source code of the following wasm files in massa-unit-tests-src let op_bytecode = include_bytes!("./wasm/deploy_sc.wasm"); @@ -917,7 +929,13 @@ mod tests { // create the block contaning the operation let op = create_execute_sc_operation(&keypair, op_bytecode, datastore.clone()).unwrap(); storage.store_operations(vec![op.clone()]); - let block = create_block(KeyPair::generate(), vec![op], vec![], Slot::new(1, 0)).unwrap(); + let block = create_block( + KeyPair::generate(0).unwrap(), + vec![op], + vec![], + Slot::new(1, 0), + ) + .unwrap(); // store the block in storage storage.store_block(block.clone()); @@ -976,9 +994,9 @@ mod tests { let vesting = get_initials_vesting(false); // setup the period duration and the maximum gas for asynchronous messages execution let exec_cfg = ExecutionConfig { - t0: 100.into(), + t0: MassaTime::from_millis(100), + cursor_delay: MassaTime::from_millis(0), max_async_gas: 1_000_000_000, - cursor_delay: 0.into(), initial_vesting_path: vesting.path().to_path_buf(), ..ExecutionConfig::default() }; @@ -1013,8 +1031,7 @@ mod tests { // initialize the execution system with genesis blocks init_execution_worker(&exec_cfg, &storage, controller.clone()); // keypair associated to thread 0 - let keypair = - KeyPair::from_str("S1JJeHiZv1C1zZN5GLFcbz6EXYiccmUPLkYuDFA3kayjxP39kFQ").unwrap(); + let keypair = KeyPair::from_str(TEST_SK_1).unwrap(); // load bytecode // you can check the source code of the following wasm file in massa-unit-tests-src let bytecode = include_bytes!("./wasm/send_message_deploy_condition.wasm"); @@ -1057,12 +1074,11 @@ mod tests { }); // match the events - assert_eq!(events.len(), 2, "Two events were expected"); + assert_eq!(events.len(), 2, "2 events were expected"); assert_eq!(events[0].data, "Triggered"); // keypair associated to thread 1 - let keypair = - KeyPair::from_str("S1kEBGgxHFBdsNC4HtRHhsZsB5irAtYHEmuAKATkfiomYmj58tm").unwrap(); + let keypair = KeyPair::from_str(TEST_SK_2).unwrap(); // load bytecode // you can check the source code of the following wasm file in massa-unit-tests-src let bytecode = include_bytes!("./wasm/send_message_wrong_trigger.wasm"); @@ -1090,12 +1106,10 @@ mod tests { }); // match the events - assert!(events.len() == 3, "Three event was expected"); - assert_eq!(events[0].data, "Triggered"); + assert!(events.len() == 3, "3 events were expected"); // keypair associated to thread 2 - let keypair = - KeyPair::from_str("S12APSAzMPsJjVGWzUJ61ZwwGFTNapA4YtArMKDyW4edLu6jHvCr").unwrap(); + let keypair = KeyPair::from_str(TEST_SK_3).unwrap(); // load bytecode // you can check the source code of the following wasm file in massa-unit-tests-src // This line execute the smart contract that will modify the data entry and then trigger the SC. 
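As the hunks above show, `KeyPair::generate` is now fallible and takes an explicit key version, and the hard-coded test secret keys were replaced by the `TEST_SK_*` constants. A short sketch of the new generation pattern, mirroring `get_random_address_full` from the test mock module earlier in this diff:

use massa_models::address::Address;
use massa_signature::KeyPair;

/// Build a fresh version-0 test identity, as the updated tests do.
fn fresh_test_identity() -> (Address, KeyPair) {
    // `generate` now takes the keypair version and returns a Result.
    let keypair = KeyPair::generate(0).expect("unsupported keypair version");
    (Address::from_public_key(&keypair.get_public_key()), keypair)
}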
@@ -1119,14 +1133,11 @@ mod tests { // retrieve events emitted by smart contracts let events = controller.get_filtered_sc_output_event(EventFilter { - start: Some(Slot::new(1, 3)), ..Default::default() }); // match the events - assert_eq!(events.len(), 1, "One event was expected"); - assert_eq!(events[0].data, "Triggered"); - assert_eq!(events[0].data, "Triggered"); + assert!(events.len() == 4, "4 events were expected"); manager.stop(); } @@ -1137,8 +1148,8 @@ mod tests { let vesting = get_initials_vesting(false); // setup the period duration let exec_cfg = ExecutionConfig { - t0: 100.into(), - cursor_delay: 0.into(), + t0: MassaTime::from_millis(100), + cursor_delay: MassaTime::from_millis(0), initial_vesting_path: vesting.path().to_path_buf(), ..ExecutionConfig::default() }; @@ -1172,9 +1183,10 @@ mod tests { // initialize the execution system with genesis blocks init_execution_worker(&exec_cfg, &storage, controller.clone()); // generate the sender_keypair and recipient_address - let sender_keypair = - KeyPair::from_str("S1JJeHiZv1C1zZN5GLFcbz6EXYiccmUPLkYuDFA3kayjxP39kFQ").unwrap(); + + let sender_keypair = KeyPair::from_str(TEST_SK_1).unwrap(); let (recipient_address, _keypair) = get_random_address_full(); + // create the operation let operation = Operation::new_verifiable( Operation { @@ -1192,7 +1204,7 @@ mod tests { // create the block containing the transaction operation storage.store_operations(vec![operation.clone()]); let block = create_block( - KeyPair::generate(), + KeyPair::generate(0).unwrap(), vec![operation], vec![], Slot::new(1, 0), @@ -1222,12 +1234,7 @@ mod tests { Amount::from_str("100") .unwrap() // Storage cost base - .saturating_sub( - exec_cfg - .storage_costs_constants - .ledger_cost_per_byte - .saturating_mul_u64(LEDGER_ENTRY_BASE_SIZE as u64) - ) + .saturating_sub(LEDGER_ENTRY_BASE_COST) ); // stop the execution controller manager.stop(); @@ -1239,8 +1246,8 @@ mod tests { let vesting = get_initials_vesting(true); // setup the period duration let exec_cfg = ExecutionConfig { - t0: 100.into(), - cursor_delay: 0.into(), + t0: MassaTime::from_millis(100), + cursor_delay: MassaTime::from_millis(0), initial_vesting_path: vesting.path().to_path_buf(), ..ExecutionConfig::default() }; @@ -1274,8 +1281,7 @@ mod tests { // initialize the execution system with genesis blocks init_execution_worker(&exec_cfg, &storage, controller.clone()); // generate the sender_keypair and recipient_address - let sender_keypair = - KeyPair::from_str("S1JJeHiZv1C1zZN5GLFcbz6EXYiccmUPLkYuDFA3kayjxP39kFQ").unwrap(); + let sender_keypair = KeyPair::from_str(TEST_SK_1).unwrap(); let sender_addr = Address::from_public_key(&sender_keypair.get_public_key()); let (recipient_address, _keypair) = get_random_address_full(); // create the operation @@ -1295,7 +1301,7 @@ mod tests { // create the block containing the transaction operation storage.store_operations(vec![operation.clone()]); let block = create_block( - KeyPair::generate(), + KeyPair::generate(0).unwrap(), vec![operation], vec![], Slot::new(1, 0), @@ -1316,7 +1322,11 @@ mod tests { std::thread::sleep(Duration::from_millis(100)); // retrieve the event emitted by the execution error - let events = controller.get_filtered_sc_output_event(EventFilter::default()); + let events = controller.get_filtered_sc_output_event(EventFilter { + is_error: Some(true), + ..Default::default() + }); + dbg!(&events); assert!(events[0].data.contains("massa_execution_error")); assert!(events[0] .data @@ -1348,8 +1358,8 @@ mod tests { // setup the period duration let 
vesting = get_initials_vesting(true); let exec_cfg = ExecutionConfig { - t0: 100.into(), - cursor_delay: 0.into(), + t0: MassaTime::from_millis(100), + cursor_delay: MassaTime::from_millis(0), initial_vesting_path: vesting.path().to_path_buf(), ..ExecutionConfig::default() }; @@ -1384,8 +1394,7 @@ mod tests { // initialize the execution system with genesis blocks init_execution_worker(&exec_cfg, &storage, controller.clone()); // generate the keypair and its corresponding address - let keypair = - KeyPair::from_str("S1JJeHiZv1C1zZN5GLFcbz6EXYiccmUPLkYuDFA3kayjxP39kFQ").unwrap(); + let keypair = KeyPair::from_str(TEST_SK_1).unwrap(); let address = Address::from_public_key(&keypair.get_public_key()); // create the operation // try to buy 60 rolls so (100+60) and the max rolls specified for this address in vesting is 150 @@ -1402,7 +1411,7 @@ mod tests { // create the block containing the roll buy operation storage.store_operations(vec![operation.clone()]); let block = create_block( - KeyPair::generate(), + KeyPair::generate(0).unwrap(), vec![operation], vec![], Slot::new(1, 0), @@ -1446,8 +1455,8 @@ mod tests { let vesting = get_initials_vesting(false); // setup the period duration let exec_cfg = ExecutionConfig { - t0: 100.into(), - cursor_delay: 0.into(), + t0: MassaTime::from_millis(100), + cursor_delay: MassaTime::from_millis(0), initial_vesting_path: vesting.path().to_path_buf(), ..ExecutionConfig::default() }; @@ -1481,8 +1490,7 @@ mod tests { // initialize the execution system with genesis blocks init_execution_worker(&exec_cfg, &storage, controller.clone()); // generate the keypair and its corresponding address - let keypair = - KeyPair::from_str("S1JJeHiZv1C1zZN5GLFcbz6EXYiccmUPLkYuDFA3kayjxP39kFQ").unwrap(); + let keypair = KeyPair::from_str(TEST_SK_1).unwrap(); let address = Address::from_public_key(&keypair.get_public_key()); // create the operation let operation = Operation::new_verifiable( @@ -1498,7 +1506,7 @@ mod tests { // create the block containing the roll buy operation storage.store_operations(vec![operation.clone()]); let block = create_block( - KeyPair::generate(), + KeyPair::generate(0).unwrap(), vec![operation], vec![], Slot::new(1, 0), @@ -1537,10 +1545,10 @@ mod tests { // setup the period duration let mut exec_cfg = ExecutionConfig { - t0: 100.into(), + t0: MassaTime::from_millis(100), + cursor_delay: MassaTime::from_millis(0), periods_per_cycle: 2, thread_count: 2, - cursor_delay: 0.into(), initial_vesting_path: vesting.path().to_path_buf(), last_start_period: 2, ..Default::default() @@ -1580,8 +1588,7 @@ mod tests { // initialize the execution system with genesis blocks init_execution_worker(&exec_cfg, &storage, controller.clone()); // generate the keypair and its corresponding address - let keypair = - KeyPair::from_str("S1JJeHiZv1C1zZN5GLFcbz6EXYiccmUPLkYuDFA3kayjxP39kFQ").unwrap(); + let keypair = KeyPair::from_str(TEST_SK_1).unwrap(); let address = Address::from_public_key(&keypair.get_public_key()); // get initial balance @@ -1593,13 +1600,23 @@ mod tests { let roll_sell_2 = 1; let initial_deferred_credits = Amount::from_str("100").unwrap(); + + let mut batch = DBBatch::new(); + // set initial_deferred_credits that will be reimbursed at first block - sample_state.write().pos_state.deferred_credits.insert( - Slot::new(1, 0), - address, - initial_deferred_credits, + sample_state.write().pos_state.put_deferred_credits_entry( + &Slot::new(1, 0), + &address, + &initial_deferred_credits, + &mut batch, ); + sample_state + .write() + .db + .write() + 
.write_batch(batch, Default::default(), None); + // create operation 1 let operation1 = Operation::new_verifiable( Operation { @@ -1628,7 +1645,7 @@ mod tests { // create the block containing the roll buy operation storage.store_operations(vec![operation1.clone(), operation2.clone()]); let block = create_block( - KeyPair::generate(), + KeyPair::generate(0).unwrap(), vec![operation1, operation2], vec![], Slot::new(3, 0), @@ -1730,10 +1747,10 @@ mod tests { // setup the period duration let mut exec_cfg = ExecutionConfig { - t0: 100.into(), + t0: MassaTime::from_millis(100), + cursor_delay: MassaTime::from_millis(0), periods_per_cycle: 2, thread_count: 2, - cursor_delay: 0.into(), initial_vesting_path: vesting.path().to_path_buf(), last_start_period: 2, roll_count_to_slash_on_denunciation: 3, // Set to 3 to check if config is taken into account @@ -1776,8 +1793,7 @@ mod tests { init_execution_worker(&exec_cfg, &storage, controller.clone()); // generate the keypair and its corresponding address - let keypair = - KeyPair::from_str("S1JJeHiZv1C1zZN5GLFcbz6EXYiccmUPLkYuDFA3kayjxP39kFQ").unwrap(); + let keypair = KeyPair::from_str(TEST_SK_1).unwrap(); let address = Address::from_public_key(&keypair.get_public_key()); // get initial balance @@ -1819,7 +1835,7 @@ mod tests { // create the block containing the roll buy operation storage.store_operations(vec![operation1.clone()]); let block = create_block( - KeyPair::generate(), + KeyPair::generate(0).unwrap(), vec![operation1], vec![denunciation.clone(), denunciation, denunciation_2], Slot::new(3, 0), @@ -1892,10 +1908,10 @@ mod tests { // setup the period duration let mut exec_cfg = ExecutionConfig { - t0: 100.into(), + t0: MassaTime::from_millis(100), + cursor_delay: MassaTime::from_millis(0), periods_per_cycle: 2, thread_count: 2, - cursor_delay: 0.into(), initial_vesting_path: vesting.path().to_path_buf(), last_start_period: 2, roll_count_to_slash_on_denunciation: 4, // Set to 4 to check if config is taken into account @@ -1938,8 +1954,7 @@ mod tests { init_execution_worker(&exec_cfg, &storage, controller.clone()); // generate the keypair and its corresponding address - let keypair = - KeyPair::from_str("S1JJeHiZv1C1zZN5GLFcbz6EXYiccmUPLkYuDFA3kayjxP39kFQ").unwrap(); + let keypair = KeyPair::from_str(TEST_SK_1).unwrap(); let address = Address::from_public_key(&keypair.get_public_key()); // get initial balance @@ -1994,7 +2009,7 @@ mod tests { // create the block containing the roll buy operation storage.store_operations(vec![operation1.clone(), operation2.clone()]); let block = create_block( - KeyPair::generate(), + KeyPair::generate(0).unwrap(), vec![operation1, operation2], vec![denunciation.clone(), denunciation], Slot::new(3, 0), @@ -2070,9 +2085,9 @@ mod tests { let vesting = get_initials_vesting(false); // setup the period duration and the maximum gas for asynchronous messages execution let exec_cfg = ExecutionConfig { - t0: 100.into(), + t0: MassaTime::from_millis(100), + cursor_delay: MassaTime::from_millis(0), max_async_gas: 100_000, - cursor_delay: 0.into(), initial_vesting_path: vesting.path().to_path_buf(), ..ExecutionConfig::default() }; @@ -2106,8 +2121,7 @@ mod tests { // initialize the execution system with genesis blocks init_execution_worker(&exec_cfg, &storage, controller.clone()); // keypair associated to thread 0 - let keypair = - KeyPair::from_str("S1JJeHiZv1C1zZN5GLFcbz6EXYiccmUPLkYuDFA3kayjxP39kFQ").unwrap(); + let keypair = KeyPair::from_str(TEST_SK_1).unwrap(); // load bytecode // you can check the source code of the 
following wasm file in massa-unit-tests-src let bytecode = include_bytes!("./wasm/execution_error.wasm"); @@ -2116,7 +2130,7 @@ mod tests { create_execute_sc_operation(&keypair, bytecode, BTreeMap::default()).unwrap(); storage.store_operations(vec![operation.clone()]); let block = create_block( - KeyPair::generate(), + KeyPair::generate(0).unwrap(), vec![operation], vec![], Slot::new(1, 0), @@ -2134,7 +2148,7 @@ mod tests { Default::default(), block_storage.clone(), ); - std::thread::sleep(Duration::from_millis(10)); + std::thread::sleep(Duration::from_millis(100)); // retrieve the event emitted by the execution error let events = controller.get_filtered_sc_output_event(EventFilter { @@ -2159,9 +2173,9 @@ mod tests { let vesting = get_initials_vesting(false); // setup the period duration and the maximum gas for asynchronous messages execution let exec_cfg = ExecutionConfig { - t0: 100.into(), + t0: MassaTime::from_millis(100), + cursor_delay: MassaTime::from_millis(0), max_async_gas: 100_000, - cursor_delay: 0.into(), initial_vesting_path: vesting.path().to_path_buf(), ..ExecutionConfig::default() }; @@ -2195,8 +2209,7 @@ mod tests { // initialize the execution system with genesis blocks init_execution_worker(&exec_cfg, &storage, controller.clone()); // keypair associated to thread 0 - let keypair = - KeyPair::from_str("S1JJeHiZv1C1zZN5GLFcbz6EXYiccmUPLkYuDFA3kayjxP39kFQ").unwrap(); + let keypair = KeyPair::from_str(TEST_SK_1).unwrap(); // load bytecode // you can check the source code of the following wasm file in massa-unit-tests-src let bytecode = include_bytes!("./wasm/datastore.wasm"); @@ -2206,7 +2219,7 @@ mod tests { let operation = create_execute_sc_operation(&keypair, bytecode, datastore).unwrap(); storage.store_operations(vec![operation.clone()]); let block = create_block( - KeyPair::generate(), + KeyPair::generate(0).unwrap(), vec![operation], vec![], Slot::new(1, 0), @@ -2245,9 +2258,9 @@ mod tests { let vesting = get_initials_vesting(false); // setup the period duration and the maximum gas for asynchronous messages execution let exec_cfg = ExecutionConfig { - t0: 100.into(), + t0: MassaTime::from_millis(100), + cursor_delay: MassaTime::from_millis(0), max_async_gas: 100_000, - cursor_delay: 0.into(), initial_vesting_path: vesting.path().to_path_buf(), ..ExecutionConfig::default() }; @@ -2281,8 +2294,7 @@ mod tests { // initialize the execution system with genesis blocks init_execution_worker(&exec_cfg, &storage, controller.clone()); // keypair associated to thread 0 - let keypair = - KeyPair::from_str("S1JJeHiZv1C1zZN5GLFcbz6EXYiccmUPLkYuDFA3kayjxP39kFQ").unwrap(); + let keypair = KeyPair::from_str(TEST_SK_1).unwrap(); // load bytecodes // you can check the source code of the following wasm file in massa-unit-tests-src let bytecode = include_bytes!("./wasm/set_bytecode_fail.wasm"); @@ -2294,7 +2306,7 @@ mod tests { let operation = create_execute_sc_operation(&keypair, bytecode, datastore).unwrap(); storage.store_operations(vec![operation.clone()]); let block = create_block( - KeyPair::generate(), + KeyPair::generate(0).unwrap(), vec![operation], vec![], Slot::new(1, 0), @@ -2331,9 +2343,9 @@ mod tests { let vesting = get_initials_vesting(false); // setup the period duration and the maximum gas for asynchronous messages execution let exec_cfg = ExecutionConfig { - t0: 100.into(), + t0: MassaTime::from_millis(100), + cursor_delay: MassaTime::from_millis(0), max_async_gas: 100_000, - cursor_delay: 0.into(), initial_vesting_path: vesting.path().to_path_buf(), 
..ExecutionConfig::default() }; @@ -2368,8 +2380,7 @@ mod tests { init_execution_worker(&exec_cfg, &storage, controller.clone()); // keypair associated to thread 0 - let keypair = - KeyPair::from_str("S1JJeHiZv1C1zZN5GLFcbz6EXYiccmUPLkYuDFA3kayjxP39kFQ").unwrap(); + let keypair = KeyPair::from_str(TEST_SK_1).unwrap(); // let address = Address::from_public_key(&keypair.get_public_key()); // load bytecode @@ -2380,7 +2391,7 @@ mod tests { create_execute_sc_operation(&keypair, bytecode, BTreeMap::default()).unwrap(); storage.store_operations(vec![operation.clone()]); let block = create_block( - KeyPair::generate(), + KeyPair::generate(0).unwrap(), vec![operation], vec![], Slot::new(1, 0), @@ -2480,8 +2491,8 @@ mod tests { // Compile the `./wasm_tests` and generate a block with `event_test.wasm` // as data. Then we check if we get an event as expected. let exec_cfg = ExecutionConfig { - t0: 100.into(), - cursor_delay: 0.into(), + t0: MassaTime::from_millis(100), + cursor_delay: MassaTime::from_millis(0), initial_vesting_path: vesting.path().to_path_buf(), ..ExecutionConfig::default() }; @@ -2516,8 +2527,7 @@ mod tests { // create blockclique block at slot (1,1) { let blockclique_block_slot = Slot::new(1, 1); - let keypair = - KeyPair::from_str("S1kEBGgxHFBdsNC4HtRHhsZsB5irAtYHEmuAKATkfiomYmj58tm").unwrap(); + let keypair = KeyPair::from_str(TEST_SK_2).unwrap(); let event_test_data = include_bytes!("./wasm/event_test.wasm"); let operation = create_execute_sc_operation(&keypair, event_test_data, BTreeMap::default()) @@ -2549,8 +2559,7 @@ mod tests { // create blockclique block at slot (1,0) { let blockclique_block_slot = Slot::new(1, 0); - let keypair = - KeyPair::from_str("S1JJeHiZv1C1zZN5GLFcbz6EXYiccmUPLkYuDFA3kayjxP39kFQ").unwrap(); + let keypair = KeyPair::from_str(TEST_SK_1).unwrap(); let event_test_data = include_bytes!("./wasm/event_test.wasm"); let operation = create_execute_sc_operation(&keypair, event_test_data, BTreeMap::default()) @@ -2589,7 +2598,7 @@ mod tests { let vesting = get_initials_vesting(false); // config let exec_cfg = ExecutionConfig { - t0: 100.into(), + t0: MassaTime::from_millis(100), initial_vesting_path: vesting.path().to_path_buf(), ..ExecutionConfig::default() }; @@ -2623,8 +2632,7 @@ mod tests { init_execution_worker(&exec_cfg, &storage, controller.clone()); // keypair associated to thread 0 - let keypair = - KeyPair::from_str("S1JJeHiZv1C1zZN5GLFcbz6EXYiccmUPLkYuDFA3kayjxP39kFQ").unwrap(); + let keypair = KeyPair::from_str(TEST_SK_1).unwrap(); // load bytecode // you can check the source code of the following wasm file in massa-unit-tests-src @@ -2647,7 +2655,7 @@ mod tests { .unwrap(); storage.store_operations(vec![operation.clone()]); let block = create_block( - KeyPair::generate(), + KeyPair::generate(0).unwrap(), vec![operation], vec![], Slot::new(1, 0), @@ -2734,9 +2742,9 @@ mod tests { let vesting = get_initials_vesting(false); // setup the period duration and the maximum gas for asynchronous messages execution let exec_cfg = ExecutionConfig { - t0: 100.into(), + t0: MassaTime::from_millis(100), + cursor_delay: MassaTime::from_millis(0), max_async_gas: 100_000, - cursor_delay: 0.into(), initial_vesting_path: vesting.path().to_path_buf(), ..ExecutionConfig::default() }; @@ -2769,8 +2777,7 @@ mod tests { // initialize the execution system with genesis blocks init_execution_worker(&exec_cfg, &storage, controller.clone()); // keypair associated to thread 0 - let keypair = - KeyPair::from_str("S1JJeHiZv1C1zZN5GLFcbz6EXYiccmUPLkYuDFA3kayjxP39kFQ").unwrap(); + 
let keypair = KeyPair::from_str(TEST_SK_1).unwrap(); // load bytecode // you can check the source code of the following wasm file in massa-unit-tests-src let bytecode = include_bytes!("./wasm/use_builtins.wasm"); @@ -2779,7 +2786,7 @@ mod tests { create_execute_sc_operation(&keypair, bytecode, BTreeMap::default()).unwrap(); storage.store_operations(vec![operation.clone()]); let block = create_block( - KeyPair::generate(), + KeyPair::generate(0).unwrap(), vec![operation], vec![], Slot::new(1, 0), @@ -2829,9 +2836,9 @@ mod tests { let vesting = get_initials_vesting(false); // setup the period duration and the maximum gas for asynchronous messages execution let exec_cfg = ExecutionConfig { - t0: 100.into(), + t0: MassaTime::from_millis(100), + cursor_delay: MassaTime::from_millis(0), max_async_gas: 100_000, - cursor_delay: 0.into(), initial_vesting_path: vesting.path().to_path_buf(), ..ExecutionConfig::default() }; @@ -2865,8 +2872,7 @@ mod tests { init_execution_worker(&exec_cfg, &storage, controller.clone()); // keypair associated to thread 0 - let keypair = - KeyPair::from_str("S1JJeHiZv1C1zZN5GLFcbz6EXYiccmUPLkYuDFA3kayjxP39kFQ").unwrap(); + let keypair = KeyPair::from_str(TEST_SK_1).unwrap(); // let address = Address::from_public_key(&keypair.get_public_key()); // load bytecode @@ -2878,7 +2884,7 @@ mod tests { create_execute_sc_operation(&keypair, bytecode, BTreeMap::default()).unwrap(); storage.store_operations(vec![operation.clone()]); let block = create_block( - KeyPair::generate(), + KeyPair::generate(0).unwrap(), vec![operation], vec![], Slot::new(1, 0), @@ -2929,4 +2935,38 @@ mod tests { // stop the execution controller manager.stop(); } + + #[test] + fn test_take_batch() { + let final_state = get_sample_state(0).unwrap().0; + let active_history = Arc::new(RwLock::new(ActiveHistory::default())); + + let mut speculative_pool = SpeculativeAsyncPool::new(final_state, active_history); + + let address = Address::User(UserAddress::UserAddressV0(UserAddressV0( + Hash::compute_from(b"abc"), + ))); + + for i in 1..10 { + let message = AsyncMessage::new_with_hash( + Slot::new(0, 0), + 0, + address, + address, + "function".to_string(), + i, + Amount::from_str("0.1").unwrap(), + Amount::from_str("0.3").unwrap(), + Slot::new(1, 0), + Slot::new(3, 0), + Vec::new(), + None, + None, + ); + speculative_pool.push_new_message(message) + } + assert_eq!(speculative_pool.get_message_infos().len(), 9); + speculative_pool.take_batch_to_execute(Slot::new(2, 0), 19); + assert_eq!(speculative_pool.get_message_infos().len(), 4); + } } diff --git a/massa-execution-worker/src/tests/tests_active_history.rs b/massa-execution-worker/src/tests/tests_active_history.rs index 03cc83fcb87..90bfac9f8f9 100644 --- a/massa-execution-worker/src/tests/tests_active_history.rs +++ b/massa-execution-worker/src/tests/tests_active_history.rs @@ -7,7 +7,7 @@ mod tests { use massa_final_state::StateChanges; use massa_hash::Hash; - use massa_models::address::{Address, UserAddress}; + use massa_models::address::{Address, UserAddress, UserAddressV0}; use massa_models::amount::Amount; use massa_models::prehash::{CapacityAllocator, PreHashMap}; use massa_pos_exports::{DeferredCredits, PoSChanges}; @@ -19,8 +19,12 @@ mod tests { let slot1 = Slot::new(2, 2); let slot2 = Slot::new(4, 11); - let addr1 = Address::User(UserAddress(Hash::compute_from("AU1".as_bytes()))); - let addr2 = Address::User(UserAddress(Hash::compute_from("AU2".as_bytes()))); + let addr1 = Address::User(UserAddress::UserAddressV0(UserAddressV0( + 
Hash::compute_from("AU1".as_bytes()), + ))); + let addr2 = Address::User(UserAddress::UserAddressV0(UserAddressV0( + Hash::compute_from("AU2".as_bytes()), + ))); let amount_a1_s1 = Amount::from_raw(500); let amount_a2_s1 = Amount::from_raw(2702); diff --git a/massa-execution-worker/src/tests/tests_vesting_manager.rs b/massa-execution-worker/src/tests/tests_vesting_manager.rs index a05ffdb8b2f..45a2c2f541f 100644 --- a/massa-execution-worker/src/tests/tests_vesting_manager.rs +++ b/massa-execution-worker/src/tests/tests_vesting_manager.rs @@ -31,24 +31,24 @@ mod test { fn test_get_addr_vesting_at_time() { let manager = mock_manager(true); - let keypair_0 = massa_signature::KeyPair::from_str( - "S1JJeHiZv1C1zZN5GLFcbz6EXYiccmUPLkYuDFA3kayjxP39kFQ", + let keypair = massa_signature::KeyPair::from_str( + "S18r2i8oJJyhF7Kprx98zwxAc3W4szf7RKuVMX6JydZz8zSxHeC", ) .unwrap(); - let addr = Address::from_public_key(&keypair_0.get_public_key()); + let addr = Address::from_public_key(&keypair.get_public_key()); { // addr not vested let addr2 = Address::from_str("AU1DHJY6zd6oKJPos8gQ6KYqmsTR669wes4ZhttLD9gE7PYUF3Rs").unwrap(); - let timestamp = &MassaTime::from(1678193291000); // 07/03/2023 13h48 + let timestamp = &MassaTime::from_millis(1678193291000); // 07/03/2023 13h48 let vesting = manager.get_addr_vesting_at_time(&addr2, timestamp); assert!(vesting.min_balance.is_none()); assert!(vesting.max_rolls.is_none()); } { - let timestamp = &MassaTime::from(1677675988000); // 01/03/2023 14h06 + let timestamp = &MassaTime::from_millis(1677675988000); // 01/03/2023 14h06 let result = manager.get_addr_vesting_at_time(&addr, timestamp); assert_eq!( result, @@ -60,7 +60,7 @@ mod test { } { - let timestamp = &MassaTime::from(1678193291000); // 07/03/2023 13h48 + let timestamp = &MassaTime::from_millis(1678193291000); // 07/03/2023 13h48 let result = manager.get_addr_vesting_at_time(&addr, timestamp); assert_eq!( result, @@ -72,7 +72,7 @@ mod test { } { - let timestamp = &MassaTime::from(1734786585000); // 21/12/2024 14h09 + let timestamp = &MassaTime::from_millis(1734786585000); // 21/12/2024 14h09 let result = manager.get_addr_vesting_at_time(&addr, timestamp); assert_eq!( result, diff --git a/massa-execution-worker/src/worker.rs b/massa-execution-worker/src/worker.rs index 64ae94533e2..c59009e00fc 100644 --- a/massa-execution-worker/src/worker.rs +++ b/massa-execution-worker/src/worker.rs @@ -19,7 +19,7 @@ use massa_models::slot::Slot; use massa_pos_exports::SelectorController; use massa_storage::Storage; use massa_time::MassaTime; -use massa_versioning_worker::versioning::MipStore; +use massa_versioning::versioning::MipStore; use parking_lot::{Condvar, Mutex, RwLock}; use std::sync::Arc; use std::thread; diff --git a/massa-factory-exports/Cargo.toml b/massa-factory-exports/Cargo.toml index 58648d00fa4..efbbd7f8de4 100644 --- a/massa-factory-exports/Cargo.toml +++ b/massa-factory-exports/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "massa_factory_exports" -version = "0.1.0" +version = "0.23.0" authors = ["Massa Labs "] edition = "2021" @@ -8,25 +8,16 @@ edition = "2021" [dependencies] displaydoc = "0.2" -nom = "7.1" -serde = { version = "1.0", features = ["derive"] } -serde_json = "1.0" thiserror = "1.0" -tracing = "0.1" -anyhow = "1.0" -num = { version = "0.4", features = ["serde"] } # custom modules massa_hash = { path = "../massa-hash" } massa_models = { path = "../massa-models" } massa_time = { path = "../massa-time" } -massa_ledger_exports = { path = "../massa-ledger-exports" } -massa_serialization = { path 
= "../massa-serialization" } massa_signature = { path = "../massa-signature" } massa_pos_exports = { path = "../massa-pos-exports" } massa_consensus_exports = { path = "../massa-consensus-exports" } massa_pool_exports = { path = "../massa-pool-exports" } massa_protocol_exports = { path = "../massa-protocol-exports" } -massa_execution_exports = { path = "../massa-execution-exports" } massa_storage = { path = "../massa-storage" } # for more information on what are the following features used for, see the cargo.toml at workspace level diff --git a/massa-factory-exports/src/test_exports/config.rs b/massa-factory-exports/src/test_exports/config.rs index b522bcccc47..00840be837f 100644 --- a/massa-factory-exports/src/test_exports/config.rs +++ b/massa-factory-exports/src/test_exports/config.rs @@ -10,7 +10,7 @@ impl Default for FactoryConfig { thread_count: THREAD_COUNT, genesis_timestamp: MassaTime::now().expect("failed to get current time"), t0: T0, - initial_delay: MassaTime::from(0), + initial_delay: MassaTime::from_millis(0), max_block_size: MAX_BLOCK_SIZE as u64, max_block_gas: MAX_GAS_PER_BLOCK, max_operations_per_block: MAX_OPERATIONS_PER_BLOCK, diff --git a/massa-factory-exports/src/test_exports/tools.rs b/massa-factory-exports/src/test_exports/tools.rs index e26d6c738f7..fa27485cf31 100644 --- a/massa-factory-exports/src/test_exports/tools.rs +++ b/massa-factory-exports/src/test_exports/tools.rs @@ -11,6 +11,8 @@ use massa_signature::KeyPair; pub fn create_empty_block(keypair: &KeyPair, slot: &Slot) -> SecureShareBlock { let header = BlockHeader::new_verifiable( BlockHeader { + current_version: 0, + announced_version: 0, slot: *slot, parents: Vec::new(), operation_merkle_root: Hash::compute_from(&Vec::new()), diff --git a/massa-factory-worker/Cargo.toml b/massa-factory-worker/Cargo.toml index 6d5934630c7..2b0565600ce 100644 --- a/massa-factory-worker/Cargo.toml +++ b/massa-factory-worker/Cargo.toml @@ -1,16 +1,13 @@ [package] name = "massa_factory_worker" -version = "0.1.0" +version = "0.23.0" authors = ["Massa Labs "] edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -anyhow = "1.0" parking_lot = { version = "0.12", features = ["deadlock_detection"] } -serde = { version = "1.0", features = ["derive"] } -serde_json = "1.0" crossbeam-channel = "0.5" tracing = "0.1" # custom modules @@ -22,11 +19,10 @@ massa_time = { path = "../massa-time" } massa_wallet = { path = "../massa-wallet" } massa_hash = { path = "../massa-hash" } massa_pos_exports = { path = "../massa-pos-exports" } -massa_serialization = { path = "../massa-serialization" } massa_pool_exports = { path = "../massa-pool-exports" } +massa_versioning = { path = "../massa-versioning" } [dev-dependencies] -serial_test = "1.0" massa_protocol_exports = { path = "../massa-protocol-exports", features=["testing"] } massa_consensus_exports = { path = "../massa-consensus-exports", features = ["testing"] } massa_factory_exports = { path = "../massa-factory-exports", features=["testing"] } diff --git a/massa-factory-worker/src/block_factory.rs b/massa-factory-worker/src/block_factory.rs index e106700b438..4b26723770c 100644 --- a/massa-factory-worker/src/block_factory.rs +++ b/massa-factory-worker/src/block_factory.rs @@ -13,6 +13,7 @@ use massa_models::{ timeslots::{get_block_slot_timestamp, get_closest_slot_to_timestamp}, }; use massa_time::MassaTime; +use massa_versioning::versioning::MipStore; use massa_wallet::Wallet; use parking_lot::RwLock; use std::{ @@ 
-28,6 +29,7 @@ pub(crate) struct BlockFactoryWorker { wallet: Arc>, channels: FactoryChannels, factory_receiver: mpsc::Receiver<()>, + mip_store: MipStore, } impl BlockFactoryWorker { @@ -38,6 +40,7 @@ impl BlockFactoryWorker { wallet: Arc>, channels: FactoryChannels, factory_receiver: mpsc::Receiver<()>, + mip_store: MipStore, ) -> thread::JoinHandle<()> { thread::Builder::new() .name("block-factory".into()) @@ -47,6 +50,7 @@ impl BlockFactoryWorker { wallet, channels, factory_receiver, + mip_store, }; this.run(); }) @@ -210,8 +214,12 @@ impl BlockFactoryWorker { ); // create header + let current_version = self.mip_store.get_network_version_current(); + let announced_version = self.mip_store.get_network_version_to_announce(); let header: SecuredHeader = BlockHeader::new_verifiable::( BlockHeader { + current_version, + announced_version, slot, parents: parents.into_iter().map(|(id, _period)| id).collect(), operation_merkle_root: global_operations_hash, diff --git a/massa-factory-worker/src/run.rs b/massa-factory-worker/src/run.rs index 6e7f4e71e55..56481e7ba27 100644 --- a/massa-factory-worker/src/run.rs +++ b/massa-factory-worker/src/run.rs @@ -1,5 +1,6 @@ //! Copyright (c) 2022 MASSA LABS +use massa_versioning::versioning::MipStore; use parking_lot::RwLock; use std::sync::{mpsc, Arc}; @@ -23,6 +24,7 @@ pub fn start_factory( cfg: FactoryConfig, wallet: Arc>, channels: FactoryChannels, + mip_store: MipStore, ) -> Box { // create block factory channel let (block_worker_tx, block_worker_rx) = mpsc::channel::<()>(); @@ -36,6 +38,7 @@ pub fn start_factory( wallet.clone(), channels.clone(), block_worker_rx, + mip_store, ); // start endorsement factory worker diff --git a/massa-factory-worker/src/tests/scenarios.rs b/massa-factory-worker/src/tests/scenarios.rs index 45267a5150e..6d79fa927ee 100644 --- a/massa-factory-worker/src/tests/scenarios.rs +++ b/massa-factory-worker/src/tests/scenarios.rs @@ -11,7 +11,7 @@ use std::str::FromStr; #[test] #[ignore] fn basic_creation() { - let keypair = KeyPair::generate(); + let keypair = KeyPair::generate(0).unwrap(); let mut test_factory = TestFactory::new(&keypair); let (block_id, storage) = test_factory.get_next_created_block(None, None); assert_eq!(block_id, storage.read_blocks().get(&block_id).unwrap().id); @@ -21,7 +21,7 @@ fn basic_creation() { #[test] #[ignore] fn basic_creation_with_operation() { - let keypair = KeyPair::generate(); + let keypair = KeyPair::generate(0).unwrap(); let mut test_factory = TestFactory::new(&keypair); let content = Operation { @@ -44,7 +44,7 @@ fn basic_creation_with_operation() { #[test] #[ignore] fn basic_creation_with_multiple_operations() { - let keypair = KeyPair::generate(); + let keypair = KeyPair::generate(0).unwrap(); let mut test_factory = TestFactory::new(&keypair); let content = Operation { diff --git a/massa-factory-worker/src/tests/tools.rs b/massa-factory-worker/src/tests/tools.rs index 57113797442..8c316da3d10 100644 --- a/massa-factory-worker/src/tests/tools.rs +++ b/massa-factory-worker/src/tests/tools.rs @@ -2,6 +2,10 @@ use crossbeam_channel::Receiver; use massa_consensus_exports::test_exports::{ ConsensusControllerImpl, ConsensusEventReceiver, MockConsensusControllerMessage, }; +use massa_models::config::MIP_STORE_STATS_BLOCK_CONSIDERED; +use massa_models::config::MIP_STORE_STATS_COUNTERS_MAX; +use massa_versioning::versioning::MipStatsConfig; +use massa_versioning::versioning::MipStore; use parking_lot::RwLock; use std::{sync::Arc, thread::sleep, time::Duration}; @@ -76,6 +80,15 @@ impl TestFactory 
{ .genesis_timestamp .checked_sub(factory_config.t0) .unwrap(); + + // create an empty default store + let mip_stats_config = MipStatsConfig { + block_count_considered: MIP_STORE_STATS_BLOCK_CONSIDERED, + counters_max: MIP_STORE_STATS_COUNTERS_MAX, + }; + let mip_store = + MipStore::try_from(([], mip_stats_config)).expect("Cannot create an empty MIP store"); + let factory_manager = start_factory( factory_config.clone(), Arc::new(RwLock::new(create_test_wallet(Some(accounts)))), @@ -86,6 +99,7 @@ impl TestFactory { protocol: Box::new(protocol_controller), storage: storage.clone_without_refs(), }, + mip_store, ); TestFactory { diff --git a/massa-final-state/Cargo.toml b/massa-final-state/Cargo.toml index fdf3d68e478..c7ecee1d393 100644 --- a/massa-final-state/Cargo.toml +++ b/massa-final-state/Cargo.toml @@ -1,16 +1,19 @@ [package] name = "massa_final_state" -version = "0.1.0" +version = "0.23.0" authors = ["Massa Labs "] edition = "2021" [dependencies] displaydoc = "0.2" serde = { version = "1.0", features = ["derive"] } -nom = "7.1" +nom = "=7.1" bs58 = { version = "0.4", features = ["check"] } thiserror = "1.0" tracing = "0.1" +parking_lot = { version = "0.12", features = ["deadlock_detection"] } +rocksdb = { version = "0.20" } + # custom modules massa_ledger_exports = { path = "../massa-ledger-exports" } massa_ledger_worker = { path = "../massa-ledger-worker", optional = true } @@ -19,16 +22,19 @@ massa_models = { path = "../massa-models" } massa_async_pool = { path = "../massa-async-pool" } massa_serialization = { path = "../massa-serialization" } massa_pos_exports = { path = "../massa-pos-exports" } -massa_hash = { path = "../massa-hash" } +massa_db = { path = "../massa-db" } massa_proto = { path = "../massa-proto" } +massa_versioning = { path = "../massa-versioning" } +massa_time = { path = "../massa-time" } [dev-dependencies] massa_async_pool = { path = "../massa-async-pool", features = ["testing"] } massa_ledger_worker = { path = "../massa-ledger-worker", features = [ "testing", ] } +massa_pos_worker = { path = "../massa-pos-worker", features = ["testing"] } massa_pos_exports = { path = "../massa-pos-exports", features = ["testing"] } -massa_signature = { path = "../massa-signature" } +tempfile = "3.3" # for more information on what are the following features used for, see the cargo.toml at workspace level [features] @@ -38,4 +44,3 @@ testing = [ "massa_pos_exports/testing", ] sandbox = ["massa_async_pool/sandbox"] -create_snapshot = [] \ No newline at end of file diff --git a/massa-final-state/src/config.rs b/massa-final-state/src/config.rs index 9b1080bbba5..a48b0606f9e 100644 --- a/massa-final-state/src/config.rs +++ b/massa-final-state/src/config.rs @@ -6,6 +6,7 @@ use massa_async_pool::AsyncPoolConfig; use massa_executed_ops::{ExecutedDenunciationsConfig, ExecutedOpsConfig}; use massa_ledger_exports::LedgerConfig; use massa_pos_exports::PoSConfig; +use massa_time::MassaTime; use std::path::PathBuf; /// Ledger configuration @@ -38,4 +39,8 @@ pub struct FinalStateConfig { /// max number of denunciations that can be included in a block header /// or in executed denunciations struct pub max_denunciations_per_block_header: u32, + /// time between two periods in the same thread + pub t0: MassaTime, + /// genesis timestamp, used with `t0` and the thread count to convert slots to timestamps + pub genesis_timestamp: MassaTime, }
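The two new `FinalStateConfig` fields let the final state convert slots to wall-clock times when checking the MIP store against the shutdown window (see `is_coherent_with_shutdown_period` further down). A minimal sketch of that slot-to-timestamp rule, assuming the usual Massa timing formula behind `massa_models::timeslots::get_block_slot_timestamp` (the helper below is an illustrative stand-in, not the real API):

/// Stand-in for the slot-to-timestamp conversion: periods advance by `t0`,
/// and the threads of a period are evenly staggered within it (assumed formula).
fn slot_timestamp_ms(genesis_ms: u64, t0_ms: u64, thread_count: u8, period: u64, thread: u8) -> u64 {
    genesis_ms + period * t0_ms + u64::from(thread) * (t0_ms / u64::from(thread_count))
}

fn main() {
    // with t0 = 100 ms and 2 threads, slot (3, 1) lands 350 ms after genesis
    assert_eq!(slot_timestamp_ms(0, 100, 2, 3, 1), 350);
}

diff --git a/massa-final-state/src/error.rs b/massa-final-state/src/error.rs index 860f99ad373..06fcf02d5f0 100644 --- a/massa-final-state/src/error.rs +++ b/massa-final-state/src/error.rs @@ -5,6 +5,8 @@ use displaydoc::Display; use thiserror::Error; +use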
massa_versioning::versioning::ExtendFromDbError; + /// Final state error #[non_exhaustive] #[derive(Display, Error, Debug)] @@ -17,4 +19,6 @@ pub enum FinalStateError { PosError(String), /// Snapshot error: {0} SnapshotError(String), + /// MIP store error: {0} + MipStoreError(#[from] ExtendFromDbError), } diff --git a/massa-final-state/src/final_state.rs b/massa-final-state/src/final_state.rs index 1b82622666e..7b937c3f8a0 100644 --- a/massa-final-state/src/final_state.rs +++ b/massa-final-state/src/final_state.rs @@ -6,45 +6,31 @@ //! and need to be bootstrapped by nodes joining the network. use crate::{config::FinalStateConfig, error::FinalStateError, state_changes::StateChanges}; -use massa_async_pool::{ - AsyncMessage, AsyncMessageId, AsyncPool, AsyncPoolChanges, AsyncPoolDeserializer, - AsyncPoolSerializer, Change, -}; -use massa_executed_ops::{ - ExecutedDenunciations, ExecutedDenunciationsDeserializer, ExecutedDenunciationsSerializer, - ExecutedOps, ExecutedOpsDeserializer, ExecutedOpsSerializer, -}; -use massa_hash::{Hash, HashDeserializer, HASH_SIZE_BYTES}; -use massa_ledger_exports::{Key as LedgerKey, LedgerChanges, LedgerController}; -use massa_models::denunciation::DenunciationIndex; -use massa_models::{ - // TODO: uncomment when deserializing the final state from ledger - /*config::{ - MAX_ASYNC_POOL_LENGTH, MAX_DATASTORE_KEY_LENGTH, MAX_DEFERRED_CREDITS_LENGTH, - MAX_EXECUTED_OPS_LENGTH, MAX_OPERATIONS_PER_BLOCK, MAX_PRODUCTION_STATS_LENGTH, - MAX_ROLLS_COUNT_LENGTH, - },*/ - operation::OperationId, - prehash::PreHashSet, - slot::{Slot, SlotDeserializer, SlotSerializer}, - streaming_step::StreamingStep, -}; -use massa_pos_exports::{ - CycleHistoryDeserializer, CycleHistorySerializer, CycleInfo, DeferredCredits, - DeferredCreditsDeserializer, DeferredCreditsSerializer, PoSFinalState, SelectorController, + +use massa_async_pool::AsyncPool; +use massa_db::{DBBatch, MassaDB, CHANGE_ID_DESER_ERROR, MIP_STORE_PREFIX}; +use massa_db::{ + ASYNC_POOL_PREFIX, CYCLE_HISTORY_PREFIX, DEFERRED_CREDITS_PREFIX, + EXECUTED_DENUNCIATIONS_PREFIX, EXECUTED_OPS_PREFIX, LEDGER_PREFIX, STATE_CF, }; -use massa_serialization::{Deserializer, SerializeError, Serializer}; -use nom::{error::context, sequence::tuple, IResult, Parser}; -use std::collections::{BTreeMap, HashSet, VecDeque}; -use std::ops::Bound::{Excluded, Included}; -use tracing::{debug, info}; +use massa_executed_ops::ExecutedDenunciations; +use massa_executed_ops::ExecutedOps; +use massa_ledger_exports::LedgerController; +use massa_models::config::PERIODS_BETWEEN_BACKUPS; +use massa_models::slot::Slot; +use massa_pos_exports::{PoSFinalState, SelectorController}; +use massa_versioning::versioning::MipStore; + +use parking_lot::RwLock; +use rocksdb::IteratorMode; +use tracing::{debug, info, warn}; + +use std::sync::Arc; /// Represents a final state `(ledger, async pool, executed_ops, executed_denunciations and the state of the PoS)` pub struct FinalState { /// execution state configuration pub(crate) config: FinalStateConfig, - /// slot at the output of which the state is attached - pub slot: Slot, /// final ledger associating addresses to their balance, executable bytecode and data pub ledger: Box, /// asynchronous pool containing messages sorted by priority and their data @@ -55,20 +41,22 @@ pub struct FinalState { pub executed_ops: ExecutedOps, /// executed denunciations pub executed_denunciations: ExecutedDenunciations, - /// history of recent final state changes, useful for streaming bootstrap - /// `front = oldest`, `back = newest` - pub
changes_history: VecDeque<(Slot, StateChanges)>, - /// hash of the final state, it is computed on finality - pub final_state_hash: Hash, + /// MIP store + pub mip_store: MipStore, /// last_start_period - /// * If start all new network: set to 0 + /// * If start new network: set to 0 /// * If from snapshot: retrieve from args /// * If from bootstrap: set during bootstrap pub last_start_period: u64, + /// last_slot_before_downtime + /// * None if start new network + /// * If from snapshot: retrieve from the slot attached to the snapshot + /// * If from bootstrap: set during bootstrap + pub last_slot_before_downtime: Option, + /// the rocksdb instance used to write every final_state struct on disk + pub db: Arc>, } -const FINAL_STATE_HASH_INITIAL_BYTES: &[u8; 32] = &[0; HASH_SIZE_BYTES]; - impl FinalState { /// Initializes a new `FinalState` /// @@ -76,47 +64,81 @@ impl FinalState { /// * `config`: the configuration of the final state to use for initialization /// * `ledger`: the instance of the ledger on disk. Used to apply changes to the ledger. /// * `selector`: the pos selector. Used to send draw inputs when a new cycle is completed. + /// * `reset_final_state`: if true, we only keep the ledger, and we reset the other fields of the final state pub fn new( + db: Arc>, config: FinalStateConfig, ledger: Box, selector: Box, + mut mip_store: MipStore, + reset_final_state: bool, ) -> Result { + let db_slot = db + .read() + .get_change_id() + .map_err(|_| FinalStateError::InvalidSlot(String::from("Could not get slot in db")))?; + // create the pos state let pos_state = PoSFinalState::new( config.pos_config.clone(), &config.initial_seed_string, &config.initial_rolls_path, selector, - ledger.get_ledger_hash(), + db.clone(), ) .map_err(|err| FinalStateError::PosError(format!("PoS final state init error: {}", err)))?; // attach at the output of the latest initial final slot, that is the last genesis slot - let slot = Slot::new(0, config.thread_count.saturating_sub(1)); + let slot = if reset_final_state { + Slot::new(0, config.thread_count.saturating_sub(1)) + } else { + db_slot + }; // create the async pool - let async_pool = AsyncPool::new(config.async_pool_config.clone()); + let async_pool = AsyncPool::new(config.async_pool_config.clone(), db.clone()); // create a default executed ops - let executed_ops = ExecutedOps::new(config.executed_ops_config.clone()); + let executed_ops = ExecutedOps::new(config.executed_ops_config.clone(), db.clone()); // create a default executed denunciations let executed_denunciations = - ExecutedDenunciations::new(config.executed_denunciations_config.clone()); + ExecutedDenunciations::new(config.executed_denunciations_config.clone(), db.clone()); - // create the final state - Ok(FinalState { - slot, + // init MIP store by reading from the db + mip_store + .extend_from_db(db.clone()) + .map_err(FinalStateError::from)?; + + let mut final_state = FinalState { ledger, async_pool, pos_state, config, executed_ops, executed_denunciations, - changes_history: Default::default(), // no changes in history - final_state_hash: Hash::from_bytes(FINAL_STATE_HASH_INITIAL_BYTES), + mip_store, last_start_period: 0, - }) + last_slot_before_downtime: None, + db, + }; + + if reset_final_state { + final_state.async_pool.reset(); + final_state.pos_state.reset(); + final_state.executed_ops.reset(); + final_state.executed_denunciations.reset(); + final_state.db.read().set_initial_change_id(slot); + } + + info!( + "final_state hash at slot {}: {}", + slot, + final_state.db.read().get_db_hash() 
+ ); + + // create the final state + Ok(final_state) } /// Initializes a `FinalState` from a snapshot. Currently, we do not use the final_state from the ledger, @@ -128,44 +150,89 @@ impl FinalState { /// * `selector`: the pos selector. Used to send draw inputs when a new cycle is completed. /// * `last_start_period`: at what period we should attach the final_state pub fn new_derived_from_snapshot( + db: Arc>, config: FinalStateConfig, ledger: Box, selector: Box, + mip_store: MipStore, last_start_period: u64, ) -> Result { info!("Restarting from snapshot"); - // FIRST, we recover the last known final_state - let mut final_state = FinalState::new(config, ledger, selector)?; - let _final_state_hash_from_snapshot = Hash::from_bytes(FINAL_STATE_HASH_INITIAL_BYTES); - final_state.pos_state.create_initial_cycle(); + let mut final_state = + FinalState::new(db, config.clone(), ledger, selector, mip_store, false)?; - // TODO: We recover the final_state from the RocksDB instance instead - /*let final_state_data = ledger - .get_final_state() - .expect("Cannot retrieve ledger final_state data");*/ + let recovered_slot = + final_state.db.read().get_change_id().map_err(|_| { + FinalStateError::InvalidSlot(String::from("Could not get slot in db")) + })?; - final_state.slot = final_state.ledger.get_slot().map_err(|_| { - FinalStateError::InvalidSlot(String::from("Could not recover Slot in Ledger")) - })?; + // This is needed for `test_bootstrap_server` to work + if cfg!(feature = "testing") { + let mut batch = DBBatch::new(); + final_state.pos_state.create_initial_cycle(&mut batch); + final_state + .db + .write() + .write_batch(batch, Default::default(), Some(recovered_slot)); + } + final_state.last_slot_before_downtime = Some(recovered_slot); + + // Check that MIP store is coherent with the network shutdown time range + // Assume that the final state has been edited during network shutdown + let shutdown_start = recovered_slot + .get_next_slot(config.thread_count) + .map_err(|e| { + FinalStateError::InvalidSlot(format!( + "Unable to get next slot from recovered slot: {:?}", + e + )) + })?; + let shutdown_end = Slot::new(last_start_period, 0) + .get_prev_slot(config.thread_count) + .map_err(|e| { + FinalStateError::InvalidSlot(format!( + "Unable to compute prev slot from last start period: {:?}", + e + )) + })?; debug!( - "Latest consistent slot found in snapshot data: {}", - final_state.slot + "Checking if MIP store is coherent against shutdown period: {} - {}", + shutdown_start, shutdown_end ); - final_state.compute_state_hash_at_slot(final_state.slot); + if !final_state + .mip_store + .is_coherent_with_shutdown_period( + shutdown_start, + shutdown_end, + config.thread_count, + config.t0, + config.genesis_timestamp, + ) + .unwrap_or(false) + { + return Err(FinalStateError::InvalidSlot( + "MIP store is not coherent".to_string(), + )); + } + + debug!( + "Latest consistent slot found in snapshot data: {}", + recovered_slot + ); - // Check the hash to see if we correctly recovered the snapshot - // TODO: Redo this check when we get the final_state from the ledger - /*if final_state.final_state_hash != final_state_hash_from_snapshot { - warn!("The hash of the final_state recovered from the snapshot is different from the hash saved."); - }*/ + info!( + "final_state hash at slot {}: {}", + recovered_slot, + final_state.db.read().get_db_hash() + ); // Then, interpolate the downtime, to attach at end_slot; final_state.last_start_period = last_start_period; - final_state.init_ledger_hash(last_start_period); +
final_state.recompute_caches(); // We compute the draws here because we need to feed_cycles when interpolating final_state.compute_initial_draws()?; @@ -175,27 +242,13 @@ impl FinalState { Ok(final_state) } - /// Used after bootstrap, to set the initial ledger hash (used in initial draws) - pub fn init_ledger_hash(&mut self, last_start_period: u64) { - let slot = Slot::new( - last_start_period, - self.config.thread_count.saturating_sub(1), - ); - self.ledger.set_initial_slot(slot); - self.pos_state.initial_ledger_hash = self.ledger.get_ledger_hash(); - - info!( - "Set initial ledger hash to {}", - self.ledger.get_ledger_hash().to_string() - ) - } - /// Once we created a FinalState from a snapshot, we need to edit it to attach at the end_slot and handle the downtime. /// This basically recreates the history of the final_state, without executing the slots. fn interpolate_downtime(&mut self) -> Result<(), FinalStateError> { - // TODO: Change the current_slot when we deserialize the final state from RocksDB. Until then, final_state slot and the ledger slot are not consistent! - // let current_slot = self.slot; - let current_slot = Slot::new(0, self.config.thread_count.saturating_sub(1)); + let current_slot = + self.db.read().get_change_id().map_err(|_| { + FinalStateError::InvalidSlot(String::from("Could not get slot in db")) + })?; let current_slot_cycle = current_slot.get_cycle(self.config.periods_per_cycle); let end_slot = Slot::new( @@ -217,15 +270,18 @@ impl FinalState { )?; } - self.slot = end_slot; - // Recompute the hash with the updated data and feed it to POS_state. - self.compute_state_hash_at_slot(self.slot); + let final_state_hash = self.db.read().get_db_hash(); + + info!( + "final_state hash at slot {}: {}", + end_slot, final_state_hash + ); // feed final_state_hash to the last cycle - let cycle = self.slot.get_cycle(self.config.periods_per_cycle); + let cycle = end_slot.get_cycle(self.config.periods_per_cycle); self.pos_state - .feed_cycle_state_hash(cycle, self.final_state_hash); + .feed_cycle_state_hash(cycle, final_state_hash); Ok(()) } @@ -236,14 +292,28 @@ impl FinalState { current_slot: Slot, end_slot: Slot, ) -> Result<(), FinalStateError> { - let latest_snapshot_cycle_info = + let latest_snapshot_cycle = self.pos_state - .cycle_history + .cycle_history_cache .pop_back() .ok_or(FinalStateError::SnapshotError(String::from( "Invalid cycle_history", )))?; + let latest_snapshot_cycle_info = self.pos_state.get_cycle_info(latest_snapshot_cycle.0); + + let mut batch = DBBatch::new(); + + self.pos_state + .delete_cycle_info(latest_snapshot_cycle.0, &mut batch); + + self.pos_state + .db + .write() + .write_batch(batch, Default::default(), Some(end_slot)); + + let mut batch = DBBatch::new(); + self.pos_state .create_new_cycle_from_last( &latest_snapshot_cycle_info, @@ -251,9 +321,15 @@ impl FinalState { .get_next_slot(self.config.thread_count) .expect("Cannot get next slot"), end_slot, + &mut batch, ) .map_err(|err| FinalStateError::PosError(format!("{}", err)))?; + self.pos_state + .db + .write() + .write_batch(batch, Default::default(), Some(end_slot)); + Ok(()) } @@ -265,16 +341,27 @@ impl FinalState { current_slot_cycle: u64, end_slot_cycle: u64, ) -> Result<(), FinalStateError> { - let latest_snapshot_cycle_info = + let latest_snapshot_cycle = self.pos_state - .cycle_history + .cycle_history_cache .pop_back() .ok_or(FinalStateError::SnapshotError(String::from( "Invalid cycle_history", )))?; - // Firstly, complete the first cycle + let latest_snapshot_cycle_info = 
self.pos_state.get_cycle_info(latest_snapshot_cycle.0); + + let mut batch = DBBatch::new(); + + self.pos_state + .delete_cycle_info(latest_snapshot_cycle.0, &mut batch); + + self.pos_state + .db + .write() + .write_batch(batch, Default::default(), Some(end_slot)); + // Firstly, complete the first cycle let last_slot = Slot::new_last_of_cycle( current_slot_cycle, self.config.periods_per_cycle, @@ -287,6 +374,8 @@ )) })?; + let mut batch = DBBatch::new(); + self.pos_state .create_new_cycle_from_last( &latest_snapshot_cycle_info, @@ -294,14 +383,25 @@ .get_next_slot(self.config.thread_count) .expect("Cannot get next slot"), last_slot, + &mut batch, ) .map_err(|err| FinalStateError::PosError(format!("{}", err)))?; + self.pos_state + .db + .write() + .write_batch(batch, Default::default(), Some(end_slot)); + // Feed final_state_hash to the completed cycle self.feed_cycle_hash_and_selector_for_interpolation(current_slot_cycle)?; - // Then, build all the already completed cycles - for cycle in (current_slot_cycle + 1)..end_slot_cycle { + // TODO: Bring back the following optimisation (it fails because of selector) + // Then, build all the completed cycles in between. If we have to build more cycles than the cycle_history_length, we only build the last ones. + //let current_slot_cycle = (current_slot_cycle + 1) + // .max(end_slot_cycle.saturating_sub(self.config.pos_config.cycle_history_length as u64)); + let current_slot_cycle = current_slot_cycle + 1; + + for cycle in current_slot_cycle..end_slot_cycle { let first_slot = Slot::new_first_of_cycle(cycle, self.config.periods_per_cycle) .map_err(|err| { FinalStateError::InvalidSlot(format!( @@ -322,10 +422,22 @@ )) })?; + let mut batch = DBBatch::new(); + self.pos_state - .create_new_cycle_from_last(&latest_snapshot_cycle_info, first_slot, last_slot) + .create_new_cycle_from_last( + &latest_snapshot_cycle_info, + first_slot, + last_slot, + &mut batch, + ) .map_err(|err| FinalStateError::PosError(format!("{}", err)))?; + self.pos_state + .db + .write() + .write_batch(batch, Default::default(), Some(end_slot)); + // Feed final_state_hash to the completed cycle self.feed_cycle_hash_and_selector_for_interpolation(cycle)?; } @@ -339,8 +451,15 @@ )) })?; + let mut batch = DBBatch::new(); + self.pos_state - .create_new_cycle_from_last(&latest_snapshot_cycle_info, first_slot, end_slot) + .create_new_cycle_from_last( + &latest_snapshot_cycle_info, + first_slot, + end_slot, + &mut batch, + ) .map_err(|err| FinalStateError::PosError(format!("{}", err)))?; // If the end_slot_cycle is completed @@ -350,10 +469,17 @@ } // We reduce the cycle_history len as needed - while self.pos_state.cycle_history.len() > self.pos_state.config.cycle_history_length { - self.pos_state.cycle_history.pop_front(); + while self.pos_state.cycle_history_cache.len() > self.pos_state.config.cycle_history_length + { + if let Some((cycle, _)) = self.pos_state.cycle_history_cache.pop_front() { + self.pos_state.delete_cycle_info(cycle, &mut batch); + } } + self.db + .write() + .write_batch(batch, Default::default(), Some(end_slot)); + Ok(()) } @@ -362,8 +488,10 @@ &mut self, cycle: u64, ) -> Result<(), FinalStateError> { + let final_state_hash = self.db.read().get_db_hash(); + self.pos_state - .feed_cycle_state_hash(cycle, self.final_state_hash); + .feed_cycle_state_hash(cycle, final_state_hash); self.pos_state .feed_selector(cycle.checked_add(2).ok_or_else(|| {
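The interpolation code above repeats one pattern: stage every change for a step in a `DBBatch`, then commit the batch atomically with `write_batch`. A minimal sketch of the same accumulate-then-commit idiom on a bare `rocksdb` handle (the path and keys are illustrative, and Massa's `DBBatch`/`write_batch` carry extra bookkeeping such as the change id):

use rocksdb::{WriteBatch, DB};

fn main() -> Result<(), rocksdb::Error> {
    let db = DB::open_default("/tmp/batch-demo")?;

    // stage every change for the step in memory first...
    let mut batch = WriteBatch::default();
    batch.put(b"cycle_history:7", b"serialized cycle info");
    batch.put(b"deferred_credits:7", b"serialized credits");

    // ...then commit the whole batch in one atomic write
    db.write(batch)?;
    Ok(())
}

@@ -379,53 +507,15 @@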
impl FinalState { /// /// USED ONLY FOR BOOTSTRAP pub fn reset(&mut self) { - self.slot = Slot::new(0, self.config.thread_count.saturating_sub(1)); + self.db + .write() + .reset(Slot::new(0, self.config.thread_count.saturating_sub(1))); self.ledger.reset(); self.async_pool.reset(); self.pos_state.reset(); self.executed_ops.reset(); self.executed_denunciations.reset(); - self.changes_history.clear(); - // reset the final state hash - self.final_state_hash = Hash::from_bytes(FINAL_STATE_HASH_INITIAL_BYTES); - } - - /// Compute the current state hash. - /// - /// Used when finalizing a slot. - /// Slot information is only used for logging. - pub fn compute_state_hash_at_slot(&mut self, slot: Slot) { - // 1. init hash concatenation with the ledger hash - let ledger_hash = self.ledger.get_ledger_hash(); - let mut hash_concat: Vec = ledger_hash.to_bytes().to_vec(); - // 2. async_pool hash - hash_concat.extend(self.async_pool.hash.to_bytes()); - // 3. pos deferred_credit hash - let deferred_credit_hash = match self.pos_state.deferred_credits.get_hash() { - Some(hash) => hash, - None => self - .pos_state - .deferred_credits - .enable_hash_tracker_and_compute_hash(), - }; - hash_concat.extend(deferred_credit_hash.to_bytes()); - // 4. pos cycle history hashes, skip the bootstrap safety cycle if there is one - let n = (self.pos_state.cycle_history.len() == self.config.pos_config.cycle_history_length) - as usize; - for cycle_info in self.pos_state.cycle_history.iter().skip(n) { - hash_concat.extend(cycle_info.cycle_global_hash.to_bytes()); - } - // 5. executed operations hash - hash_concat.extend(self.executed_ops.hash.to_bytes()); - // 6. executed denunciations hash - hash_concat.extend(self.executed_denunciations.hash.to_bytes()); - // 7. compute and save final state hash - self.final_state_hash = Hash::compute_from(&hash_concat); - - info!( - "final_state hash at slot {}: {}", - slot, self.final_state_hash - ); + self.mip_store.reset_db(self.db.clone()); } /// Performs the initial draws. @@ -440,504 +530,180 @@ impl FinalState { /// /// Panics if the new slot is not the one coming just after the current one. 
pub fn finalize(&mut self, slot: Slot, changes: StateChanges) { + let cur_slot = self.db.read().get_change_id().expect(CHANGE_ID_DESER_ERROR); // check slot consistency - let next_slot = self - .slot + let next_slot = cur_slot .get_next_slot(self.config.thread_count) .expect("overflow in execution state slot"); - if slot != next_slot { - panic!("attempting to apply execution state changes at slot {} while the current slot is {}", slot, self.slot); - } - // update current slot - self.slot = slot; + assert_eq!( + slot, next_slot, + "attempting to apply execution state changes at slot {} while the current slot is {}", + slot, cur_slot + ); + + let mut db_batch = DBBatch::new(); + + // apply the state changes to the batch - // apply the state changes self.async_pool - .apply_changes_unchecked(&changes.async_pool_changes); + .apply_changes_to_batch(&changes.async_pool_changes, &mut db_batch); + self.pos_state - .apply_changes(changes.pos_changes.clone(), self.slot, true) + .apply_changes_to_batch(changes.pos_changes.clone(), slot, true, &mut db_batch) .expect("could not settle slot in final state proof-of-stake"); // TODO: // do not panic above, it might just mean that the lookback cycle is not available // bootstrap again instead - self.executed_ops - .apply_changes(changes.executed_ops_changes.clone(), self.slot); - self.executed_denunciations - .apply_changes(changes.executed_denunciations_changes.clone(), self.slot); - - let mut final_state_data = None; - - if cfg!(feature = "create_snapshot") { - let /*mut*/ final_state_buffer = Vec::new(); - - /*let final_state_raw_serializer = FinalStateRawSerializer::new(); - - let final_state_raw = FinalStateRaw { - async_pool_messages: self.async_pool.messages.clone(), - cycle_history: self.pos_state.cycle_history.clone(), - deferred_credits: self.pos_state.deferred_credits.clone(), - sorted_ops: self.executed_ops.sorted_ops.clone(), - latest_consistent_slot: self.slot, - final_state_hash_from_snapshot: self.final_state_hash, - }; - - if final_state_raw_serializer - .serialize(&final_state_raw, &mut final_state_buffer) - .is_err() - { - debug!("Error while trying to serialize final_state"); - }*/ - - final_state_data = Some(final_state_buffer) - } self.ledger - .apply_changes(changes.ledger_changes.clone(), self.slot, final_state_data); + .apply_changes_to_batch(changes.ledger_changes.clone(), &mut db_batch); - // push history element and limit history size - if self.config.final_history_length > 0 { - while self.changes_history.len() >= self.config.final_history_length { - self.changes_history.pop_front(); - } - self.changes_history.push_back((slot, changes)); - } + self.executed_ops.apply_changes_to_batch( + changes.executed_ops_changes.clone(), + slot, + &mut db_batch, + ); - // compute the final state hash - self.compute_state_hash_at_slot(slot); + self.executed_denunciations.apply_changes_to_batch( + changes.executed_denunciations_changes.clone(), + slot, + &mut db_batch, + ); - if cfg!(feature = "create_snapshot") { - let /*mut*/ hash_buffer = Vec::new(); + self.db + .write() + .write_batch(db_batch, Default::default(), Some(slot)); - /* - let hash_serializer = HashSerializer::new(); + let final_state_hash = self.db.read().get_db_hash(); - if hash_serializer - .serialize(&self.final_state_hash, &mut hash_buffer) - .is_err() - { - debug!("Error while trying to serialize final_state_hash"); - }*/ + // compute the final state hash + info!("final_state hash at slot {}: {}", slot, final_state_hash); + + // Backup DB if needed + if slot.period % 
PERIODS_BETWEEN_BACKUPS == 0 && slot.period != 0 && slot.thread == 0 { + let state_slot = self.db.read().get_change_id(); + match state_slot { + Ok(state_slot) => { + info!( + "Backing up db for slot {}, state slot: {}, state hash: {}", + slot, state_slot, final_state_hash + ); + } + Err(e) => { + info!("{}", e); + info!( + "Backing up db for unknown state slot, state hash: {}", + final_state_hash + ); + } + } - self.ledger.set_final_state_hash(hash_buffer); + self.db.read().backup_db(slot); } // feed final_state_hash to the last cycle let cycle = slot.get_cycle(self.config.periods_per_cycle); self.pos_state - .feed_cycle_state_hash(cycle, self.final_state_hash); + .feed_cycle_state_hash(cycle, final_state_hash); } - /// Used for bootstrap. - /// - /// Retrieves every: - /// * ledger change that is after `slot` and before or equal to `ledger_step` key - /// * ledger change if main bootstrap process is finished - /// * async pool change that is after `slot` and before or equal to `pool_step` message id - /// * async pool change if main bootstrap process is finished - /// * proof-of-stake deferred credits change if main bootstrap process is finished - /// * proof-of-stake deferred credits change that is after `slot` and before or equal to `credits_step` slot - /// * proof-of-stake cycle history change if main bootstrap process is finished - /// * executed ops change if main bootstrap process is finished - /// - /// Produces an error when the `slot` is too old for `self.changes_history` - #[allow(clippy::too_many_arguments)] - pub fn get_state_changes_part( - &self, - slot: Slot, - ledger_step: StreamingStep, - pool_step: StreamingStep, - cycle_step: StreamingStep, - credits_step: StreamingStep, - ops_step: StreamingStep, - de_step: StreamingStep, - ) -> Result, FinalStateError> { - let position_slot = if let Some((first_slot, _)) = self.changes_history.front() { - // Safe because we checked that there is changes just above. - let index = slot - .slots_since(first_slot, self.config.thread_count) - .map_err(|_| { - FinalStateError::InvalidSlot( - "get_state_changes_part given slot is overflowing history".to_string(), - ) - })? - .saturating_add(1); - - // Check if the `slot` index isn't in the future - if self.changes_history.len() as u64 <= index { - return Err(FinalStateError::InvalidSlot( - "slot index is overflowing history".to_string(), - )); + /// After bootstrap or load from disk, recompute all the caches. + pub fn recompute_caches(&mut self) { + self.async_pool.recompute_message_info_cache(); + self.executed_ops.recompute_sorted_ops_and_op_exec_status(); + self.executed_denunciations.recompute_sorted_denunciations(); + self.pos_state.recompute_pos_state_caches(); + } + + /// Deserialize the entire DB and check the data. Useful to check after bootstrap.
+ pub fn is_db_valid(&self) -> bool { + let db = self.db.read(); + let handle = db.db.cf_handle(STATE_CF).unwrap(); + + for (serialized_key, serialized_value) in + db.db.iterator_cf(handle, IteratorMode::Start).flatten() + { + if !serialized_key.starts_with(CYCLE_HISTORY_PREFIX.as_bytes()) + && !serialized_key.starts_with(DEFERRED_CREDITS_PREFIX.as_bytes()) + && !serialized_key.starts_with(ASYNC_POOL_PREFIX.as_bytes()) + && !serialized_key.starts_with(EXECUTED_OPS_PREFIX.as_bytes()) + && !serialized_key.starts_with(EXECUTED_DENUNCIATIONS_PREFIX.as_bytes()) + && !serialized_key.starts_with(LEDGER_PREFIX.as_bytes()) + && !serialized_key.starts_with(MIP_STORE_PREFIX.as_bytes()) + { + warn!( + "Key/value does not correspond to any prefix: serialized_key: {:?}, serialized_value: {:?}", + serialized_key, serialized_value + ); + return false; } - index - } else { - return Ok(Vec::new()); - }; - let mut res_changes: Vec<(Slot, StateChanges)> = Vec::new(); - for (slot, changes) in self.changes_history.range((position_slot as usize)..) { - let mut slot_changes = StateChanges::default(); - - // Get ledger change that concern address <= ledger_step - match ledger_step.clone() { - StreamingStep::Ongoing(key) => { - let ledger_changes: LedgerChanges = LedgerChanges( - changes - .ledger_changes - .0 - .iter() - .filter_map(|(address, change)| { - if *address <= key.address { - Some((*address, change.clone())) - } else { - None - } - }) - .collect(), + + if serialized_key.starts_with(CYCLE_HISTORY_PREFIX.as_bytes()) { + if !self + .pos_state + .is_cycle_history_key_value_valid(&serialized_key, &serialized_value) + { + warn!( + "Wrong key/value for CYCLE_HISTORY_KEY PREFIX serialized_key: {:?}, serialized_value: {:?}", + serialized_key, serialized_value ); - slot_changes.ledger_changes = ledger_changes; - } - StreamingStep::Finished(_) => { - slot_changes.ledger_changes = changes.ledger_changes.clone(); + return false; } - _ => (), - } - - // Get async pool changes that concern ids <= pool_step - match pool_step { - StreamingStep::Ongoing(last_id) => { - let async_pool_changes: AsyncPoolChanges = AsyncPoolChanges( - changes - .async_pool_changes - .0 - .iter() - .filter_map(|change| match change { - Change::Add(id, _) | Change::Activate(id) | Change::Delete(id) - if id <= &last_id => - { - Some(change.clone()) - } - _ => None, - }) - .collect(), + } else if serialized_key.starts_with(DEFERRED_CREDITS_PREFIX.as_bytes()) { + if !self + .pos_state + .is_deferred_credits_key_value_valid(&serialized_key, &serialized_value) + { + warn!( + "Wrong key/value for DEFERRED_CREDITS PREFIX serialized_key: {:?}, serialized_value: {:?}", + serialized_key, serialized_value ); - slot_changes.async_pool_changes = async_pool_changes; + return false; } - StreamingStep::Finished(_) => { - slot_changes.async_pool_changes = changes.async_pool_changes.clone(); + } else if serialized_key.starts_with(ASYNC_POOL_PREFIX.as_bytes()) { + if !self + .async_pool + .is_key_value_valid(&serialized_key, &serialized_value) + { + warn!( + "Wrong key/value for ASYNC_POOL PREFIX serialized_key: {:?}, serialized_value: {:?}", + serialized_key, serialized_value + ); + return false; } - _ => (), - } - - // Get PoS deferred credits changes that concern credits <= credits_step - match credits_step { - StreamingStep::Ongoing(cursor_slot) => { - let mut deferred_credits = DeferredCredits::new_with_hash(); - deferred_credits.credits = changes - .pos_changes - .deferred_credits - .credits - .iter() - .filter_map(|(credits_slot, credits)| { - if 
*credits_slot <= cursor_slot { - Some((*credits_slot, credits.clone())) - } else { - None - } - }) - .collect(); - - slot_changes.pos_changes.deferred_credits = deferred_credits; + } else if serialized_key.starts_with(EXECUTED_OPS_PREFIX.as_bytes()) { + if !self + .executed_ops + .is_key_value_valid(&serialized_key, &serialized_value) + { + warn!( + "Wrong key/value for EXECUTED_OPS PREFIX serialized_key: {:?}, serialized_value: {:?}", + serialized_key, serialized_value + ); + return false; } - StreamingStep::Finished(_) => { - slot_changes.pos_changes.deferred_credits = - changes.pos_changes.deferred_credits.clone(); + } else if serialized_key.starts_with(EXECUTED_DENUNCIATIONS_PREFIX.as_bytes()) { + if !self + .executed_denunciations + .is_key_value_valid(&serialized_key, &serialized_value) + { + warn!("Wrong key/value for EXECUTED_DENUNCIATIONS PREFIX serialized_key: {:?}, serialized_value: {:?}", serialized_key, serialized_value); + return false; } - _ => (), - } - - // Get PoS cycle changes if cycle history main bootstrap finished - if cycle_step.finished() { - slot_changes.pos_changes.seed_bits = changes.pos_changes.seed_bits.clone(); - slot_changes.pos_changes.roll_changes = changes.pos_changes.roll_changes.clone(); - slot_changes.pos_changes.production_stats = - changes.pos_changes.production_stats.clone(); - } - - // Get executed operations changes if executed ops main bootstrap finished - if ops_step.finished() { - slot_changes.executed_ops_changes = changes.executed_ops_changes.clone(); - } - if de_step.finished() { - slot_changes.executed_denunciations_changes = - changes.executed_denunciations_changes.clone(); + } else if serialized_key.starts_with(LEDGER_PREFIX.as_bytes()) + && !self + .ledger + .is_key_value_valid(&serialized_key, &serialized_value) + { + warn!( + "Wrong key/value for LEDGER PREFIX serialized_key: {:?}, serialized_value: {:?}", + serialized_key, serialized_value + ); + return false; } - - // Push the slot changes - res_changes.push((*slot, slot_changes)); - } - Ok(res_changes) - } -} - -/// Serializer for `FinalStateRaw` -pub struct FinalStateRawSerializer { - async_pool_serializer: AsyncPoolSerializer, - cycle_history_serializer: CycleHistorySerializer, - deferred_credits_serializer: DeferredCreditsSerializer, - executed_ops_serializer: ExecutedOpsSerializer, - executed_denunciations_serializer: ExecutedDenunciationsSerializer, - slot_serializer: SlotSerializer, -} - -impl Default for FinalStateRawSerializer { - fn default() -> Self { - Self::new() - } -} - -impl From for FinalStateRaw { - fn from(value: FinalState) -> Self { - Self { - async_pool_messages: value.async_pool.messages, - cycle_history: value.pos_state.cycle_history, - deferred_credits: value.pos_state.deferred_credits, - sorted_ops: value.executed_ops.sorted_ops, - sorted_denunciations: value.executed_denunciations.sorted_denunciations, - latest_consistent_slot: value.slot, - final_state_hash_from_snapshot: value.final_state_hash, - } - } -} - -impl FinalStateRawSerializer { - /// Initialize a `FinalStateRaweSerializer` - pub fn new() -> Self { - Self { - async_pool_serializer: AsyncPoolSerializer::new(), - cycle_history_serializer: CycleHistorySerializer::new(), - deferred_credits_serializer: DeferredCreditsSerializer::new(), - executed_ops_serializer: ExecutedOpsSerializer::new(), - executed_denunciations_serializer: ExecutedDenunciationsSerializer::new(), - slot_serializer: SlotSerializer::new(), - } - } -} - -impl Serializer for FinalStateRawSerializer { - fn serialize(&self, value: 
&FinalStateRaw, buffer: &mut Vec) -> Result<(), SerializeError> { - // Serialize Async Pool - self.async_pool_serializer - .serialize(&value.async_pool_messages, buffer)?; - - // Serialize pos state - self.cycle_history_serializer - .serialize(&value.cycle_history, buffer)?; - self.deferred_credits_serializer - .serialize(&value.deferred_credits, buffer)?; - - // Serialize Executed Ops - self.executed_ops_serializer - .serialize(&value.sorted_ops, buffer)?; - // Serialize Executed Denunciations - self.executed_denunciations_serializer - .serialize(&value.sorted_denunciations, buffer)?; - - // Serialize metadata - self.slot_serializer - .serialize(&value.latest_consistent_slot, buffer)?; - - // /!\ The final_state_hash has to be serialized separately! - - Ok(()) - } -} - -pub struct FinalStateRaw { - async_pool_messages: BTreeMap, - cycle_history: VecDeque, - deferred_credits: DeferredCredits, - sorted_ops: BTreeMap>, - sorted_denunciations: BTreeMap>, - latest_consistent_slot: Slot, - #[allow(dead_code)] - final_state_hash_from_snapshot: Hash, -} - -/// Deserializer for `FinalStateRaw` -pub struct FinalStateRawDeserializer { - async_deser: AsyncPoolDeserializer, - cycle_history_deser: CycleHistoryDeserializer, - deferred_credits_deser: DeferredCreditsDeserializer, - executed_ops_deser: ExecutedOpsDeserializer, - executed_denunciations_deser: ExecutedDenunciationsDeserializer, - slot_deser: SlotDeserializer, - hash_deser: HashDeserializer, -} - -impl FinalStateRawDeserializer { - #[allow(clippy::too_many_arguments)] - #[allow(dead_code)] - /// Initialize a `FinalStateRawDeserializer` - pub fn new( - config: FinalStateConfig, - max_async_pool_length: u64, - max_datastore_key_length: u8, - max_rolls_length: u64, - max_production_stats_length: u64, - max_credit_length: u64, - max_executed_ops_length: u64, - max_operations_per_block: u32, - ) -> Self { - Self { - async_deser: AsyncPoolDeserializer::new( - config.thread_count, - max_async_pool_length, - config.async_pool_config.max_async_message_data, - max_datastore_key_length as u32, - ), - cycle_history_deser: CycleHistoryDeserializer::new( - config.pos_config.cycle_history_length as u64, - max_rolls_length, - max_production_stats_length, - ), - deferred_credits_deser: DeferredCreditsDeserializer::new( - config.thread_count, - max_credit_length, - true, - ), - executed_ops_deser: ExecutedOpsDeserializer::new( - config.thread_count, - max_executed_ops_length, - max_operations_per_block as u64, - ), - executed_denunciations_deser: ExecutedDenunciationsDeserializer::new( - config.thread_count, - config.endorsement_count, - config.max_executed_denunciations_length, - config.max_denunciations_per_block_header as u64, - ), - slot_deser: SlotDeserializer::new( - (Included(u64::MIN), Included(u64::MAX)), - (Included(0), Excluded(config.thread_count)), - ), - hash_deser: HashDeserializer::new(), } - } -} -impl Deserializer for FinalStateRawDeserializer { - fn deserialize<'a, E: nom::error::ParseError<&'a [u8]> + nom::error::ContextError<&'a [u8]>>( - &self, - buffer: &'a [u8], - ) -> IResult<&'a [u8], FinalStateRaw, E> { - context("Failed FinalStateRaw deserialization", |buffer| { - tuple(( - context("Failed async_pool_messages deserialization", |input| { - self.async_deser.deserialize(input) - }), - context("Failed cycle_history deserialization", |input| { - self.cycle_history_deser.deserialize(input) - }), - context("Failed deferred_credits deserialization", |input| { - self.deferred_credits_deser.deserialize(input) - }), - context("Failed 
executed_ops deserialization", |input| { - self.executed_ops_deser.deserialize(input) - }), - context("Failed executed_denunciations deserialization", |input| { - self.executed_denunciations_deser.deserialize(input) - }), - context("Failed slot deserialization", |input| { - self.slot_deser.deserialize(input) - }), - context("Failed hash deserialization", |input| { - self.hash_deser.deserialize(input) - }), - )) - .map( - |( - async_pool_messages, - cycle_history, - deferred_credits, - sorted_ops, - sorted_denunciations, - latest_consistent_slot, - final_state_hash_from_snapshot, - )| FinalStateRaw { - async_pool_messages, - cycle_history: cycle_history.into(), - deferred_credits, - sorted_ops, - sorted_denunciations, - latest_consistent_slot, - final_state_hash_from_snapshot, - }, - ) - .parse(buffer) - }) - .parse(buffer) - } -} - -#[cfg(test)] -mod tests { - - use std::collections::VecDeque; - - use crate::StateChanges; - use massa_async_pool::test_exports::get_random_message; - use massa_ledger_exports::SetUpdateOrDelete; - use massa_models::{address::Address, slot::Slot}; - use massa_signature::KeyPair; - - fn get_random_address() -> Address { - let keypair = KeyPair::generate(); - Address::from_public_key(&keypair.get_public_key()) - } - - #[test] - fn get_state_changes_part() { - let message = get_random_message(None); - // Building the state changes - let mut history_state_changes: VecDeque<(Slot, StateChanges)> = VecDeque::new(); - let (low_address, high_address) = { - let address1 = get_random_address(); - let address2 = get_random_address(); - if address1 < address2 { - (address1, address2) - } else { - (address2, address1) - } - }; - let mut state_changes = StateChanges::default(); - state_changes - .ledger_changes - .0 - .insert(low_address, SetUpdateOrDelete::Delete); - state_changes - .async_pool_changes - .0 - .push(massa_async_pool::Change::Add(message.compute_id(), message)); - history_state_changes.push_front((Slot::new(3, 0), state_changes)); - let mut state_changes = StateChanges::default(); - state_changes - .ledger_changes - .0 - .insert(high_address, SetUpdateOrDelete::Delete); - history_state_changes.push_front((Slot::new(2, 0), state_changes.clone())); - history_state_changes.push_front((Slot::new(1, 0), state_changes)); - // TODO: re-enable this test after refactoring is over - // let mut final_state: FinalState = Default::default(); - // final_state.changes_history = history_state_changes; - // // Test slot filter - // let part = final_state - // .get_state_changes_part(Slot::new(2, 0), low_address, message.compute_id(), None) - // .unwrap(); - // assert_eq!(part.ledger_changes.0.len(), 1); - // // Test address filter - // let part = final_state - // .get_state_changes_part(Slot::new(2, 0), high_address, message.compute_id(), None) - // .unwrap(); - // assert_eq!(part.ledger_changes.0.len(), 1); + true } } diff --git a/massa-final-state/src/lib.rs b/massa-final-state/src/lib.rs index 4d8a02c399d..9aed44cf118 100644 --- a/massa-final-state/src/lib.rs +++ b/massa-final-state/src/lib.rs @@ -32,12 +32,64 @@ //! //! When the crate feature `testing` is enabled, tooling useful for testing purposes is exported. //! See `test_exports/mod.rs` for details. +//! +//! # Network restart documentation +//! +//! ## Goals of the network restart +//! If the blockchain crashes (corrupted / attacked ledger, all nodes crash, etc.) 
and we want to keep the same main parameters of the network (same `GENESIS_TIMESTAMP`, same ledger, same final_state, etc.), then we can restart the network. +//! +//! **ONE** node should restart from a snapshot (which is just the RocksDB ledger, read as usual), and the other nodes should bootstrap from it. +//! +//! ## Command line +//! +//! ```sh +//! cargo run --release -- --restart-from-snapshot-at-period 200 +//! ``` +//! +//! This means the node will restart from the ledger and final_state on disk (at the usual path from the config). Block production will start once the period given in the arguments is reached (here, 200). +//! +//! ## Scenario +//! +//! 1. At period 40, the network crashes. +//! 2. We restart one node N0, at the time of period 80, with `cargo run --release -- --restart-from-snapshot-at-period 200` +//! 3. We start one other node N1, at the time of period 100, with `cargo run --release` +//! 4. The node N1 will bootstrap from N0. No blocks are produced yet. +//! 5. At the time of period 200, block production starts again. +//! +//! ## Additional notes +//! +//! ### Why is block production delayed? +//! +//! To give all nodes time to rejoin the network and bootstrap after a crash. Without this delay, the rolls of most stakers would be sold, because they would accumulate too many block misses. +//! +//! ### In sandbox +//! +//! The sandbox feature can be enabled. For instance, here is a test scenario: +//! +//! 1. Run the node as usual: `cargo run --release --features sandbox` +//! 2. Make transactions, buy rolls, etc. +//! 3. Shut down the node at slot S_0. +//! 4. Restart the network: `cargo run --release --features sandbox --restart-from-snapshot-at-period S_1` +//! +//! Here, the network will restart and start producing blocks again 10 seconds after launch. +//! +//! **/!\ This means that the genesis timestamp will be different between runs, but it should not matter in most cases.** +//! +//! ### Backups +//! +//! By default, the network restarts from the state associated with the last final slot before the shutdown. +//! However, we may sometimes want to recover from an earlier state (e.g. if an attacker stole 50% of all Massa, we want to restart with the state before the attack). +//! We use the RocksDB checkpoint system to save the state at regular intervals (see the `PERIODS_BETWEEN_BACKUPS` constant in `massa > massa-models > src > config > constants.rs`). +//! Backups for `Slot {period, thread}` are stored in `massa > massa-node > storage > ledger > rocks_db_backup > backup_[period]_[thread]`. +//! Backups are hard links of the rocks_db, so the overhead of storing them should be minimal. +//! To recover from a backup, simply replace the contents of the rocks_db folder with the contents of the target backup folder.
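+//!
+//! For illustration only, here is a minimal Rust sketch of that recovery step (the paths below are
+//! examples, and `copy_dir_all` is a hypothetical helper; a similar one exists in the final state tests):
+//!
+//! ```ignore
+//! use std::{fs, io, path::Path};
+//!
+//! /// Recursively copy `src` into `dst`.
+//! fn copy_dir_all(src: impl AsRef<Path>, dst: impl AsRef<Path>) -> io::Result<()> {
+//!     fs::create_dir_all(&dst)?;
+//!     for entry in fs::read_dir(src)? {
+//!         let entry = entry?;
+//!         if entry.file_type()?.is_dir() {
+//!             copy_dir_all(entry.path(), dst.as_ref().join(entry.file_name()))?;
+//!         } else {
+//!             fs::copy(entry.path(), dst.as_ref().join(entry.file_name()))?;
+//!         }
+//!     }
+//!     Ok(())
+//! }
+//!
+//! fn main() -> io::Result<()> {
+//!     // With the node stopped, wipe the live DB and replace it with the chosen backup.
+//!     let db_path = Path::new("massa-node/storage/ledger/rocks_db");
+//!     let backup = Path::new("massa-node/storage/ledger/rocks_db_backup/backup_100_0");
+//!     fs::remove_dir_all(db_path)?;
+//!     copy_dir_all(backup, db_path)
+//! }
+//! ```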
#![warn(missing_docs)] #![warn(unused_crate_dependencies)] #![feature(hash_drain_filter)] #![feature(async_closure)] #![feature(map_try_insert)] +#![feature(let_chains)] mod config; mod error; diff --git a/massa-final-state/src/mapping_grpc.rs b/massa-final-state/src/mapping_grpc.rs index f45e568fddd..a778ea3784e 100644 --- a/massa-final-state/src/mapping_grpc.rs +++ b/massa-final-state/src/mapping_grpc.rs @@ -1,7 +1,7 @@ // Copyright (c) 2023 MASSA LABS use crate::StateChanges; -use massa_async_pool::{AsyncMessage, AsyncMessageId, Change}; +use massa_async_pool::AsyncMessageId; use massa_ledger_exports::SetUpdateOrDelete; use massa_proto::massa::api::v1 as grpc; @@ -29,31 +29,33 @@ impl From for grpc::StateChanges { .async_pool_changes .0 .into_iter() - .map( - |change: Change| match change { - Change::Add(async_msg_id, async_msg) => grpc::AsyncPoolChangeEntry { - async_message_id: async_msg_id_to_string(async_msg_id), - value: Some(grpc::AsyncPoolChangeValue { - r#type: grpc::AsyncPoolChangeType::Add as i32, - async_message: Some(async_msg.into()), - }), - }, - Change::Activate(async_msg_id) => grpc::AsyncPoolChangeEntry { - async_message_id: async_msg_id_to_string(async_msg_id), - value: Some(grpc::AsyncPoolChangeValue { - r#type: grpc::AsyncPoolChangeType::Activate as i32, - async_message: None, - }), - }, - Change::Delete(async_msg_id) => grpc::AsyncPoolChangeEntry { - async_message_id: async_msg_id_to_string(async_msg_id), - value: Some(grpc::AsyncPoolChangeValue { - r#type: grpc::AsyncPoolChangeType::Delete as i32, - async_message: None, - }), - }, + .map(|(async_msg_id, change)| match change { + SetUpdateOrDelete::Set(async_msg) => grpc::AsyncPoolChangeEntry { + async_message_id: async_msg_id_to_string(async_msg_id), + value: Some(grpc::AsyncPoolChangeValue { + r#type: grpc::AsyncPoolChangeType::Set as i32, + message: Some(grpc::async_pool_change_value::Message::CreatedMessage( + async_msg.into(), + )), + }), }, - ) + SetUpdateOrDelete::Update(async_msg_update) => grpc::AsyncPoolChangeEntry { + async_message_id: async_msg_id_to_string(async_msg_id), + value: Some(grpc::AsyncPoolChangeValue { + r#type: grpc::AsyncPoolChangeType::Update as i32, + message: Some(grpc::async_pool_change_value::Message::UpdatedMessage( + async_msg_update.into(), + )), + }), + }, + SetUpdateOrDelete::Delete => grpc::AsyncPoolChangeEntry { + async_message_id: async_msg_id_to_string(async_msg_id), + value: Some(grpc::AsyncPoolChangeValue { + r#type: grpc::AsyncPoolChangeType::Delete as i32, + message: None, + }), + }, + }) .collect(), ledger_changes: value .ledger_changes diff --git a/massa-final-state/src/state_changes.rs b/massa-final-state/src/state_changes.rs index ce497870f51..d5bd67ffc3d 100644 --- a/massa-final-state/src/state_changes.rs +++ b/massa-final-state/src/state_changes.rs @@ -72,7 +72,7 @@ impl Serializer for StateChangesSerializer { /// use std::str::FromStr; /// use std::collections::BTreeMap; /// use massa_ledger_exports::{LedgerEntryUpdate, SetOrKeep, SetUpdateOrDelete, LedgerChanges}; - /// use massa_async_pool::{AsyncMessage, Change, AsyncPoolChanges}; + /// use massa_async_pool::{AsyncMessage, AsyncPoolChanges}; /// /// let mut state_changes = StateChanges::default(); /// let message = AsyncMessage::new_with_hash( @@ -88,8 +88,12 @@ impl Serializer for StateChangesSerializer { /// Slot::new(3, 0), /// vec![1, 2, 3, 4], /// None, + /// None, /// ); - /// let async_pool_changes: AsyncPoolChanges = AsyncPoolChanges(vec![Change::Add(message.compute_id(), message)]); + /// let mut 
async_pool_changes = AsyncPoolChanges::default(); + /// async_pool_changes + /// .0 + /// .insert(message.compute_id(), SetUpdateOrDelete::Set(message.clone())); /// state_changes.async_pool_changes = async_pool_changes; /// /// let amount = Amount::from_str("1").unwrap(); @@ -191,7 +195,7 @@ impl Deserializer for StateChangesDeserializer { /// use std::str::FromStr; /// use std::collections::BTreeMap; /// use massa_ledger_exports::{LedgerEntryUpdate, SetOrKeep, SetUpdateOrDelete, LedgerChanges}; - /// use massa_async_pool::{AsyncMessage, Change, AsyncPoolChanges}; + /// use massa_async_pool::{AsyncMessage, AsyncPoolChanges}; /// /// let mut state_changes = StateChanges::default(); /// let message = AsyncMessage::new_with_hash( @@ -207,8 +211,12 @@ impl Deserializer for StateChangesDeserializer { /// Slot::new(3, 0), /// vec![1, 2, 3, 4], /// None, + /// None /// ); - /// let async_pool_changes: AsyncPoolChanges = AsyncPoolChanges(vec![Change::Add(message.compute_id(), message)]); + /// let mut async_pool_changes = AsyncPoolChanges::default(); + /// async_pool_changes + /// .0 + /// .insert(message.compute_id(), SetUpdateOrDelete::Set(message.clone())); /// state_changes.async_pool_changes = async_pool_changes; /// /// let amount = Amount::from_str("1").unwrap(); @@ -279,7 +287,7 @@ impl StateChanges { pub fn apply(&mut self, changes: StateChanges) { use massa_ledger_exports::Applicable; self.ledger_changes.apply(changes.ledger_changes); - self.async_pool_changes.extend(changes.async_pool_changes); + self.async_pool_changes.apply(changes.async_pool_changes); self.pos_changes.extend(changes.pos_changes); self.executed_ops_changes .extend(changes.executed_ops_changes); diff --git a/massa-final-state/src/test_exports/bootstrap.rs b/massa-final-state/src/test_exports/bootstrap.rs index 733adf43015..a1b6451c7b4 100644 --- a/massa-final-state/src/test_exports/bootstrap.rs +++ b/massa-final-state/src/test_exports/bootstrap.rs @@ -2,109 +2,143 @@ //! 
This file defines tools to test the final state bootstrap -use std::collections::VecDeque; +use std::sync::Arc; use massa_async_pool::AsyncPool; +use massa_db::{MassaDB, METADATA_CF, STATE_CF, STATE_HASH_KEY}; use massa_executed_ops::{ExecutedDenunciations, ExecutedOps}; -use massa_hash::{Hash, HASH_SIZE_BYTES}; use massa_ledger_exports::LedgerController; -use massa_models::slot::Slot; use massa_pos_exports::PoSFinalState; +use massa_versioning::versioning::MipStore; +use parking_lot::RwLock; -use crate::{FinalState, FinalStateConfig, StateChanges}; +use crate::{FinalState, FinalStateConfig}; /// Create a `FinalState` from pre-set values pub fn create_final_state( config: FinalStateConfig, - slot: Slot, ledger: Box, async_pool: AsyncPool, - changes_history: VecDeque<(Slot, StateChanges)>, pos_state: PoSFinalState, executed_ops: ExecutedOps, executed_denunciations: ExecutedDenunciations, + mip_store: MipStore, + db: Arc>, ) -> FinalState { FinalState { config, - slot, ledger, async_pool, - changes_history, pos_state, executed_ops, executed_denunciations, - final_state_hash: Hash::from_bytes(&[0; HASH_SIZE_BYTES]), + mip_store, last_start_period: 0, + last_slot_before_downtime: None, + db, } } /// asserts that two `FinalState` are equal pub fn assert_eq_final_state(v1: &FinalState, v2: &FinalState) { - // compare slot - assert_eq!(v1.slot, v2.slot, "final slot mismatch"); - - // compare final state - massa_ledger_worker::test_exports::assert_eq_ledger(&*v1.ledger, &*v2.ledger); - massa_async_pool::test_exports::assert_eq_async_pool_bootstrap_state( - &v1.async_pool, - &v2.async_pool, - ); - massa_pos_exports::test_exports::assert_eq_pos_state(&v1.pos_state, &v2.pos_state); assert_eq!( - v1.executed_ops.ops.len(), - v2.executed_ops.ops.len(), - "executed_ops.ops lenght mismatch" + v1.db.read().get_change_id().unwrap(), + v2.db.read().get_change_id().unwrap(), + "final slot mismatch" ); assert_eq!( - v1.executed_ops.ops, v2.executed_ops.ops, - "executed_ops.ops mismatch" + v1.last_start_period, v2.last_start_period, + "last_start_period mismatch" ); assert_eq!( - v1.executed_ops.sorted_ops, v2.executed_ops.sorted_ops, - "executed_ops.sorted_ops mismatch" + v1.last_slot_before_downtime, v2.last_slot_before_downtime, + "last_slot_before_downtime mismatch" ); -} -/// asserts that two `FinalState` hashes are equal -pub fn assert_eq_final_state_hash(v1: &FinalState, v2: &FinalState) { + let db1 = v1.db.read(); + let db2 = v2.db.read(); + + let handle_state_db1 = db1.db.cf_handle(STATE_CF).unwrap(); + let handle_state_db2 = db2.db.cf_handle(STATE_CF).unwrap(); + let iter_state_db1 = db1 + .db + .iterator_cf(handle_state_db1, rocksdb::IteratorMode::Start) + .flatten(); + let iter_state_db2 = db2 + .db + .iterator_cf(handle_state_db2, rocksdb::IteratorMode::Start) + .flatten(); + + let handle_metadata_db1 = db1.db.cf_handle(METADATA_CF).unwrap(); + let handle_metadata_db2 = db2.db.cf_handle(METADATA_CF).unwrap(); + let iter_metadata_db1 = db1 + .db + .iterator_cf(handle_metadata_db1, rocksdb::IteratorMode::Start) + .flatten(); + let iter_metadata_db2 = db2 + .db + .iterator_cf(handle_metadata_db2, rocksdb::IteratorMode::Start) + .flatten(); + + let count_1 = iter_state_db1.count(); + let count_2 = iter_state_db2.count(); + + assert_eq!(count_1, count_2, "state count mismatch"); + + let iter_state_db1 = db1 + .db + .iterator_cf(handle_state_db1, rocksdb::IteratorMode::Start) + .flatten(); + let iter_state_db2 = db2 + .db + .iterator_cf(handle_state_db2, rocksdb::IteratorMode::Start) + .flatten(); + + 
let mut count = 0; + for ((key1, value1), (key2, value2)) in iter_state_db1.zip(iter_state_db2) { + count += 1; + assert_eq!(key1, key2, "{}", format!("state key mismatch {}", count)); + assert_eq!( + value1, + value2, + "{}", + format!("state value n°{} mismatch for key {:?} ", count, key1) + ); + } + + for ((key1, value1), (key2, value2)) in iter_metadata_db1.zip(iter_metadata_db2) { + assert_eq!(key1, key2, "metadata key mismatch"); + if key1.to_vec() != STATE_HASH_KEY.to_vec() { + assert_eq!(value1, value2, "metadata value mismatch"); + } + } + assert_eq!( - v1.ledger.get_ledger_hash(), - v2.ledger.get_ledger_hash(), - "ledger hash mismatch" + v1.pos_state.cycle_history_cache, v2.pos_state.cycle_history_cache, + "pos_state.cycle_history_cache mismatch" ); assert_eq!( - v1.async_pool.hash, v2.async_pool.hash, - "async pool hash mismatch" + v1.pos_state.rng_seed_cache, v2.pos_state.rng_seed_cache, + "pos_state.rng_seed_cache mismatch" ); + assert_eq!( - v1.pos_state.deferred_credits.get_hash(), - v2.pos_state.deferred_credits.get_hash(), - "deferred credits hash mismatch" + v1.async_pool.message_info_cache.len(), + v2.async_pool.message_info_cache.len(), + "async_pool.message_info_cache len mismatch" ); - for (cycle1, cycle2) in v1 - .pos_state - .cycle_history - .iter() - .zip(v2.pos_state.cycle_history.iter()) - { - assert_eq!( - cycle1.roll_counts_hash, cycle2.roll_counts_hash, - "cycle ({}) roll_counts_hash mismatch", - cycle1.cycle - ); - assert_eq!( - cycle1.production_stats_hash, cycle2.production_stats_hash, - "cycle ({}) roll_counts_hash mismatch", - cycle1.cycle - ); - assert_eq!( - cycle1.cycle_global_hash, cycle2.cycle_global_hash, - "cycle ({}) global_hash mismatch", - cycle1.cycle - ); - } + + assert_eq!( + v1.async_pool.message_info_cache, v2.async_pool.message_info_cache, + "async_pool.message_info_cache mismatch" + ); +} + +/// asserts that two `FinalState` hashes are equal +pub fn assert_eq_final_state_hash(v1: &FinalState, v2: &FinalState) { assert_eq!( - v1.executed_ops.hash, v2.executed_ops.hash, - "executed ops hash mismatch" + v1.db.read().get_db_hash(), + v2.db.read().get_db_hash(), + "rocks_db hash mismatch" ); } diff --git a/massa-final-state/src/test_exports/config.rs b/massa-final-state/src/test_exports/config.rs index 2a6db4446c0..76b73191461 100644 --- a/massa-final-state/src/test_exports/config.rs +++ b/massa-final-state/src/test_exports/config.rs @@ -2,45 +2,54 @@ //! 
This file defines testing tools related to the configuration -use std::path::PathBuf; +use std::{path::PathBuf, sync::Arc}; use crate::{FinalState, FinalStateConfig}; use massa_async_pool::{AsyncPool, AsyncPoolConfig}; +use massa_db::MassaDB; use massa_executed_ops::{ ExecutedDenunciations, ExecutedDenunciationsConfig, ExecutedOps, ExecutedOpsConfig, }; -use massa_hash::{Hash, HASH_SIZE_BYTES}; use massa_ledger_exports::LedgerConfig; use massa_ledger_worker::FinalLedger; use massa_models::config::{ - DENUNCIATION_EXPIRE_PERIODS, ENDORSEMENT_COUNT, MAX_DENUNCIATIONS_PER_BLOCK_HEADER, - MAX_DENUNCIATION_CHANGES_LENGTH, -}; -use massa_models::{ - config::{ - DEFERRED_CREDITS_BOOTSTRAP_PART_SIZE, EXECUTED_OPS_BOOTSTRAP_PART_SIZE, PERIODS_PER_CYCLE, - POS_SAVED_CYCLES, THREAD_COUNT, - }, - slot::Slot, + DENUNCIATION_EXPIRE_PERIODS, ENDORSEMENT_COUNT, GENESIS_TIMESTAMP, MAX_DEFERRED_CREDITS_LENGTH, + MAX_DENUNCIATIONS_PER_BLOCK_HEADER, MAX_DENUNCIATION_CHANGES_LENGTH, + MAX_PRODUCTION_STATS_LENGTH, MAX_ROLLS_COUNT_LENGTH, T0, }; +use massa_models::config::{PERIODS_PER_CYCLE, POS_SAVED_CYCLES, THREAD_COUNT}; use massa_pos_exports::{PoSConfig, PoSFinalState}; +use massa_versioning::versioning::{MipStatsConfig, MipStore}; +use parking_lot::RwLock; impl FinalState { - /// Create a final stat - pub fn create_final_state(pos_state: PoSFinalState, config: FinalStateConfig) -> Self { + /// Create a final state + pub fn create_final_state( + pos_state: PoSFinalState, + config: FinalStateConfig, + db: Arc>, + ) -> Self { FinalState { - slot: Slot::new(0, 0), - ledger: Box::new(FinalLedger::new(config.ledger_config.clone(), false)), - async_pool: AsyncPool::new(config.async_pool_config.clone()), + ledger: Box::new(FinalLedger::new(config.ledger_config.clone(), db.clone())), + async_pool: AsyncPool::new(config.async_pool_config.clone(), db.clone()), pos_state, - executed_ops: ExecutedOps::new(config.executed_ops_config.clone()), + executed_ops: ExecutedOps::new(config.executed_ops_config.clone(), db.clone()), executed_denunciations: ExecutedDenunciations::new( config.executed_denunciations_config.clone(), + db.clone(), ), - changes_history: Default::default(), + mip_store: MipStore::try_from(( + [], + MipStatsConfig { + block_count_considered: 10, + counters_max: 10, + }, + )) + .unwrap(), config, - final_state_hash: Hash::from_bytes(&[0; HASH_SIZE_BYTES]), last_start_period: 0, + last_slot_before_downtime: None, + db, } } } @@ -53,17 +62,19 @@ impl Default for FinalStateConfig { async_pool_config: AsyncPoolConfig::default(), executed_ops_config: ExecutedOpsConfig { thread_count: THREAD_COUNT, - bootstrap_part_size: EXECUTED_OPS_BOOTSTRAP_PART_SIZE, }, executed_denunciations_config: ExecutedDenunciationsConfig { denunciation_expire_periods: DENUNCIATION_EXPIRE_PERIODS, - bootstrap_part_size: EXECUTED_OPS_BOOTSTRAP_PART_SIZE, + thread_count: THREAD_COUNT, + endorsement_count: ENDORSEMENT_COUNT, }, pos_config: PoSConfig { periods_per_cycle: PERIODS_PER_CYCLE, thread_count: THREAD_COUNT, cycle_history_length: POS_SAVED_CYCLES, - credits_bootstrap_part_size: DEFERRED_CREDITS_BOOTSTRAP_PART_SIZE, + max_rolls_length: MAX_ROLLS_COUNT_LENGTH, + max_production_stats_length: MAX_PRODUCTION_STATS_LENGTH, + max_credit_length: MAX_DEFERRED_CREDITS_LENGTH, }, final_history_length: 10, thread_count: 2, @@ -73,6 +84,8 @@ impl Default for FinalStateConfig { max_executed_denunciations_length: MAX_DENUNCIATION_CHANGES_LENGTH, initial_seed_string: "".to_string(), max_denunciations_per_block_header: 
MAX_DENUNCIATIONS_PER_BLOCK_HEADER, + t0: T0, + genesis_timestamp: *GENESIS_TIMESTAMP, } } } diff --git a/massa-final-state/src/tests/mod.rs b/massa-final-state/src/tests/mod.rs index 41a8e020d4a..a4cd0faf22b 100644 --- a/massa-final-state/src/tests/mod.rs +++ b/massa-final-state/src/tests/mod.rs @@ -1 +1,3 @@ //! Copyright (c) 2022 MASSA LABS + +mod scenarios; diff --git a/massa-final-state/src/tests/scenarios.rs b/massa-final-state/src/tests/scenarios.rs new file mode 100644 index 00000000000..d3d55ead1ad --- /dev/null +++ b/massa-final-state/src/tests/scenarios.rs @@ -0,0 +1,218 @@ +//! Copyright (c) 2023 MASSA LABS + +use crate::{ + /*test_exports::{assert_eq_final_state, assert_eq_final_state_hash},*/ + FinalState, FinalStateConfig, StateChanges, +}; +use massa_async_pool::{AsyncMessage, AsyncPoolChanges, AsyncPoolConfig}; +use massa_db::{DBBatch, MassaDB, MassaDBConfig}; +use massa_executed_ops::{ExecutedDenunciationsConfig, ExecutedOpsConfig}; +use massa_ledger_exports::{ + LedgerChanges, LedgerConfig, LedgerEntryUpdate, SetOrKeep, SetUpdateOrDelete, +}; +use massa_ledger_worker::FinalLedger; +use massa_models::address::Address; +use massa_models::amount::Amount; +use massa_models::bytecode::Bytecode; +use massa_models::config::{ + DENUNCIATION_EXPIRE_PERIODS, ENDORSEMENT_COUNT, GENESIS_TIMESTAMP, MAX_DEFERRED_CREDITS_LENGTH, + MAX_DENUNCIATIONS_PER_BLOCK_HEADER, MAX_PRODUCTION_STATS_LENGTH, MAX_ROLLS_COUNT_LENGTH, T0, +}; +use massa_models::config::{ + MAX_ASYNC_MESSAGE_DATA, MAX_ASYNC_POOL_LENGTH, MAX_DATASTORE_KEY_LENGTH, POS_SAVED_CYCLES, +}; +use massa_models::{config::MAX_DATASTORE_VALUE_LENGTH, slot::Slot}; +use massa_pos_exports::{PoSConfig, SelectorConfig}; +use massa_pos_worker::start_selector_worker; +use parking_lot::RwLock; +use std::collections::BTreeMap; +use std::path::Path; +use std::{path::PathBuf, str::FromStr, sync::Arc}; +use tempfile::TempDir; + +fn create_final_state(temp_dir: &TempDir, reset_final_state: bool) -> Arc> { + let thread_count = 2; + + let db_config = MassaDBConfig { + path: temp_dir.path().to_path_buf(), + max_history_length: 10, + max_new_elements: 100, + thread_count, + }; + let db = Arc::new(RwLock::new(MassaDB::new(db_config))); + + let rolls_path = PathBuf::from_str("../massa-node/base_config/initial_rolls.json").unwrap(); + + let periods_per_cycle = 2; + + let final_state_local_config = FinalStateConfig { + ledger_config: LedgerConfig { + thread_count, + initial_ledger_path: "".into(), + disk_ledger_path: temp_dir.path().to_path_buf(), + max_key_length: MAX_DATASTORE_KEY_LENGTH, + max_datastore_value_length: MAX_DATASTORE_VALUE_LENGTH, + }, + async_pool_config: AsyncPoolConfig { + thread_count, + max_length: MAX_ASYNC_POOL_LENGTH, + max_async_message_data: MAX_ASYNC_MESSAGE_DATA, + max_key_length: MAX_DATASTORE_KEY_LENGTH as u32, + }, + pos_config: PoSConfig { + periods_per_cycle, + thread_count, + cycle_history_length: POS_SAVED_CYCLES, + max_rolls_length: MAX_ROLLS_COUNT_LENGTH, + max_production_stats_length: MAX_PRODUCTION_STATS_LENGTH, + max_credit_length: MAX_DEFERRED_CREDITS_LENGTH, + }, + executed_ops_config: ExecutedOpsConfig { thread_count }, + executed_denunciations_config: ExecutedDenunciationsConfig { + denunciation_expire_periods: DENUNCIATION_EXPIRE_PERIODS, + thread_count, + endorsement_count: ENDORSEMENT_COUNT, + }, + final_history_length: 100, + initial_seed_string: "".into(), + initial_rolls_path: rolls_path, + endorsement_count: ENDORSEMENT_COUNT, + max_executed_denunciations_length: 1000, + thread_count, + 
periods_per_cycle, + max_denunciations_per_block_header: MAX_DENUNCIATIONS_PER_BLOCK_HEADER, + t0: T0, + genesis_timestamp: *GENESIS_TIMESTAMP, + }; + + // setup selector local config + let selector_local_config = SelectorConfig { + thread_count, + periods_per_cycle, + ..Default::default() + }; + + // start proof-of-stake selectors + let (mut _selector_manager, selector_controller) = + start_selector_worker(selector_local_config.clone()) + .expect("could not start server selector controller"); + + // MIP store + let mip_store = MipStore::try_from(( + [], + MipStatsConfig { + block_count_considered: 10, + counters_max: 10, + }, + )) + .unwrap(); + + // setup final states + + let ledger = FinalLedger::new(final_state_local_config.ledger_config.clone(), db.clone()); + + let final_state = Arc::new(RwLock::new( + FinalState::new( + db.clone(), + final_state_local_config.clone(), + Box::new(ledger), + selector_controller, + mip_store, + reset_final_state, + ) + .unwrap(), + )); + + final_state +} + +use massa_versioning::versioning::{MipStatsConfig, MipStore}; +use std::{fs, io}; + +fn copy_dir_all(src: impl AsRef, dst: impl AsRef) -> io::Result<()> { + for entry in fs::read_dir(src)? { + let entry = entry?; + let ty = entry.file_type()?; + if ty.is_dir() { + copy_dir_all(entry.path(), dst.as_ref().join(entry.file_name()))?; + } else { + fs::copy(entry.path(), dst.as_ref().join(entry.file_name()))?; + } + } + Ok(()) +} + +#[test] +fn test_final_state() { + let temp_dir = TempDir::new().unwrap(); + let temp_dir2 = TempDir::new().unwrap(); + + let hash; + { + let fs = create_final_state(&temp_dir, true); + + let mut batch = DBBatch::new(); + let versioning_batch = DBBatch::new(); + + fs.write().pos_state.create_initial_cycle(&mut batch); + + let slot = fs.read().db.read().get_change_id().unwrap(); + + fs.write() + .db + .write() + .write_batch(batch, versioning_batch, Some(slot)); + + let slot = Slot::new(1, 0); + let mut state_changes = StateChanges::default(); + + let message = AsyncMessage::new_with_hash( + Slot::new(1, 0), + 0, + Address::from_str("AU12dG5xP1RDEB5ocdHkymNVvvSJmUL9BgHwCksDowqmGWxfpm93x").unwrap(), + Address::from_str("AU12htxRWiEm8jDJpJptr6cwEhWNcCSFWstN1MLSa96DDkVM9Y42G").unwrap(), + String::from("test"), + 10000000, + Amount::from_str("1").unwrap(), + Amount::from_str("1").unwrap(), + Slot::new(2, 0), + Slot::new(3, 0), + vec![1, 2, 3, 4], + None, + None, + ); + let mut async_pool_changes = AsyncPoolChanges::default(); + async_pool_changes.0.insert( + message.compute_id(), + SetUpdateOrDelete::Set(message.clone()), + ); + state_changes.async_pool_changes = async_pool_changes; + + let amount = Amount::from_str("1").unwrap(); + let bytecode = Bytecode(vec![1, 2, 3]); + let ledger_entry = LedgerEntryUpdate { + balance: SetOrKeep::Set(amount), + bytecode: SetOrKeep::Set(bytecode), + datastore: BTreeMap::default(), + }; + let mut ledger_changes = LedgerChanges::default(); + ledger_changes.0.insert( + Address::from_str("AU12dG5xP1RDEB5ocdHkymNVvvSJmUL9BgHwCksDowqmGWxfpm93x").unwrap(), + SetUpdateOrDelete::Update(ledger_entry), + ); + state_changes.ledger_changes = ledger_changes; + + fs.write().finalize(slot, state_changes); + + hash = fs.read().db.read().get_db_hash(); + + fs.write().db.write().db.flush().unwrap(); + } + + copy_dir_all(temp_dir.path(), &temp_dir2.path()).unwrap(); + + let fs2 = create_final_state(&temp_dir2, false); + let hash2 = fs2.read().db.read().get_db_hash(); + + assert_eq!(hash, hash2); +} diff --git a/massa-grpc/Cargo.toml b/massa-grpc/Cargo.toml index 
a0e5e13607c..186f626794c 100644 --- a/massa-grpc/Cargo.toml +++ b/massa-grpc/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "massa_grpc" -version = "0.1.0" +version = "0.23.0" edition = "2021" description = "GRPC API for Massa Blockchain" repository = "https://github.com/massalabs/massa/" @@ -38,6 +38,7 @@ massa_time = { path = "../massa-time" } massa_wallet = { path = "../massa-wallet" } massa_serialization = { path = "../massa-serialization" } massa_proto = { path = "../massa-proto" } +massa_versioning = { path = "../massa-versioning" } [dev-dependencies] crossbeam = "0.8.2" diff --git a/massa-grpc/src/api.rs b/massa-grpc/src/api.rs index 52164d80f6c..86a6059b326 100644 --- a/massa-grpc/src/api.rs +++ b/massa-grpc/src/api.rs @@ -339,6 +339,29 @@ pub(crate) fn get_largest_stakers( }) } +/// Get MIP status (versioning) +pub(crate) fn get_mip_status( + grpc: &MassaGrpc, + request: tonic::Request, +) -> Result { + let mip_store_status_ = grpc.mip_store.get_mip_status(); + let mip_store_status: Result, GrpcError> = mip_store_status_ + .iter() + .map(|(mip_info, state_id_)| { + let state_id = grpc::ComponentStateId::from(state_id_); + Ok(grpc::MipStatusEntry { + mip_info: Some(grpc::MipInfo::from(mip_info)), + state_id: i32::from(state_id), + }) + }) + .collect(); + + Ok(grpc::GetMipStatusResponse { + id: request.into_inner().id, + entry: mip_store_status?, + }) +} + /// Get next block best parents pub(crate) fn get_next_block_best_parents( grpc: &MassaGrpc, diff --git a/massa-grpc/src/handler.rs b/massa-grpc/src/handler.rs index 8c60cdb3321..95005adbd7d 100644 --- a/massa-grpc/src/handler.rs +++ b/massa-grpc/src/handler.rs @@ -3,7 +3,7 @@ use massa_proto::massa::api::v1 as grpc; use crate::api::{ - get_blocks, get_blocks_by_slots, get_datastore_entries, get_largest_stakers, + get_blocks, get_blocks_by_slots, get_datastore_entries, get_largest_stakers, get_mip_status, get_next_block_best_parents, get_operations, get_sc_execution_events, get_selector_draws, get_transactions_throughput, get_version, }; @@ -55,6 +55,14 @@ impl grpc::massa_service_server::MassaService for MassaGrpc { Ok(tonic::Response::new(get_largest_stakers(self, request)?)) } + /// handler for get mip status (versioning) + async fn get_mip_status( + &self, + request: tonic::Request, + ) -> Result, tonic::Status> { + Ok(tonic::Response::new(get_mip_status(self, request)?)) + } + /// handler for get next block best parents async fn get_next_block_best_parents( &self, diff --git a/massa-grpc/src/server.rs b/massa-grpc/src/server.rs index 3b8ee2f35fa..8687aa12405 100644 --- a/massa-grpc/src/server.rs +++ b/massa-grpc/src/server.rs @@ -12,6 +12,8 @@ use massa_proto::massa::api::v1::massa_service_server::MassaServiceServer; use massa_proto::massa::api::v1::FILE_DESCRIPTOR_SET; use massa_protocol_exports::ProtocolController; use massa_storage::Storage; +use massa_versioning::versioning::MipStore; + use tokio::sync::oneshot; use tonic::{ codec::CompressionEncoding, @@ -46,6 +48,8 @@ pub struct MassaGrpc { pub grpc_config: GrpcConfig, /// node version pub version: massa_models::version::Version, + /// mip store + pub mip_store: MipStore, } impl MassaGrpc { diff --git a/massa-grpc/src/tests/test.rs b/massa-grpc/src/tests/test.rs index b827f1ae151..4742831fddf 100644 --- a/massa-grpc/src/tests/test.rs +++ b/massa-grpc/src/tests/test.rs @@ -10,13 +10,15 @@ use massa_models::config::{ MAX_DENUNCIATIONS_PER_BLOCK_HEADER, MAX_ENDORSEMENTS_PER_MESSAGE, MAX_FUNCTION_NAME_LENGTH, MAX_OPERATIONS_PER_BLOCK, MAX_OPERATIONS_PER_MESSAGE,
MAX_OPERATION_DATASTORE_ENTRY_COUNT, MAX_OPERATION_DATASTORE_KEY_LENGTH, MAX_OPERATION_DATASTORE_VALUE_LENGTH, MAX_PARAMETERS_SIZE, - PERIODS_PER_CYCLE, T0, THREAD_COUNT, VERSION, + MIP_STORE_STATS_BLOCK_CONSIDERED, MIP_STORE_STATS_COUNTERS_MAX, PERIODS_PER_CYCLE, T0, + THREAD_COUNT, VERSION, }; use massa_pool_exports::test_exports::MockPoolController; use massa_pool_exports::PoolChannels; use massa_pos_exports::test_exports::MockSelectorController; use massa_proto::massa::api::v1::massa_service_client::MassaServiceClient; use massa_protocol_exports::MockProtocolController; +use massa_versioning::versioning::{MipStatsConfig, MipStore}; use std::{ net::{IpAddr, Ipv4Addr, SocketAddr}, path::PathBuf, @@ -94,6 +96,13 @@ async fn test_start_grpc_server() { client_certificate_authority_root_path: PathBuf::default(), }; + let mip_stats_config = MipStatsConfig { + block_count_considered: MIP_STORE_STATS_BLOCK_CONSIDERED, + counters_max: MIP_STORE_STATS_COUNTERS_MAX, + }; + + let mip_store = MipStore::try_from(([], mip_stats_config)).unwrap(); + let service = MassaGrpc { consensus_controller: Box::new(consensus_controller), consensus_channels, @@ -112,6 +121,7 @@ async fn test_start_grpc_server() { storage: shared_storage, grpc_config: grpc_config.clone(), version: *VERSION, + mip_store, }; let stop_handle = service.serve(&grpc_config).await.unwrap(); diff --git a/massa-hash/Cargo.toml b/massa-hash/Cargo.toml index 8512a6eaed0..789c7e719fe 100644 --- a/massa-hash/Cargo.toml +++ b/massa-hash/Cargo.toml @@ -1,18 +1,20 @@ [package] name = "massa_hash" -version = "0.1.0" +version = "0.23.0" authors = ["Massa Labs "] edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -blake3 = "1.3" -bs58 = { version = "0.4", features = ["check"] } +blake3 = "=1.3" +bs58 = { version = "=0.4", features = ["check"] } displaydoc = "0.2" serde = { version = "1.0", features = ["derive"] } thiserror = "1.0" -nom = "7.1" +nom = "=7.1" +lsmtree = "=0.1.1" +generic-array = "0.14.7" # custom modules massa_serialization = { path = "../massa-serialization" } diff --git a/massa-hash/src/hash.rs b/massa-hash/src/hash.rs index b7d6615b26a..2d1bb49675a 100644 --- a/massa-hash/src/hash.rs +++ b/massa-hash/src/hash.rs @@ -74,30 +74,22 @@ impl std::fmt::Debug for Hash { } } -/// The bitwise XOR is important to maintain the latest hash of the ~ 1TB ledger. Since the size is big, -/// it is not feasible to hash the whole ledger every 0.5s (when the changes occur). Instead, -/// all the rows of the ledger are hashed and XORed together to get a single hash. Whenever, any -/// row has to be changed, the row's original hash is XORed with the single hash. This essentially nullifies -/// its effect and then the new hash of the changed row is XORed with the single hash. Thus, -/// giving the latest hash without going over the entire ~1TB ledger. This method is as secure as the traditional -/// hash at the same time, it is much more efficient and convenient when the throughput needs are high and the requirement -/// of integrity cannot be compromised. - +/// Previously, the final state hash was a XOR of various hashes. +/// However, this is vulnerable: https://github.com/massalabs/massa/discussions/3852 +/// As a result, we use lsmtree's Sparse Merkle Tree instead, which is not vulnerable to this. +/// We still use bitwise XOR for fingerprinting on some structures. +/// TODO: Remove every usage of this?
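+///
+/// For illustration, a sketch of the XOR-fold fingerprint this enables (`Hash::compute_from` and
+/// `HASH_SIZE_BYTES` exist in this crate; the rows are made-up example data):
+/// ```ignore
+/// let zero = Hash::from_bytes(&[0; HASH_SIZE_BYTES]);
+/// let mut fingerprint = zero;
+/// fingerprint ^= Hash::compute_from(b"row A"); // insert row A
+/// fingerprint ^= Hash::compute_from(b"row B"); // insert row B
+/// fingerprint ^= Hash::compute_from(b"row A"); // XOR-ing again removes row A
+/// assert_eq!(fingerprint, zero ^ Hash::compute_from(b"row B"));
+/// ```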
impl BitXorAssign for Hash { fn bitxor_assign(&mut self, rhs: Self) { *self = *self ^ rhs; } } -/// The bitwise XOR is important to maintain the latest hash of the ~ 1TB ledger. Since the size is big, -/// it is not feasible to hash the whole ledger every 0.5s (when the changes occur). Instead, -/// all the rows of the ledger are hashed and XORed together to get a single hash. Whenever, any -/// row has to be changed, the row's original hash is XORed with the single hash. This essentially nullifies -/// its effect and then the new hash of the changed row is XORed with the single hash. Thus, -/// giving the latest hash without going over the entire ~1TB ledger. This method is as secure as the traditional -/// hash at the same time, it is much more efficient and convenient when the throughput needs are high and the requirement -/// of integrity cannot be compromised. - +/// Previously, the final state hash was a XOR of various hashes. +/// However, this is vulnerable: https://github.com/massalabs/massa/discussions/3852 +/// As a result, we use lsmtree's Sparse Merkle Tree instead, which is not vulnerable to this. +/// We still use bitwise XOR for fingerprinting on some structures. +/// TODO: Remove every usage of this? impl BitXor for Hash { type Output = Self; @@ -108,7 +100,7 @@ impl BitXor for Hash { .zip(other.to_bytes()) .map(|(x, y)| x ^ y) .collect(); - // unwrap won't fail because of the intial byte arrays size + // unwrap won't fail because of the initial byte arrays size let input_bytes: [u8; HASH_SIZE_BYTES] = xored_bytes.try_into().unwrap(); Hash::from_bytes(&input_bytes) } @@ -209,7 +201,7 @@ impl Hash { } /// Serializer for `Hash` -#[derive(Default)] +#[derive(Default, Clone)] pub struct HashSerializer; impl HashSerializer { @@ -375,6 +367,66 @@ impl FromStr for Hash { } } +/// Wrapper around a Blake3 hasher, used for the Sparse Merkle Tree computation +pub struct SmtHasher(blake3::Hasher); +impl lsmtree::digest::OutputSizeUser for SmtHasher { + type OutputSize = lsmtree::digest::typenum::U32; +} + +impl lsmtree::digest::Digest for SmtHasher { + fn new() -> Self { + SmtHasher(blake3::Hasher::new()) + } + + fn new_with_prefix(_: impl AsRef<[u8]>) -> Self { + unreachable!() + } + + fn update(&mut self, data: impl AsRef<[u8]>) { + self.0.update(data.as_ref()); + } + + fn chain_update(self, _: impl AsRef<[u8]>) -> Self { + unreachable!() + } + + fn finalize(self) -> lsmtree::digest::Output { + let hash: [u8; HASH_SIZE_BYTES] = self.0.finalize().into(); + generic_array::GenericArray::from(hash) + } + + fn finalize_into(self, _: &mut lsmtree::digest::Output) { + unreachable!() + } + + fn finalize_reset(&mut self) -> lsmtree::digest::Output { + unreachable!() + } + + fn finalize_into_reset(&mut self, _: &mut lsmtree::digest::Output) { + unreachable!() + } + + fn reset(&mut self) { + unreachable!() + } + + fn output_size() -> usize { + debug_assert_eq!( + HASH_SIZE_BYTES, + ::output_size(), + "lsm_tree hash size is not HASH_SIZE_BYTES" + ); + HASH_SIZE_BYTES + } + + fn digest(data: impl AsRef<[u8]>) -> lsmtree::digest::Output { + let mut h = Self::new(); + h.update(data); + h.finalize() + } +} + #[cfg(test)] mod tests { use serial_test::serial; diff --git a/massa-ledger-exports/Cargo.toml b/massa-ledger-exports/Cargo.toml index 6ba6f2064c1..eb8e36afff6 100644 --- a/massa-ledger-exports/Cargo.toml +++ b/massa-ledger-exports/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "massa_ledger_exports" -version = "0.1.0" +version = "0.23.0" authors = ["Massa Labs "] edition = "2021" @@ -10,17
+10,17 @@ serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" tempfile = { version = "3.3", optional = true } # use with testing feature thiserror = "1.0" -nom = "7.1" +nom = "=7.1" +rocksdb = "0.20" +num_enum = "0.5.10" # custom modules massa_proto = { path = "../massa-proto" } massa_hash = { path = "../massa-hash" } massa_models = { path = "../massa-models" } massa_serialization = { path = "../massa-serialization" } -num_enum = "0.5.10" +massa_db = { path = "../massa-db" } -[dev-dependencies] -massa_signature = { path = "../massa-signature" } # for more information on what are the following features used for, see the cargo.toml at workspace level diff --git a/massa-ledger-exports/src/config.rs b/massa-ledger-exports/src/config.rs index 5be051d6603..0484f4c7d28 100644 --- a/massa-ledger-exports/src/config.rs +++ b/massa-ledger-exports/src/config.rs @@ -15,8 +15,6 @@ pub struct LedgerConfig { pub disk_ledger_path: PathBuf, /// max key length pub max_key_length: u8, - /// max ledger part size - pub max_ledger_part_size: u64, /// max datastore value length pub max_datastore_value_length: u64, } diff --git a/massa-ledger-exports/src/controller.rs b/massa-ledger-exports/src/controller.rs index 4b9dee32266..78fe8516631 100644 --- a/massa-ledger-exports/src/controller.rs +++ b/massa-ledger-exports/src/controller.rs @@ -1,23 +1,11 @@ -use massa_hash::Hash; -use massa_models::{ - address::Address, amount::Amount, bytecode::Bytecode, error::ModelsError, slot::Slot, - streaming_step::StreamingStep, -}; +use massa_models::{address::Address, amount::Amount, bytecode::Bytecode}; use std::collections::BTreeSet; use std::fmt::Debug; -use crate::{Key, LedgerChanges, LedgerError}; +use crate::{LedgerChanges, LedgerError}; +use ::massa_db::DBBatch; pub trait LedgerController: Send + Sync + Debug { - /// Allows applying `LedgerChanges` to the final ledger - /// * final_state_data should be non-None only if we are storing a final_state snapshot. - fn apply_changes( - &mut self, - changes: LedgerChanges, - slot: Slot, - final_state_data: Option>, - ); - /// Loads ledger from file fn load_initial_ledger(&mut self) -> Result<(), LedgerError>; @@ -55,34 +43,15 @@ pub trait LedgerController: Send + Sync + Debug { /// A `BTreeSet` of the datastore keys fn get_datastore_keys(&self, addr: &Address) -> Option>>; - /// Get the current disk ledger hash - fn get_ledger_hash(&self) -> Hash; - - /// Get a part of the ledger - /// Used for bootstrap - /// Return: Tuple with data and last key - fn get_ledger_part( - &self, - last_key: StreamingStep, - ) -> Result<(Vec, StreamingStep), ModelsError>; - - /// Set a part of the ledger - /// Used for bootstrap - /// Return: Last key inserted - fn set_ledger_part(&self, data: Vec) -> Result, ModelsError>; - /// Reset the ledger /// /// USED FOR BOOTSTRAP ONLY fn reset(&mut self); - fn set_initial_slot(&mut self, slot: Slot); - - fn get_slot(&self) -> Result; - - fn set_final_state_hash(&mut self, data: Vec); + fn apply_changes_to_batch(&mut self, changes: LedgerChanges, ledger_batch: &mut DBBatch); - fn get_final_state(&self) -> Result, ModelsError>; + /// Deserializes the key and value, useful after bootstrap + fn is_key_value_valid(&self, serialized_key: &[u8], serialized_value: &[u8]) -> bool; /// Get every address and their corresponding balance. 
/// diff --git a/massa-ledger-exports/src/key.rs b/massa-ledger-exports/src/key.rs index b4c26520739..6d88afdc121 100644 --- a/massa-ledger-exports/src/key.rs +++ b/massa-ledger-exports/src/key.rs @@ -1,3 +1,4 @@ +use massa_db::LEDGER_PREFIX; use massa_models::{ address::{Address, AddressDeserializer, AddressSerializer}, serialization::{VecU8Deserializer, VecU8Serializer}, @@ -135,6 +136,7 @@ impl Key { pub fn datastore_prefix_from_address(address: &Address) -> Vec { let mut prefix = Vec::new(); + prefix.extend(LEDGER_PREFIX.as_bytes()); U64VarIntSerializer::new() .serialize(&KEY_VERSION, &mut prefix) .unwrap(); @@ -180,6 +182,8 @@ impl Serializer for KeySerializer { /// KeySerializer::new(true).serialize(&key, &mut serialized).unwrap(); /// ``` fn serialize(&self, value: &Key, buffer: &mut Vec) -> Result<(), SerializeError> { + buffer.extend(LEDGER_PREFIX.as_bytes()); + self.version_byte_serializer .serialize(&KEY_VERSION, buffer)?; self.address_serializer.serialize(&value.address, buffer)?; @@ -244,7 +248,9 @@ impl Deserializer for KeyDeserializer { &self, buffer: &'a [u8], ) -> nom::IResult<&'a [u8], Key, E> { - let (rest, _version) = self.version_byte_deserializer.deserialize(buffer)?; + let (rest, _version) = self + .version_byte_deserializer + .deserialize(&buffer[LEDGER_PREFIX.as_bytes().len()..])?; let (rest, address) = self.address_deserializer.deserialize(rest)?; let (rest, key_type) = self.key_type_deserializer.deserialize(rest)?; diff --git a/massa-ledger-exports/src/ledger_entry.rs b/massa-ledger-exports/src/ledger_entry.rs index c0860fa95e0..8f98877dde1 100644 --- a/massa-ledger-exports/src/ledger_entry.rs +++ b/massa-ledger-exports/src/ledger_entry.rs @@ -86,7 +86,7 @@ impl Serializer for LedgerEntrySerializer { /// Deserializer for `LedgerEntry` pub struct LedgerEntryDeserializer { - amount_deserializer: AmountDeserializer, + pub amount_deserializer: AmountDeserializer, bytecode_deserializer: BytecodeDeserializer, datastore_deserializer: DatastoreDeserializer, } diff --git a/massa-ledger-exports/src/lib.rs b/massa-ledger-exports/src/lib.rs index f9cca164dac..a127a956763 100644 --- a/massa-ledger-exports/src/lib.rs +++ b/massa-ledger-exports/src/lib.rs @@ -26,7 +26,10 @@ pub use ledger_changes::{ LedgerEntryUpdateDeserializer, LedgerEntryUpdateSerializer, }; pub use ledger_entry::{LedgerEntry, LedgerEntryDeserializer, LedgerEntrySerializer}; -pub use types::{Applicable, SetOrDelete, SetOrKeep, SetUpdateOrDelete}; +pub use types::{ + Applicable, SetOrDelete, SetOrKeep, SetOrKeepDeserializer, SetOrKeepSerializer, + SetUpdateOrDelete, SetUpdateOrDeleteDeserializer, SetUpdateOrDeleteSerializer, +}; #[cfg(feature = "testing")] pub mod test_exports; diff --git a/massa-ledger-exports/src/test_exports/config.rs b/massa-ledger-exports/src/test_exports/config.rs index 42a58cd6882..16c43492312 100644 --- a/massa-ledger-exports/src/test_exports/config.rs +++ b/massa-ledger-exports/src/test_exports/config.rs @@ -2,10 +2,7 @@ /// This file defines testing tools related to the configuration use massa_models::{ address::Address, - config::{ - LEDGER_PART_SIZE_MESSAGE_BYTES, MAX_DATASTORE_KEY_LENGTH, MAX_DATASTORE_VALUE_LENGTH, - THREAD_COUNT, - }, + config::{MAX_DATASTORE_KEY_LENGTH, MAX_DATASTORE_VALUE_LENGTH, THREAD_COUNT}, }; use std::collections::HashMap; use std::io::Seek; @@ -23,7 +20,6 @@ impl Default for LedgerConfig { disk_ledger_path: "".into(), thread_count: THREAD_COUNT, max_key_length: MAX_DATASTORE_KEY_LENGTH, - max_ledger_part_size: LEDGER_PART_SIZE_MESSAGE_BYTES, 
max_datastore_value_length: MAX_DATASTORE_VALUE_LENGTH, } } @@ -45,7 +41,6 @@ impl LedgerConfig { initial_ledger_path: initial_ledger.path().to_path_buf(), disk_ledger_path: disk_ledger.path().to_path_buf(), max_key_length: MAX_DATASTORE_KEY_LENGTH, - max_ledger_part_size: LEDGER_PART_SIZE_MESSAGE_BYTES, thread_count: THREAD_COUNT, max_datastore_value_length: MAX_DATASTORE_VALUE_LENGTH, }, diff --git a/massa-ledger-worker/Cargo.toml b/massa-ledger-worker/Cargo.toml index 19b63573885..911539994f0 100644 --- a/massa-ledger-worker/Cargo.toml +++ b/massa-ledger-worker/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "massa_ledger_worker" -version = "0.1.0" +version = "0.23.0" authors = ["Massa Labs "] edition = "2021" @@ -8,17 +8,17 @@ edition = "2021" serde_json = "1.0" tempfile = { version = "3.3", optional = true } # use with testing feature rocksdb = "0.20" -nom = "7.1" -tracing = "0.1" +parking_lot = { version = "0.12", features = ["deadlock_detection"] } # custom modules massa_ledger_exports = { path = "../massa-ledger-exports" } massa_models = { path = "../massa-models" } massa_serialization = { path = "../massa-serialization" } -massa_hash = { path = "../massa-hash" } +massa_db = { path = "../massa-db" } [dev-dependencies] massa_signature = { path = "../massa-signature" } +massa_hash = { path = "../massa-hash" } tempfile = "3.3" # for more information on what are the following features used for, see the cargo.toml at workspace level diff --git a/massa-ledger-worker/src/ledger.rs b/massa-ledger-worker/src/ledger.rs index 692f82722d9..736f92b1f93 100644 --- a/massa-ledger-worker/src/ledger.rs +++ b/massa-ledger-worker/src/ledger.rs @@ -3,22 +3,22 @@ //! This file defines the final ledger associating addresses to their balances, bytecode and data. use crate::ledger_db::{LedgerDB, LedgerSubEntry}; -use massa_hash::Hash; +use massa_db::{DBBatch, MassaDB}; use massa_ledger_exports::{ - Key, LedgerChanges, LedgerConfig, LedgerController, LedgerEntry, LedgerError, + LedgerChanges, LedgerConfig, LedgerController, LedgerEntry, LedgerError, }; use massa_models::{ address::Address, amount::{Amount, AmountDeserializer}, bytecode::{Bytecode, BytecodeDeserializer}, - error::ModelsError, - slot::Slot, - streaming_step::StreamingStep, }; use massa_serialization::{DeserializeError, Deserializer}; -use nom::AsBytes; -use std::collections::{BTreeSet, HashMap}; +use parking_lot::RwLock; use std::ops::Bound::Included; +use std::{ + collections::{BTreeSet, HashMap}, + sync::Arc, +}; /// Represents a final ledger associating addresses to their balances, bytecode and data. /// The final ledger is part of the final state which is attached to a final slot, can be bootstrapped and allows others to bootstrap. @@ -34,14 +34,13 @@ pub struct FinalLedger { impl FinalLedger { /// Initializes a new `FinalLedger` by reading its initial state from file. 
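+ ///
+ /// Construction sketch mirroring the crate tests (`db_config` and `ledger_config` are assumed
+ /// pre-built configuration values, not provided by this crate):
+ /// ```ignore
+ /// let db = Arc::new(RwLock::new(MassaDB::new(db_config)));
+ /// let ledger = FinalLedger::new(ledger_config, db.clone());
+ /// ```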
- pub fn new(config: LedgerConfig, with_final_state: bool) -> Self { + pub fn new(config: LedgerConfig, db: Arc<RwLock<MassaDB>>) -> Self { // create and initialize the disk ledger let sorted_ledger = LedgerDB::new( - config.disk_ledger_path.clone(), + db, config.thread_count, config.max_key_length, - config.max_ledger_part_size, - with_final_state, + config.max_datastore_value_length, ); // generate the final ledger @@ -53,17 +52,6 @@ impl FinalLedger { } impl LedgerController for FinalLedger { - /// Allows applying `LedgerChanges` to the final ledger - fn apply_changes( - &mut self, - changes: LedgerChanges, - slot: Slot, - final_state_data: Option<Vec<u8>>, - ) { - self.sorted_ledger - .apply_changes(changes, slot, final_state_data); - } - /// Loads ledger from file fn load_initial_ledger(&mut self) -> Result<(), LedgerError> { // load the ledger tree from file @@ -158,34 +146,6 @@ impl LedgerController for FinalLedger { self.sorted_ledger.get_datastore_keys(addr) } - /// Get the current disk ledger hash - fn get_ledger_hash(&self) -> Hash { - self.sorted_ledger.get_ledger_hash() - } - - /// Get a part of the disk ledger. - /// - /// Solely used by the bootstrap. - /// - /// # Returns - /// A tuple containing the data and the last returned key - fn get_ledger_part( - &self, - last_key: StreamingStep<Key>, - ) -> Result<(Vec<u8>, StreamingStep<Key>), ModelsError> { - self.sorted_ledger.get_ledger_part(last_key) - } - - /// Set a part of the disk ledger. - /// - /// Solely used by the bootstrap. - /// - /// # Returns - /// The last key inserted - fn set_ledger_part(&self, data: Vec<u8>) -> Result<StreamingStep<Key>, ModelsError> { - self.sorted_ledger.set_ledger_part(data.as_bytes()) - } - /// Reset the disk ledger. /// /// USED FOR BOOTSTRAP ONLY @@ -193,24 +153,16 @@ impl LedgerController for FinalLedger { self.sorted_ledger.reset(); } - fn set_initial_slot(&mut self, slot: Slot) { - self.sorted_ledger.set_initial_slot(slot); - } - - /// Get the slot associated with the current ledger - fn get_slot(&self) -> Result<Slot, ModelsError> { - self.sorted_ledger.get_slot() - } - - /// Set the final_state_hash of the slot associated with the current ledger - /// Can be used to verify the integrity of the final state saved when restarting from snapshot - fn set_final_state_hash(&mut self, data: Vec<u8>) { - self.sorted_ledger.set_final_state_hash(&data) + /// Allows applying `LedgerChanges` to the final ledger + fn apply_changes_to_batch(&mut self, changes: LedgerChanges, ledger_batch: &mut DBBatch) { + self.sorted_ledger + .apply_changes_to_batch(changes, ledger_batch); } - /// Get the final state stored in the ledger, to restart from snapshot - fn get_final_state(&self) -> Result<Vec<u8>, ModelsError> { - self.sorted_ledger.get_final_state() + /// Deserializes the key and value, useful after bootstrap + fn is_key_value_valid(&self, serialized_key: &[u8], serialized_value: &[u8]) -> bool { + self.sorted_ledger + .is_key_value_valid(serialized_key, serialized_value) } /// Get every address and their corresponding balance.
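// Illustration (not part of the patch): the ledger no longer owns its RocksDB
// instance; callers open one shared `MassaDB` and hand it to every state module.
// Hedged sketch, with `MassaDBConfig` fields taken from the test code further
// below and placeholder values:
fn open_shared_ledger(config: LedgerConfig) -> FinalLedger {
    use std::sync::Arc;
    use parking_lot::RwLock;
    use massa_db::{MassaDB, MassaDBConfig};
    let db_config = MassaDBConfig {
        path: "/tmp/massa_db".into(), // placeholder path
        max_history_length: 10,
        max_new_elements: 100,
        thread_count: config.thread_count,
    };
    let db = Arc::new(RwLock::new(MassaDB::new(db_config)));
    FinalLedger::new(config, db)
}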
diff --git a/massa-ledger-worker/src/ledger_db.rs b/massa-ledger-worker/src/ledger_db.rs index 1d8ea15caf8..a75e20eb04d 100644 --- a/massa-ledger-worker/src/ledger_db.rs +++ b/massa-ledger-worker/src/ledger_db.rs @@ -2,52 +2,21 @@ //! Module to interact with the disk ledger -use massa_hash::{Hash, HASH_SIZE_BYTES}; +use massa_db::{DBBatch, MassaDB, CF_ERROR, CRUD_ERROR, KEY_SER_ERROR, LEDGER_PREFIX, STATE_CF}; use massa_ledger_exports::*; +use massa_models::amount::AmountDeserializer; +use massa_models::bytecode::BytecodeDeserializer; use massa_models::{ - address::Address, - amount::AmountSerializer, - bytecode::BytecodeSerializer, - error::ModelsError, - serialization::{VecU8Deserializer, VecU8Serializer}, - slot::{Slot, SlotDeserializer, SlotSerializer}, - streaming_step::StreamingStep, -}; -use massa_serialization::{DeserializeError, Deserializer, Serializer, U64VarIntSerializer}; -use nom::multi::many0; -use nom::sequence::tuple; -use rocksdb::{ - ColumnFamily, ColumnFamilyDescriptor, Direction, IteratorMode, Options, ReadOptions, - WriteBatch, DB, + address::Address, amount::AmountSerializer, bytecode::BytecodeSerializer, slot::Slot, }; +use massa_serialization::{DeserializeError, Deserializer, Serializer}; +use parking_lot::RwLock; +use rocksdb::{Direction, IteratorMode, ReadOptions}; +use std::collections::{BTreeSet, HashMap}; +use std::{fmt::Debug, sync::Arc}; + +use massa_models::amount::Amount; use std::ops::Bound; -use std::path::PathBuf; -use std::rc::Rc; -use std::{collections::BTreeMap, fmt::Debug}; -use std::{ - collections::{BTreeSet, HashMap}, - convert::TryInto, -}; -use tracing::{debug, info}; - -#[cfg(feature = "testing")] -use massa_models::amount::{Amount, AmountDeserializer}; - -const LEDGER_CF: &str = "ledger"; -const METADATA_CF: &str = "metadata"; -const FINAL_STATE_CF: &str = "final_state"; -const OPEN_ERROR: &str = "critical: rocksdb open operation failed"; -const CRUD_ERROR: &str = "critical: rocksdb crud operation failed"; -const CF_ERROR: &str = "critical: rocksdb column family operation failed"; -const LEDGER_HASH_ERROR: &str = "critical: saved ledger hash is corrupted"; -const KEY_DESER_ERROR: &str = "critical: key deserialization failed"; -const KEY_SER_ERROR: &str = "critical: key serialization failed"; -const KEY_LEN_SER_ERROR: &str = "critical: key length serialization failed"; -const SLOT_KEY: &[u8; 1] = b"s"; -const LEDGER_HASH_KEY: &[u8; 1] = b"h"; -const LEDGER_FINAL_STATE_KEY: &[u8; 2] = b"fs"; -const LEDGER_FINAL_STATE_HASH_KEY: &[u8; 3] = b"fsh"; -const LEDGER_HASH_INITIAL_BYTES: &[u8; 32] = &[0; HASH_SIZE_BYTES]; /// Ledger sub entry enum pub enum LedgerSubEntry { @@ -72,46 +41,22 @@ impl LedgerSubEntry { /// Disk ledger DB module /// /// Contains a `RocksDB` DB instance -pub(crate) struct LedgerDB { - db: DB, +pub struct LedgerDB { + db: Arc<RwLock<MassaDB>>, thread_count: u8, - key_serializer: KeySerializer, key_serializer_db: KeySerializer, - key_deserializer: KeyDeserializer, key_deserializer_db: KeyDeserializer, amount_serializer: AmountSerializer, bytecode_serializer: BytecodeSerializer, - slot_serializer: SlotSerializer, - slot_deserializer: SlotDeserializer, - len_serializer: U64VarIntSerializer, - ledger_part_size_message_bytes: u64, - #[cfg(feature = "testing")] amount_deserializer: AmountDeserializer, + bytecode_deserializer: BytecodeDeserializer, + max_datastore_value_length: u64, } impl Debug for LedgerDB { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:#?}", self.db) - } -} - -/// Batch containing write operations to perform on disk and cache for the ledger hash computing -pub struct LedgerBatch { - // Rocksdb write batch - write_batch: WriteBatch, - // Ledger hash state in the current batch - ledger_hash: Hash, - // Added entry hashes in the current batch
- aeh_list: BTreeMap<Vec<u8>, Hash>, -} - -impl LedgerBatch { - pub fn new(ledger_hash: Hash) -> Self { - Self { - write_batch: WriteBatch::default(), - ledger_hash, - aeh_list: BTreeMap::new(), - } + let db = self.db.read(); + write!(f, "{:#?}", db) } } @@ -121,156 +66,71 @@ impl LedgerDB { /// # Arguments /// * path: path to the desired disk ledger db directory pub fn new( - path: PathBuf, + db: Arc<RwLock<MassaDB>>, thread_count: u8, max_datastore_key_length: u8, - ledger_part_size_message_bytes: u64, - with_final_state: bool, + max_datastore_value_length: u64, ) -> Self { - let mut db_opts = Options::default(); - db_opts.create_if_missing(true); - db_opts.create_missing_column_families(true); - - info!("Init LedgerDB, with_final_state = {}", with_final_state); - debug!("Init LedgerDB, with_final_state = {}", with_final_state); - - let db = if with_final_state { - DB::open_cf_descriptors( - &db_opts, - path, - vec![ - ColumnFamilyDescriptor::new(LEDGER_CF, Options::default()), - ColumnFamilyDescriptor::new(METADATA_CF, Options::default()), - ColumnFamilyDescriptor::new(FINAL_STATE_CF, Options::default()), - ], - ) - .expect(OPEN_ERROR) - } else { - DB::open_cf_descriptors( - &db_opts, - path, - vec![ - ColumnFamilyDescriptor::new(LEDGER_CF, Options::default()), - ColumnFamilyDescriptor::new(METADATA_CF, Options::default()), - ], - ) - .expect(OPEN_ERROR) - }; - LedgerDB { db, thread_count, - key_serializer: KeySerializer::new(true), key_serializer_db: KeySerializer::new(false), - key_deserializer: KeyDeserializer::new(max_datastore_key_length, true), key_deserializer_db: KeyDeserializer::new(max_datastore_key_length, false), amount_serializer: AmountSerializer::new(), bytecode_serializer: BytecodeSerializer::new(), - slot_serializer: SlotSerializer::new(), - slot_deserializer: SlotDeserializer::new( - (Bound::Included(u64::MIN), Bound::Included(u64::MAX)), - (Bound::Included(0_u8), Bound::Excluded(thread_count)), - ), - len_serializer: U64VarIntSerializer::new(), - ledger_part_size_message_bytes, - #[cfg(feature = "testing")] amount_deserializer: AmountDeserializer::new( Bound::Included(Amount::MIN), Bound::Included(Amount::MAX), ), + bytecode_deserializer: BytecodeDeserializer::new(max_datastore_value_length), + max_datastore_value_length, } } - pub fn set_initial_slot(&mut self, slot: Slot) { - let ledger_hash = self.get_ledger_hash(); - let mut batch = LedgerBatch::new(ledger_hash); - self.set_slot(slot, &mut batch); - self.write_batch(batch); - } - /// Loads the initial disk ledger /// /// # Arguments pub fn load_initial_ledger(&mut self, initial_ledger: HashMap<Address, LedgerEntry>) { - // initial ledger_hash value to avoid matching an option in every XOR operation - // because of a one time case being an empty ledger - let ledger_hash = Hash::from_bytes(LEDGER_HASH_INITIAL_BYTES); - let mut batch = LedgerBatch::new(ledger_hash); + let mut batch = DBBatch::new(); + for (address, entry) in initial_ledger { self.put_entry(&address, entry, &mut batch); } - self.set_slot( - Slot::new(0, self.thread_count.saturating_sub(1)), - &mut batch, + + self.db.write().write_batch( + batch, + Default::default(), + Some(Slot::new(0, self.thread_count.saturating_sub(1))), ); - self.write_batch(batch); } /// Allows applying `LedgerChanges` to the disk ledger /// /// # Arguments /// * changes: ledger changes to be applied - /// * slot: new slot associated to the final ledger - /// * final_state_data: the serialized final state data to include, in case we use the feature `create_snapshot` - pub fn apply_changes( - &mut self, -
changes: LedgerChanges, - slot: Slot, - final_state_data: Option<Vec<u8>>, - ) { - // create the batch - let mut batch = LedgerBatch::new(self.get_ledger_hash()); + /// * batch: the batch to apply the changes to + pub fn apply_changes_to_batch(&self, changes: LedgerChanges, batch: &mut DBBatch) { // for all incoming changes for (addr, change) in changes.0 { match change { // the incoming change sets a ledger entry to a new one SetUpdateOrDelete::Set(new_entry) => { // inserts/overwrites the entry with the incoming one - self.put_entry(&addr, new_entry, &mut batch); + self.put_entry(&addr, new_entry, batch); } // the incoming change updates an existing ledger entry SetUpdateOrDelete::Update(entry_update) => { // applies the updates to the entry // if the entry does not exist, inserts a default one and applies the updates to it - self.update_entry(&addr, entry_update, &mut batch); + self.update_entry(&addr, entry_update, batch); } // the incoming change deletes a ledger entry SetUpdateOrDelete::Delete => { // delete the entry, if it exists - self.delete_entry(&addr, &mut batch); + self.delete_entry(&addr, batch); } } } - // set the associated slot in metadata - self.set_slot(slot, &mut batch); - - if let Some(final_state) = final_state_data { - let fs_handle = self.db.cf_handle(FINAL_STATE_CF).expect(CF_ERROR); - batch - .write_batch - .put_cf(fs_handle, LEDGER_FINAL_STATE_KEY, final_state); - } - - // write the batch - self.write_batch(batch); - } - - /// Get the current disk ledger hash - pub fn get_ledger_hash(&self) -> Hash { - let handle = self.db.cf_handle(METADATA_CF).expect(CF_ERROR); - if let Some(ledger_hash_bytes) = self - .db - .get_pinned_cf(handle, LEDGER_HASH_KEY) - .expect(CRUD_ERROR) - .as_deref() - { - Hash::from_bytes(ledger_hash_bytes.try_into().expect(LEDGER_HASH_ERROR)) - } else { - // initial ledger_hash value to avoid matching an option in every XOR operation - // because of a one time case being an empty ledger - // also note that if you XOR a hash with itself, the result is LEDGER_HASH_INITIAL_BYTES - Hash::from_bytes(LEDGER_HASH_INITIAL_BYTES) - } } /// Get the given sub-entry of a given address. @@ -282,13 +142,14 @@ impl LedgerDB { /// # Returns /// An Option of the sub-entry value as bytes pub fn get_sub_entry(&self, addr: &Address, ty: LedgerSubEntry) -> Option<Vec<u8>> { - let handle = self.db.cf_handle(LEDGER_CF).expect(CF_ERROR); + let db = self.db.read(); + let handle = db.db.cf_handle(STATE_CF).expect(CF_ERROR); let key = ty.derive_key(addr); let mut serialized_key = Vec::new(); self.key_serializer_db .serialize(&key, &mut serialized_key) .expect(KEY_SER_ERROR); - self.db.get_cf(handle, serialized_key).expect(CRUD_ERROR) + db.db.get_cf(handle, serialized_key).expect(CRUD_ERROR) } /// Get every key of the datastore for a given address. @@ -296,14 +157,15 @@ impl LedgerDB { /// # Returns /// A `BTreeSet` of the datastore keys pub fn get_datastore_keys(&self, addr: &Address) -> Option<BTreeSet<Vec<u8>>> { - let handle = self.db.cf_handle(LEDGER_CF).expect(CF_ERROR); + let db = self.db.read(); + let handle = db.db.cf_handle(STATE_CF).expect(CF_ERROR); let mut opt = ReadOptions::default(); let key_prefix = datastore_prefix_from_address(addr); opt.set_iterate_range(key_prefix.clone()..end_prefix(&key_prefix).unwrap()); - let mut iter = self + let mut iter = db .db .iterator_cf_opt(handle, opt, IteratorMode::Start) .flatten() @@ -328,225 +190,62 @@ impl LedgerDB { Some(iter.collect()) }
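// Illustration (not part of the patch): the write path now accumulates changes in
// a `DBBatch` and commits once through the shared handle, mirroring
// `load_initial_ledger` above. Hedged sketch, assuming the changes and slot come
// from execution:
fn commit_changes(
    ledger_db: &LedgerDB,
    db: &Arc<RwLock<MassaDB>>,
    changes: LedgerChanges,
    slot: Slot,
) {
    let mut batch = DBBatch::new();
    ledger_db.apply_changes_to_batch(changes, &mut batch);
    // nothing is visible on disk until this single write_batch call
    db.write().write_batch(batch, Default::default(), Some(slot));
}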
- /// Get a part of the disk Ledger. - /// Mainly used in the bootstrap process. - /// - /// # Arguments - /// * `last_key`: key where the part retrieving must start - /// - /// # Returns - /// A tuple containing: - /// * The ledger part as bytes - /// * The last taken key (this is an optimization to easily keep a reference to the last key) - pub fn get_ledger_part( - &self, - cursor: StreamingStep<Key>, - ) -> Result<(Vec<u8>, StreamingStep<Key>), ModelsError> { - let handle = self.db.cf_handle(LEDGER_CF).expect(CF_ERROR); - let opt = ReadOptions::default(); - let ser = VecU8Serializer::new(); - let mut ledger_part = Vec::new(); - - // Creates an iterator from the next element after the last if defined, otherwise initialize it at the first key of the ledger. - let (db_iterator, mut new_cursor) = match cursor { - StreamingStep::Started => ( - self.db.iterator_cf_opt(handle, opt, IteratorMode::Start), - StreamingStep::<Key>::Started, - ), - StreamingStep::Ongoing(last_key) => { - let mut serialized_key = Vec::new(); - self.key_serializer_db - .serialize(&last_key, &mut serialized_key)?; - let mut iter = self.db.iterator_cf_opt( - handle, - opt, - IteratorMode::From(&serialized_key, Direction::Forward), - ); - iter.next(); - (iter, StreamingStep::Finished(None)) - } - StreamingStep::<Key>::Finished(_) => return Ok((ledger_part, cursor)), - }; - - // Iterates over the whole database - for (key, entry) in db_iterator.flatten() { - if (ledger_part.len() as u64) < (self.ledger_part_size_message_bytes) { - // We deserialize and re-serialize the key to change the key format from the - // database one to a format we can use outside of the ledger. - let (_, key) = self.key_deserializer_db.deserialize(&key)?; - self.key_serializer.serialize(&key, &mut ledger_part)?; - ser.serialize(&entry.to_vec(), &mut ledger_part)?; - new_cursor = StreamingStep::Ongoing(key); - } else { - break; - } - } - Ok((ledger_part, new_cursor)) + pub fn reset(&self) { + self.db.write().delete_prefix(LEDGER_PREFIX, STATE_CF, None); } - /// Set a part of the ledger in the database. - /// We deserialize in this function because we insert in the ledger while deserializing. - /// Used for bootstrap. - /// - /// # Arguments - /// * data: must be the serialized version provided by `get_ledger_part` - /// - /// # Returns - /// The last key of the inserted entry (this is an optimization to easily keep a reference to the last key) - pub fn set_ledger_part<'a>(&self, data: &'a [u8]) -> Result<StreamingStep<Key>, ModelsError> { - let handle = self.db.cf_handle(LEDGER_CF).expect(CF_ERROR); - let vec_u8_deserializer = - VecU8Deserializer::new(Bound::Included(0), Bound::Excluded(u64::MAX)); - let mut last_key: Rc<Option<Key>> = Rc::new(None); - let mut batch = LedgerBatch::new(self.get_ledger_hash()); - - // Since this data is coming from the network, deser to address and ser back to bytes for a security check. - let (rest, _) = many0(|input: &'a [u8]| { - let (rest, (key, value)) = tuple(( - |input| self.key_deserializer.deserialize(input), - |input| vec_u8_deserializer.deserialize(input), - ))(input)?; - *Rc::get_mut(&mut last_key).ok_or_else(|| { - nom::Err::Error(nom::error::Error::new(input, nom::error::ErrorKind::Fail)) - })?
= Some(key.clone()); - self.put_entry_value(handle, &mut batch, &key, &value); - Ok((rest, ())) - })(data) - .map_err(|_| ModelsError::SerializeError("Error in deserialization".to_string()))?; - - match last_key.as_ref() { - Some(last_key) => { - if rest.is_empty() { - self.write_batch(batch); - Ok(StreamingStep::Ongoing(last_key.clone())) - } else { - Err(ModelsError::SerializeError( - "Error in deserialization".to_string(), - )) - } - } - None => Ok(StreamingStep::Finished(None)), + /// Deserializes the key and value, useful after bootstrap + pub fn is_key_value_valid(&self, serialized_key: &[u8], serialized_value: &[u8]) -> bool { + if !serialized_key.starts_with(LEDGER_PREFIX.as_bytes()) { + return false; } - } - - pub fn reset(&mut self) { - self.db - .drop_cf(LEDGER_CF) - .expect("Error dropping ledger cf"); - self.db - .drop_cf(METADATA_CF) - .expect("Error dropping metadata cf"); - let mut db_opts = Options::default(); - db_opts.set_error_if_exists(true); - self.db - .create_cf(LEDGER_CF, &db_opts) - .expect("Error creating ledger cf"); - self.db - .create_cf(METADATA_CF, &db_opts) - .expect("Error creating metadata cf"); - } - - pub fn set_final_state_hash(&mut self, data: &[u8]) { - let handle = self.db.cf_handle(FINAL_STATE_CF).expect(CF_ERROR); - let mut batch = WriteBatch::default(); - - batch.put_cf(handle, LEDGER_FINAL_STATE_HASH_KEY, data); - self.db.write(batch).expect(CRUD_ERROR); - } - - pub fn get_final_state(&self) -> Result<Vec<u8>, ModelsError> { - let handle = self.db.cf_handle(FINAL_STATE_CF).expect(CF_ERROR); - let opt = ReadOptions::default(); - let Ok(Some(final_state_data)) = self.db.get_cf_opt(handle, LEDGER_FINAL_STATE_KEY, &opt) else { - return Err(ModelsError::BufferError(String::from("Could not recover final_state_data"))); - }; - let Ok(Some(final_state_hash)) = self.db.get_pinned_cf_opt(handle, LEDGER_FINAL_STATE_HASH_KEY, &opt) else { - return Err(ModelsError::BufferError(String::from("Could not recover final_state_hash"))); + let Ok((rest, key)) = self.key_deserializer_db.deserialize::<DeserializeError>(serialized_key) else { + return false; }; + if !rest.is_empty() { + return false; + } - let mut final_state = final_state_data; - final_state.extend_from_slice(&final_state_hash); + match key.key_type { + KeyType::BALANCE => { + let Ok((rest, _amount)) = self.amount_deserializer.deserialize::<DeserializeError>(serialized_value) else { + return false; + }; + if !rest.is_empty() { + return false; + } + } + KeyType::BYTECODE => { + let Ok((rest, _bytecode)) = self.bytecode_deserializer.deserialize::<DeserializeError>(serialized_value) else { + return false; + }; + if !rest.is_empty() { + return false; + } + } + KeyType::DATASTORE(_) => { + if serialized_value.len() >= self.max_datastore_value_length as usize { + return false; + } + } + } - Ok(final_state) + true } }
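// Illustration (not part of the patch): after bootstrap, each streamed key/value
// pair can be sanity-checked before being trusted. Hedged sketch, with `pairs`
// standing in for entries received from the bootstrap server:
fn check_streamed_entries(ledger_db: &LedgerDB, pairs: &[(Vec<u8>, Vec<u8>)]) -> bool {
    pairs.iter().all(|(k, v)| ledger_db.is_key_value_valid(k, v))
}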
// Private helpers impl LedgerDB { - /// Apply the given operation batch to the disk ledger - fn write_batch(&self, mut batch: LedgerBatch) { - let handle = self.db.cf_handle(METADATA_CF).expect(CF_ERROR); - batch - .write_batch - .put_cf(handle, LEDGER_HASH_KEY, batch.ledger_hash.to_bytes()); - self.db.write(batch.write_batch).expect(CRUD_ERROR); - } - - /// Set the disk ledger slot metadata - /// - /// # Arguments - /// * slot: associated slot of the current ledger - /// * batch: the given operation batch to update - fn set_slot(&self, slot: Slot, batch: &mut LedgerBatch) { - let handle = self.db.cf_handle(METADATA_CF).expect(CF_ERROR); - let mut slot_bytes = Vec::new(); - // Slot serialization never fails - self.slot_serializer - .serialize(&slot, &mut slot_bytes) - .unwrap(); - batch - .write_batch - .put_cf(handle, SLOT_KEY, slot_bytes.clone()); - // XOR previous slot and new one - if let Some(prev_bytes) = self.db.get_pinned_cf(handle, SLOT_KEY).expect(CRUD_ERROR) { - batch.ledger_hash ^= Hash::compute_from(&prev_bytes); - } - batch.ledger_hash ^= Hash::compute_from(&slot_bytes); - } - - pub fn get_slot(&self) -> Result<Slot, ModelsError> { - let handle = self.db.cf_handle(METADATA_CF).expect(CF_ERROR); - - let Ok(Some(slot_bytes)) = self.db.get_pinned_cf(handle, SLOT_KEY) else { - return Err(ModelsError::BufferError(String::from("Could not recover final_state_hash"))); - }; - - let (_rest, slot) = self.slot_deserializer.deserialize(&slot_bytes)?; - - Ok(slot) - } - - /// Internal function to put a key & value and perform the ledger hash XORs - fn put_entry_value( - &self, - handle: &ColumnFamily, - batch: &mut LedgerBatch, - key: &Key, - value: &[u8], - ) { - let mut serialized_key = Vec::new(); - self.key_serializer_db - .serialize(key, &mut serialized_key) - .expect(KEY_SER_ERROR); - let mut len_bytes = Vec::new(); - self.len_serializer - .serialize(&(serialized_key.len() as u64), &mut len_bytes) - .expect(KEY_LEN_SER_ERROR); - let hash = Hash::compute_from(&[&len_bytes, &serialized_key, value].concat()); - batch.ledger_hash ^= hash; - batch.aeh_list.insert(serialized_key.clone(), hash); - batch.write_batch.put_cf(handle, serialized_key, value); - } - /// Add every sub-entry individually for a given entry. /// /// # Arguments /// * `addr`: associated address /// * `ledger_entry`: complete entry to be added /// * `batch`: the given operation batch to update - fn put_entry(&mut self, addr: &Address, ledger_entry: LedgerEntry, batch: &mut LedgerBatch) { - let handle = self.db.cf_handle(LEDGER_CF).expect(CF_ERROR); + fn put_entry(&self, addr: &Address, ledger_entry: LedgerEntry, batch: &mut DBBatch) { + let db = self.db.read(); + // Amount serialization never fails let mut bytes_balance = Vec::new(); self.amount_serializer @@ -559,63 +258,30 @@ impl LedgerDB { .unwrap(); // balance - self.put_entry_value( - handle, - batch, - &Key::new(addr, KeyType::BALANCE), - &bytes_balance, - ); + let mut serialized_key = Vec::new(); + self.key_serializer_db + .serialize(&Key::new(addr, KeyType::BALANCE), &mut serialized_key) + .expect(KEY_SER_ERROR); + db.put_or_update_entry_value(batch, serialized_key, &bytes_balance); // bytecode - self.put_entry_value( - handle, - batch, - &Key::new(addr, KeyType::BYTECODE), - &bytes_bytecode, - ); - - // datastore - for (hash, entry) in ledger_entry.datastore { - self.put_entry_value( - handle, - batch, - &Key::new(addr, KeyType::DATASTORE(hash)), - &entry, - ); - } - } - - /// Internal function to update a key & value and perform the ledger hash XORs - fn update_key_value( - &self, - handle: &ColumnFamily, - batch: &mut LedgerBatch, - key: &Key, - value: &[u8], - ) { let mut serialized_key = Vec::new(); self.key_serializer_db - .serialize(key, &mut serialized_key) + .serialize(&Key::new(addr, KeyType::BYTECODE), &mut serialized_key) .expect(KEY_SER_ERROR); + db.put_or_update_entry_value(batch, serialized_key, &bytes_bytecode); - let mut len_bytes = Vec::new(); - self.len_serializer - .serialize(&(serialized_key.len() as u64), &mut len_bytes) - .expect(KEY_LEN_SER_ERROR); - if let Some(added_hash) = batch.aeh_list.get(&serialized_key) { - batch.ledger_hash ^= *added_hash; - } else if let Some(prev_bytes) = self - .db - .get_pinned_cf(handle, &serialized_key) - .expect(CRUD_ERROR) - { - batch.ledger_hash ^= -
Hash::compute_from(&[&len_bytes, &serialized_key, &prev_bytes[..]].concat()); + // datastore + for (hash, entry) in ledger_entry.datastore { + let mut serialized_key = Vec::new(); + self.key_serializer_db + .serialize( + &Key::new(addr, KeyType::DATASTORE(hash)), + &mut serialized_key, + ) + .expect(KEY_SER_ERROR); + db.put_or_update_entry_value(batch, serialized_key, &entry); } - let hash = Hash::compute_from(&[&len_bytes, &serialized_key, value].concat()); - batch.ledger_hash ^= hash; - batch.aeh_list.insert(serialized_key.clone(), hash); - batch.write_batch.put_cf(handle, serialized_key, value); } /// Update the ledger entry of a given address. @@ -623,13 +289,8 @@ impl LedgerDB { /// # Arguments /// * `entry_update`: a descriptor of the entry updates to be applied /// * `batch`: the given operation batch to update - fn update_entry( - &mut self, - addr: &Address, - entry_update: LedgerEntryUpdate, - batch: &mut LedgerBatch, - ) { - let handle = self.db.cf_handle(LEDGER_CF).expect(CF_ERROR); + fn update_entry(&self, addr: &Address, entry_update: LedgerEntryUpdate, batch: &mut DBBatch) { + let db = self.db.read(); // balance if let SetOrKeep::Set(balance) = entry_update.balance { @@ -639,8 +300,11 @@ impl LedgerDB { .serialize(&balance, &mut bytes) .unwrap(); - let balance_key = Key::new(addr, KeyType::BALANCE); - self.update_key_value(handle, batch, &balance_key, &bytes); + let mut serialized_key = Vec::new(); + self.key_serializer_db + .serialize(&Key::new(addr, KeyType::BALANCE), &mut serialized_key) + .expect(KEY_SER_ERROR); + db.put_or_update_entry_value(batch, serialized_key, &bytes); } // bytecode @@ -650,63 +314,59 @@ impl LedgerDB { .serialize(&bytecode, &mut bytes) .unwrap(); - let bytecode_key = Key::new(addr, KeyType::BYTECODE); - self.update_key_value(handle, batch, &bytecode_key, &bytes); + let mut serialized_key = Vec::new(); + self.key_serializer_db + .serialize(&Key::new(addr, KeyType::BYTECODE), &mut serialized_key) + .expect(KEY_SER_ERROR); + db.put_or_update_entry_value(batch, serialized_key, &bytes); } // datastore for (hash, update) in entry_update.datastore { - let datastore_key = Key::new(addr, KeyType::DATASTORE(hash)); + let mut serialized_key = Vec::new(); + self.key_serializer_db + .serialize( + &Key::new(addr, KeyType::DATASTORE(hash)), + &mut serialized_key, + ) + .expect(KEY_SER_ERROR); + match update { SetOrDelete::Set(entry) => { - self.update_key_value(handle, batch, &datastore_key, &entry) + db.put_or_update_entry_value(batch, serialized_key, &entry) } - SetOrDelete::Delete => self.delete_key(handle, batch, &datastore_key), + SetOrDelete::Delete => db.delete_key(batch, serialized_key), } } } - /// Internal function to delete a key and perform the ledger hash XOR - fn delete_key(&self, handle: &ColumnFamily, batch: &mut LedgerBatch, key: &Key) { - let mut serialized_key = Vec::new(); - self.key_serializer_db - .serialize(key, &mut serialized_key) - .expect(KEY_SER_ERROR); - if let Some(added_hash) = batch.aeh_list.get(&serialized_key) { - batch.ledger_hash ^= *added_hash; - } else if let Some(prev_bytes) = self - .db - .get_pinned_cf(handle, &serialized_key) - .expect(CRUD_ERROR) - { - let mut len_bytes = Vec::new(); - self.len_serializer - .serialize(&(serialized_key.len() as u64), &mut len_bytes) - .expect(KEY_LEN_SER_ERROR); - batch.ledger_hash ^= - Hash::compute_from(&[&len_bytes, &serialized_key, &prev_bytes[..]].concat()); - } - batch.write_batch.delete_cf(handle, serialized_key); - } - /// Delete every sub-entry associated to the given address. 
/// /// # Arguments /// * batch: the given operation batch to update - fn delete_entry(&self, addr: &Address, batch: &mut LedgerBatch) { - let handle = self.db.cf_handle(LEDGER_CF).expect(CF_ERROR); + fn delete_entry(&self, addr: &Address, batch: &mut DBBatch) { + let db = self.db.read(); + let handle = db.db.cf_handle(STATE_CF).expect(CF_ERROR); // balance - self.delete_key(handle, batch, &Key::new(addr, KeyType::BALANCE)); + let mut serialized_key = Vec::new(); + self.key_serializer_db + .serialize(&Key::new(addr, KeyType::BALANCE), &mut serialized_key) + .expect(KEY_SER_ERROR); + db.delete_key(batch, serialized_key); // bytecode - self.delete_key(handle, batch, &Key::new(addr, KeyType::BYTECODE)); + let mut serialized_key = Vec::new(); + self.key_serializer_db + .serialize(&Key::new(addr, KeyType::BYTECODE), &mut serialized_key) + .expect(KEY_SER_ERROR); + db.delete_key(batch, serialized_key); // datastore let mut opt = ReadOptions::default(); let key_prefix = datastore_prefix_from_address(addr); opt.set_iterate_upper_bound(end_prefix(&key_prefix).unwrap()); - for (key, _) in self + for (serialized_key, _) in db .db .iterator_cf_opt( handle, @@ -715,11 +375,7 @@ impl LedgerDB { ) .flatten() { - let (_, deserialized_key) = self - .key_deserializer_db - .deserialize::<DeserializeError>(&key) - .expect(KEY_DESER_ERROR); - self.delete_key(handle, batch, &deserialized_key); + db.delete_key(batch, serialized_key.to_vec()); } } } @@ -732,24 +388,31 @@ impl LedgerDB { /// /// # Returns /// A `BTreeMap` with the address as key and the balance as value - #[cfg(any(feature = "testing"))] + #[cfg(feature = "testing")] pub fn get_every_address( &self, ) -> std::collections::BTreeMap<Address, Amount> { use massa_models::address::AddressDeserializer; + let db = self.db.write(); - let handle = self.db.cf_handle(LEDGER_CF).expect(CF_ERROR); + let handle = db.db.cf_handle(STATE_CF).expect(CF_ERROR); - let ledger = self + let ledger = db .db - .iterator_cf(handle, IteratorMode::Start) + .prefix_iterator_cf(handle, LEDGER_PREFIX) + .take_while(|kv| { + kv.clone() + .unwrap_or_default() + .0 + .starts_with(LEDGER_PREFIX.as_bytes()) + }) .collect::<Vec<_>>(); let mut addresses = std::collections::BTreeMap::new(); let address_deserializer = AddressDeserializer::new(); for (key, entry) in ledger.iter().flatten() { let (rest, address) = address_deserializer - .deserialize::<DeserializeError>(&key[..]) + .deserialize::<DeserializeError>(&key[LEDGER_PREFIX.len()..]) .unwrap(); if rest.first() == Some(&BALANCE_IDENT) { let (_, amount) = self @@ -773,13 +436,15 @@ impl LedgerDB { &self, addr: &Address, ) -> std::collections::BTreeMap<Vec<u8>, Vec<u8>> { + let db = self.db.read(); + let key_prefix = datastore_prefix_from_address(addr); - let handle = self.db.cf_handle(LEDGER_CF).expect(CF_ERROR); + let handle = db.db.cf_handle(STATE_CF).expect(CF_ERROR); let mut opt = ReadOptions::default(); opt.set_iterate_upper_bound(end_prefix(&key_prefix).unwrap()); - self.db + db.db .iterator_cf_opt( handle, opt,
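// Illustration (not part of the patch): the scans above bound RocksDB iterators
// with `end_prefix`, the smallest byte string strictly greater than every key
// starting with `prefix`. A hedged sketch of that behaviour (not necessarily the
// exact implementation in this file):
fn end_prefix_sketch(prefix: &[u8]) -> Option<Vec<u8>> {
    let mut end = prefix.to_vec();
    while let Some(last) = end.last_mut() {
        if *last < u8::MAX {
            *last += 1; // bump the last byte that can be incremented
            return Some(end);
        }
        end.pop(); // trailing 0xff bytes carry over
    }
    None // an all-0xff prefix has no finite exclusive upper bound
}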
@@ -820,12 +485,12 @@ fn end_prefix(prefix: &[u8]) -> Option<Vec<u8>> { #[cfg(test)] mod tests { use super::*; + use massa_db::{MassaDB, STATE_HASH_INITIAL_BYTES}; use massa_hash::Hash; use massa_ledger_exports::{LedgerEntry, LedgerEntryUpdate, SetOrKeep}; use massa_models::{ address::Address, amount::{Amount, AmountDeserializer}, - streaming_step::StreamingStep, }; use massa_serialization::{DeserializeError, Deserializer}; use massa_signature::KeyPair; @@ -837,6 +502,8 @@ mod tests { #[cfg(test)] fn init_test_ledger(addr: Address) -> (LedgerDB, BTreeMap<Vec<u8>, Vec<u8>>) { // init data + use massa_db::MassaDBConfig; + let mut data = BTreeMap::new(); data.insert(b"1".to_vec(), b"a".to_vec()); data.insert(b"2".to_vec(), b"b".to_vec()); @@ -854,64 +521,78 @@ mod tests { // write data let temp_dir = TempDir::new().unwrap(); - let mut db = LedgerDB::new(temp_dir.path().to_path_buf(), 32, 255, 1_000_000, false); - let mut batch = LedgerBatch::new(Hash::from_bytes(LEDGER_HASH_INITIAL_BYTES)); - db.put_entry(&addr, entry, &mut batch); - db.update_entry(&addr, entry_update, &mut batch); - db.write_batch(batch); + + let db_config = MassaDBConfig { + path: temp_dir.path().to_path_buf(), + max_history_length: 10, + max_new_elements: 100, + thread_count: 32, + }; + + let db = Arc::new(RwLock::new(MassaDB::new(db_config))); + + let ledger_db = LedgerDB::new(db.clone(), 32, 255, 1000); + let mut batch = DBBatch::new(); + + ledger_db.put_entry(&addr, entry, &mut batch); + ledger_db.update_entry(&addr, entry_update, &mut batch); + ledger_db + .db + .write() + .write_batch(batch, Default::default(), None); // return db and initial data - (db, data) + (ledger_db, data) } /// Functional test of `LedgerDB` #[test] fn test_ledger_db() { - let addr = Address::from_public_key(&KeyPair::generate().get_public_key()); - let (db, data) = init_test_ledger(addr); + let addr = Address::from_public_key(&KeyPair::generate(0).unwrap().get_public_key()); + let (ledger_db, data) = init_test_ledger(addr); - let ledger_hash = db.get_ledger_hash(); let amount_deserializer = AmountDeserializer::new(Included(Amount::MIN), Included(Amount::MAX)); // check initial state and entry update - assert!(db.get_sub_entry(&addr, LedgerSubEntry::Balance).is_some()); + assert!(ledger_db + .get_sub_entry(&addr, LedgerSubEntry::Balance) + .is_some()); assert_eq!( amount_deserializer .deserialize::<DeserializeError>( - &db.get_sub_entry(&addr, LedgerSubEntry::Balance).unwrap() + &ledger_db + .get_sub_entry(&addr, LedgerSubEntry::Balance) + .unwrap() ) .unwrap() .1, Amount::from_str("21").unwrap() ); - assert_eq!(data, db.get_entire_datastore(&addr)); + assert_eq!(data, ledger_db.get_entire_datastore(&addr)); + assert_ne!( - Hash::from_bytes(LEDGER_HASH_INITIAL_BYTES), - db.get_ledger_hash() + Hash::from_bytes(STATE_HASH_INITIAL_BYTES), + ledger_db.db.read().get_db_hash() ); // delete entry - let mut batch = LedgerBatch::new(ledger_hash); - db.delete_entry(&addr, &mut batch); - db.write_batch(batch); + let mut batch = DBBatch::new(); + ledger_db.delete_entry(&addr, &mut batch); + ledger_db + .db + .write() + .write_batch(batch, Default::default(), None); // check deleted address and ledger hash assert_eq!( - Hash::from_bytes(LEDGER_HASH_INITIAL_BYTES), - db.get_ledger_hash() + Hash::from_bytes(STATE_HASH_INITIAL_BYTES), + ledger_db.db.read().get_db_hash() ); - assert!(db.get_sub_entry(&addr, LedgerSubEntry::Balance).is_none()); - assert!(db.get_entire_datastore(&addr).is_empty()); - } - - #[test] - fn test_ledger_parts() { - let pub_a = KeyPair::generate().get_public_key(); - let a = Address::from_public_key(&pub_a); - let (db, _) = init_test_ledger(a); - let res = db.get_ledger_part(StreamingStep::Started).unwrap(); - db.set_ledger_part(&res.0[..]).unwrap(); + assert!(ledger_db + .get_sub_entry(&addr, LedgerSubEntry::Balance) + .is_none()); + assert!(ledger_db.get_entire_datastore(&addr).is_empty()); } #[test]
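// Illustration (not part of the patch): the per-ledger XOR hash is gone; integrity
// is tracked by the shared DB, so the test above compares against the global
// initial state hash instead. Hedged sketch of that check:
fn db_is_empty_state(db: &Arc<RwLock<MassaDB>>) -> bool {
    db.read().get_db_hash() == massa_hash::Hash::from_bytes(massa_db::STATE_HASH_INITIAL_BYTES)
}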
diff --git a/massa-ledger-worker/src/test_exports/bootstrap.rs b/massa-ledger-worker/src/test_exports/bootstrap.rs index 8db918e746a..2c4454d886e 100644 --- a/massa-ledger-worker/src/test_exports/bootstrap.rs +++ b/massa-ledger-worker/src/test_exports/bootstrap.rs @@ -1,30 +1,31 @@ // Copyright (c) 2022 MASSA LABS +use massa_db::MassaDB; use massa_ledger_exports::{LedgerConfig, LedgerController, LedgerEntry}; use massa_models::address::Address; -use std::collections::HashMap; -use tempfile::TempDir; +use parking_lot::RwLock; +use std::{collections::HashMap, sync::Arc}; use crate::{ledger_db::LedgerDB, FinalLedger}; /// This file defines tools to test the ledger bootstrap pub fn create_final_ledger( + db: Arc<RwLock<MassaDB>>, config: LedgerConfig, initial_ledger: HashMap<Address, LedgerEntry>, ) -> FinalLedger { - let temp_dir = TempDir::new().unwrap(); - let mut db = LedgerDB::new( - temp_dir.path().to_path_buf(), + // Create final ledger + let mut ledger_db = LedgerDB::new( + db, config.thread_count, config.max_key_length, - config.max_ledger_part_size, - false, + config.max_datastore_value_length, ); - db.load_initial_ledger(initial_ledger); + ledger_db.load_initial_ledger(initial_ledger); FinalLedger { config, - sorted_ledger: db, + sorted_ledger: ledger_db, } } diff --git a/massa-ledger-worker/src/test_exports/config.rs b/massa-ledger-worker/src/test_exports/config.rs index ab15820165c..7c4eab6f445 100644 --- a/massa-ledger-worker/src/test_exports/config.rs +++ b/massa-ledger-worker/src/test_exports/config.rs @@ -1,23 +1,31 @@ // Copyright (c) 2022 MASSA LABS +use std::sync::Arc; + +use massa_db::{MassaDB, MassaDBConfig}; +use parking_lot::RwLock; /// This file defines testing tools related to the configuration use tempfile::TempDir; use crate::{ledger_db::LedgerDB, FinalLedger}; -use massa_models::config::{ - LEDGER_PART_SIZE_MESSAGE_BYTES, MAX_DATASTORE_KEY_LENGTH, THREAD_COUNT, -}; +use massa_models::config::{MAX_DATASTORE_KEY_LENGTH, MAX_DATASTORE_VALUE_LENGTH, THREAD_COUNT}; /// Default value of `FinalLedger` used for tests impl Default for FinalLedger { fn default() -> Self { let temp_dir = TempDir::new().unwrap(); + let db_config = MassaDBConfig { + path: temp_dir.path().to_path_buf(), + max_history_length: 10, + max_new_elements: 100, + thread_count: THREAD_COUNT, + }; + let db = MassaDB::new(db_config); let db = LedgerDB::new( - temp_dir.path().to_path_buf(), + Arc::new(RwLock::new(db)), THREAD_COUNT, MAX_DATASTORE_KEY_LENGTH, - LEDGER_PART_SIZE_MESSAGE_BYTES, - false, + MAX_DATASTORE_VALUE_LENGTH, ); FinalLedger { config: Default::default(), diff --git a/massa-logging/Cargo.toml b/massa-logging/Cargo.toml index 7371dc8a1d7..924798fb6fd 100644 --- a/massa-logging/Cargo.toml +++ b/massa-logging/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "massa_logging" -version = "0.1.0" +version = "0.23.0" authors = ["Massa Labs <info@massa.net>"] edition = "2021" @@ -9,7 +9,3 @@ edition = "2021" [dependencies] serde_json = "1.0" tracing = "0.1" - -[dev-dependencies] -pretty_assertions = "1.2" -serial_test = "1.0" diff --git a/massa-logging/src/lib.rs b/massa-logging/src/lib.rs index 59f6a0c898e..4a164954d07 100644 --- a/massa-logging/src/lib.rs +++ b/massa-logging/src/lib.rs @@ -2,10 +2,14 @@ //! Log utilities #![warn(missing_docs)] + +pub use serde_json; +pub use tracing; + #[macro_export] /// tracing with some context macro_rules! massa_trace { ($evt:expr, $params:tt) => { - tracing::trace!("massa:{}:{}", $evt, serde_json::json!($params)); + $crate::tracing::trace!("massa:{}:{}", $evt, $crate::serde_json::json!($params)); }; }
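// Illustration (not part of the patch): thanks to the `$crate::` re-exports above,
// downstream crates can invoke the macro without depending on `tracing` or
// `serde_json` themselves, e.g.:
fn trace_example(slot_str: &str) {
    massa_logging::massa_trace!("ledger.write_batch", { "slot": slot_str });
}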
diff --git a/massa-models/Cargo.toml b/massa-models/Cargo.toml index ff46c060e36..77ec1295350 100644 --- a/massa-models/Cargo.toml +++ b/massa-models/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "massa_models" -version = "0.1.0" +version = "0.23.0" authors = ["Massa Labs <info@massa.net>"] edition = "2021" @@ -15,9 +15,9 @@ thiserror = "1.0" num = { version = "0.4", features = ["serde"] } directories = "4.0" config = "0.13" -bs58 = { version = "0.4", features = ["check"] } -bitvec = { version = "1.0", features = ["serde"] } -nom = "7.1" +bs58 = { version = "=0.4", features = ["check"] } +bitvec = { version = "=1.0", features = ["serde"] } +nom = "=7.1" # custom modules massa_hash = { path = "../massa-hash" } @@ -25,6 +25,8 @@ massa_serialization = { path = "../massa-serialization" } massa_signature = { path = "../massa-signature" } massa_time = { path = "../massa-time" } massa_proto = { path = "../massa-proto" } +# TODO tag transition crate with a version number +transition = { git = "https://github.com/massalabs/transition.git", rev = "93fa3bf82f9f5ff421c78536879b7fd1b948ca75" } [dev-dependencies] serial_test = "1.0" diff --git a/massa-models/src/address.rs b/massa-models/src/address.rs index ed0f19d9a75..ae16e4fb421 100644 --- a/massa-models/src/address.rs +++ b/massa-models/src/address.rs @@ -2,22 +2,18 @@ use crate::error::ModelsError; use crate::prehash::PreHashed; -use massa_hash::{Hash, HashDeserializer}; +use massa_hash::{Hash, HashDeserializer, HASH_SIZE_BYTES}; use massa_serialization::{ - DeserializeError, Deserializer, Serializer, U64VarIntDeserializer, U64VarIntSerializer, + DeserializeError, Deserializer, SerializeError, Serializer, U64VarIntDeserializer, + U64VarIntSerializer, }; -use massa_signature::PublicKey; -use nom::branch::alt; -use nom::combinator::verify; -use nom::error::{context, ContextError, ParseError}; -use nom::sequence::preceded; +use massa_signature::{PublicKey, PublicKeyV0, PublicKeyV1}; +use nom::error::{context, ContextError, ErrorKind, ParseError}; use nom::{IResult, Parser}; use serde::{Deserialize, Serialize}; -use std::ops::Bound::Included; +use std::ops::Bound::{Excluded, Included}; use std::str::FromStr; - -/// Size of a serialized address, in bytes -pub const ADDRESS_SIZE_BYTES: usize = massa_hash::HASH_SIZE_BYTES + 1; +use transition::Versioned; /// Top level address representation that can differentiate between User and SC address #[derive(Clone, Copy, Eq, Hash, Ord, PartialEq, PartialOrd)] pub enum Address { /// User address User(UserAddress), /// Smart contract address SC(SCAddress), } -/// In the near future, this will encapsulate slot, idx, and is_write +#[allow(missing_docs)] +/// Derived from a public key. +#[transition::versioned(versions("0", "1"))] #[derive(Clone, Copy, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct SCAddress(pub Hash); +#[allow(missing_docs)] /// Derived from a public key.
+#[transition::versioned(versions("0", "1"))] #[derive(Clone, Copy, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct UserAddress(pub Hash); -/// TODO: This conversion will need re-writing/removal when SCAddress is optimised -impl From<SCAddress> for UserAddress { - fn from(value: SCAddress) -> Self { - Self(value.0) - } -} -/// TODO: This conversion will need re-writing/removal when SCAddress is optimised -impl From<UserAddress> for SCAddress { - fn from(value: UserAddress) -> Self { - Self(value.0) - } -} - const ADDRESS_PREFIX: char = 'A'; // serialized with varint const USER_PREFIX: u64 = 0; const SC_PREFIX: u64 = 1; -const ADDRESS_VERSION: u64 = 0; impl std::fmt::Display for Address { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + match self { + Address::User(address) => address.fmt(f), + Address::SC(address) => address.fmt(f), + } + } +} + +impl std::fmt::Display for UserAddress { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + match self { + UserAddress::UserAddressV0(address) => address.fmt(f), + UserAddress::UserAddressV1(address) => address.fmt(f), + } + } +} + +impl std::fmt::Display for SCAddress { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + match self { + SCAddress::SCAddressV0(address) => address.fmt(f), + SCAddress::SCAddressV1(address) => address.fmt(f), + } + } +} + +#[transition::impl_version(versions("0", "1"))] +impl std::fmt::Display for UserAddress { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { let u64_serializer = U64VarIntSerializer::new(); - // might want to allocate the vector with capacity in order to avoid re-allocation let mut bytes: Vec<u8> = Vec::new(); u64_serializer - .serialize(&ADDRESS_VERSION, &mut bytes) + .serialize(&Self::VERSION, &mut bytes) .map_err(|_| std::fmt::Error)?; - bytes.extend(self.hash_bytes()); + bytes.extend(self.0.to_bytes()); write!( f, - "{}{}{}", + "{}U{}", + ADDRESS_PREFIX, + bs58::encode(bytes).with_check().into_string() + ) + } +} + +#[transition::impl_version(versions("0", "1"))] +impl std::fmt::Display for SCAddress { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + let u64_serializer = U64VarIntSerializer::new(); + let mut bytes: Vec<u8> = Vec::new(); + u64_serializer + .serialize(&Self::VERSION, &mut bytes) + .map_err(|_| std::fmt::Error)?; + bytes.extend(self.0.to_bytes()); + write!( + f, + "{}S{}", ADDRESS_PREFIX, - match self { - Address::User(_) => 'U', - Address::SC(_) => 'S', - }, bs58::encode(bytes).with_check().into_string() ) } @@ -86,11 +113,50 @@ impl std::fmt::Debug for Address { } impl ::serde::Serialize for Address { + fn serialize<S: ::serde::Serializer>(&self, s: S) -> Result<S::Ok, S::Error> { + match self { + Address::User(address) => address.serialize(s), + Address::SC(address) => address.serialize(s), + } + } +} + +impl ::serde::Serialize for UserAddress { + fn serialize<S: ::serde::Serializer>(&self, s: S) -> Result<S::Ok, S::Error> { + match self { + UserAddress::UserAddressV0(address) => address.serialize(s), + UserAddress::UserAddressV1(address) => address.serialize(s), + } + } +} + +impl ::serde::Serialize for SCAddress { + fn serialize<S: ::serde::Serializer>(&self, s: S) -> Result<S::Ok, S::Error> { + match self { + SCAddress::SCAddressV0(address) => address.serialize(s), + SCAddress::SCAddressV1(address) => address.serialize(s), + } + } +} + +#[transition::impl_version(versions("0", "1"))] +impl ::serde::Serialize for UserAddress { + fn serialize<S: ::serde::Serializer>(&self, s: S) -> Result<S::Ok, S::Error> { + if s.is_human_readable() { + s.collect_str(&self.to_string()) + } else { + s.serialize_bytes(&self.to_prefixed_bytes()) + } + } +} + +#[transition::impl_version(versions("0", "1"))] +impl ::serde::Serialize for SCAddress { fn serialize<S: ::serde::Serializer>(&self, s: S) -> Result<S::Ok, S::Error> { if s.is_human_readable() { s.collect_str(&self.to_string()) } else { - s.serialize_bytes(&self.prefixed_bytes()) + s.serialize_bytes(&self.to_prefixed_bytes()) } } } @@ -154,19 +220,6 @@ impl<'de> ::serde::Deserialize<'de> for Address { impl FromStr for Address { type Err = ModelsError; - /// ## Example - /// ```rust - /// # use massa_signature::{PublicKey, KeyPair, Signature}; - /// # use massa_hash::Hash; - /// # use serde::{Deserialize, Serialize}; - /// # use std::str::FromStr; - /// # use massa_models::address::Address; - /// # let keypair = KeyPair::generate(); - /// # let address = Address::from_public_key(&keypair.get_public_key()); - /// let ser = address.to_string(); - /// let res_addr = Address::from_str(&ser).unwrap(); - /// assert_eq!(address, res_addr); - /// ``` fn from_str(s: &str) -> Result<Self, Self::Err> { let err = Err(ModelsError::AddressParseError(s.to_string())); @@ -179,120 +232,320 @@ impl FromStr for Address { return err; }; - // Turn the version + hash encoded string into a byte-vec - let data = chars.collect::<String>(); - let decoded_bs58_check = bs58::decode(data) - .with_check(None) - .into_vec() - .map_err(|_| ModelsError::AddressParseError(s.to_string()))?; - - // extract the version - let u64_deserializer = U64VarIntDeserializer::new(Included(0), Included(u64::MAX)); - let (rest, _version) = u64_deserializer - .deserialize::<DeserializeError>(&decoded_bs58_check[..]) - .map_err(|_| ModelsError::AddressParseError(s.to_string()))?; - - // ...and package it up - let res = UserAddress(Hash::from_bytes( - rest.try_into() - .map_err(|_| ModelsError::AddressParseError(s.to_string()))?, - )); - let res = match pref { - 'U' => Address::User(res), - 'S' => Address::SC(res.into()), + 'U' => Address::User(UserAddress::from_str_without_prefixed_type(&s[2..])?), + 'S' => Address::SC(SCAddress::from_str_without_prefixed_type(&s[2..])?), _ => return err, }; Ok(res) } } -impl PreHashed for Address {} - impl Address { /// Gets the associated thread. Depends on the `thread_count` + /// Returns thread 0 for SC addresses, even though we may want a real get_thread for them in the future pub fn get_thread(&self, thread_count: u8) -> u8 { - (self.hash_bytes()[0]) - .checked_shr(8 - thread_count.trailing_zeros()) - .unwrap_or(0) + match self { + Address::User(addr) => addr.get_thread(thread_count), + // TODO: tmp behaviour, discuss how we would want this to work + Address::SC(_addr) => 0, + } + } + + /// Computes the address associated with the given public key. + /// Depends on the Public Key version + pub fn from_public_key(public_key: &PublicKey) -> Self { + Address::User(UserAddress::from_public_key(public_key)) } - fn hash_bytes(&self) -> &[u8; 32] { + /// Serialize the address as bytes. Includes the type and version prefixes + pub fn to_prefixed_bytes(self) -> Vec<u8> { match self { - Address::User(addr) => addr.0.to_bytes(), - Address::SC(addr) => addr.0.to_bytes(), + Address::User(addr) => addr.to_prefixed_bytes(), + Address::SC(addr) => addr.to_prefixed_bytes(), } } +}
+impl UserAddress { + /// Gets the associated thread. Depends on the `thread_count` + fn get_thread(&self, thread_count: u8) -> u8 { + match self { + UserAddress::UserAddressV0(addr) => addr.get_thread(thread_count), + UserAddress::UserAddressV1(addr) => addr.get_thread(thread_count), + } + } + + /// Computes the address associated with the given public key + fn from_public_key(public_key: &PublicKey) -> Self { + match public_key { + PublicKey::PublicKeyV0(pk) => { + UserAddressVariant!["0"](<UserAddress!["0"]>::from_public_key(pk)) + } + PublicKey::PublicKeyV1(pk) => { + UserAddressVariant!["1"](<UserAddress!["1"]>::from_public_key(pk)) + } + } + } + + fn from_str_without_prefixed_type(s: &str) -> Result<Self, ModelsError> { + let decoded_bs58_check = bs58::decode(s).with_check(None).into_vec().map_err(|err| { + ModelsError::AddressParseError(format!( + "in UserAddress from_str_without_prefixed_type: {}", + err + )) + })?; + let u64_deserializer = U64VarIntDeserializer::new(Included(0), Included(u64::MAX)); + let (rest, version) = u64_deserializer + .deserialize::<DeserializeError>(&decoded_bs58_check[..]) + .map_err(|err| { + ModelsError::AddressParseError(format!( + "in UserAddress from_str_without_prefixed_type: {}", + err + )) + })?; + + match version { + <UserAddress!["0"]>::VERSION => Ok(UserAddressVariant!["0"]( + <UserAddress!["0"]>::from_bytes_without_version(rest)?, + )), + <UserAddress!["1"]>::VERSION => Ok(UserAddressVariant!["1"]( + <UserAddress!["1"]>::from_bytes_without_version(rest)?, + )), + unhandled_version => Err(ModelsError::AddressParseError(format!( + "version {} is not handled for UserAddress", + unhandled_version + ))), + } + } + + /// Serialize the address as bytes. Includes the type and version prefixes + pub fn to_prefixed_bytes(self) -> Vec<u8> { + match self { + UserAddress::UserAddressV0(addr) => addr.to_prefixed_bytes(), + UserAddress::UserAddressV1(addr) => addr.to_prefixed_bytes(), + } + } +} + +#[transition::impl_version(versions("0", "1"))] +impl UserAddress { + /// Fetches the version of the UserAddress + pub fn get_version(&self) -> u64 { + Self::VERSION + } + + /// Serialize the address as bytes. Includes the type and version prefixes + fn to_prefixed_bytes(self) -> Vec<u8> { + let mut buff = vec![]; + let addr_type_ser = U64VarIntSerializer::new(); + let addr_vers_ser = U64VarIntSerializer::new(); + addr_type_ser + .serialize(&USER_PREFIX, &mut buff) + .expect("impl always returns Ok(())"); + addr_vers_ser + .serialize(&Self::VERSION, &mut buff) + .expect("impl always returns Ok(())"); + buff.extend_from_slice(&self.0.to_bytes()[..]); + buff + } + + /// Gets the associated thread.
Depends on the `thread_count` + fn get_thread(&self, thread_count: u8) -> u8 { + (self.0.to_bytes()[0]) + .checked_shr(8 - thread_count.trailing_zeros()) + .unwrap_or(0) + } + + /// Deserialize the address without considering the version byte + fn from_bytes_without_version(data: &[u8]) -> Result<Self, ModelsError> { + Ok(UserAddress(Hash::from_bytes(&data.try_into().map_err( + |_| { + ModelsError::BufferError(format!( + "expected a buffer of size {}, but found a size of {}", + HASH_SIZE_BYTES, + &data.len() + )) + }, + )?))) + } +} + +#[transition::impl_version(versions("0", "1"), structures("UserAddress", "PublicKey"))] +impl UserAddress { /// Computes address associated with given public key pub fn from_public_key(public_key: &PublicKey) -> Self { - Address::User(UserAddress(Hash::compute_from(public_key.to_bytes()))) + UserAddress(Hash::compute_from(&public_key.to_bytes())) + } +} + +#[transition::impl_version(versions("0"))] +impl UserAddress {} + +#[transition::impl_version(versions("1"))] +impl UserAddress {} + +impl SCAddress { + fn from_str_without_prefixed_type(s: &str) -> Result<Self, ModelsError> { + let decoded_bs58_check = bs58::decode(s).with_check(None).into_vec().map_err(|err| { + ModelsError::AddressParseError(format!( + "in SCAddress from_str_without_prefixed_type: {}", + err + )) + })?; + let u64_deserializer = U64VarIntDeserializer::new(Included(0), Included(u64::MAX)); + let (rest, version) = u64_deserializer + .deserialize::<DeserializeError>(&decoded_bs58_check[..]) + .map_err(|err| { + ModelsError::AddressParseError(format!( + "in SCAddress from_str_without_prefixed_type: {}", + err + )) + })?; + + match version { + <SCAddress!["0"]>::VERSION => Ok(SCAddressVariant!["0"]( + <SCAddress!["0"]>::from_bytes_without_version(rest)?, + )), + <SCAddress!["1"]>::VERSION => Ok(SCAddressVariant!["1"]( + <SCAddress!["1"]>::from_bytes_without_version(rest)?, + )), + unhandled_version => Err(ModelsError::AddressParseError(format!( + "version {} is not handled for SCAddress", + unhandled_version + ))), + } + } + + /// Serialize the address as bytes. Includes the type and version prefixes + pub fn to_prefixed_bytes(self) -> Vec<u8> { + match self { + SCAddress::SCAddressV0(addr) => addr.to_prefixed_bytes(), + SCAddress::SCAddressV1(addr) => addr.to_prefixed_bytes(), + } } - /// Inner implementation for serializer. Mostly made available for the benefit of macros. - pub fn prefixed_bytes(&self) -> Vec<u8> { + /// Deserialize the address without considering the version byte + pub fn from_bytes_without_version(version: u64, data: &[u8]) -> Result<Self, ModelsError> { + match version { + <SCAddress!["0"]>::VERSION => Ok(SCAddressVariant!["0"]( + <SCAddress!["0"]>::from_bytes_without_version(data)?, + )), + <SCAddress!["1"]>::VERSION => Ok(SCAddressVariant!["1"]( + <SCAddress!["1"]>::from_bytes_without_version(data)?, + )), + unhandled_version => Err(ModelsError::AddressParseError(format!( + "version {} is not handled for SCAddress", + unhandled_version + ))), + } + } +} + +#[transition::impl_version(versions("0", "1"))] +impl SCAddress { + /// Fetches the version of the SC Address + pub fn get_version(&self) -> u64 { + Self::VERSION + } +} + +#[transition::impl_version(versions("0", "1"))] +impl SCAddress { + /// Serialize the address as bytes.
Includes the type and version prefixes + pub fn to_prefixed_bytes(self) -> Vec<u8> { let mut buff = vec![]; - let pref_ser = U64VarIntSerializer::new(); - let val = match self { - Address::User(_) => USER_PREFIX, - Address::SC(_) => SC_PREFIX, - }; - pref_ser - .serialize(&val, &mut buff) + let addr_type_ser = U64VarIntSerializer::new(); + let addr_vers_ser = U64VarIntSerializer::new(); + addr_type_ser + .serialize(&SC_PREFIX, &mut buff) + .expect("impl always returns Ok(())"); + addr_vers_ser + .serialize(&Self::VERSION, &mut buff) .expect("impl always returns Ok(())"); - buff.extend_from_slice(&self.hash_bytes()[..]); + buff.extend_from_slice(&self.0.to_bytes()[..]); buff } - #[cfg(any(test, feature = "testing"))] - /// Convenience wrapper around the address serializer. Useful for hard-coding an address when testing - pub fn from_prefixed_bytes(data: &[u8]) -> Result<Address, ModelsError> { - let deser = AddressDeserializer::new(); - let (_, res) = deser.deserialize::<DeserializeError>(data).map_err(|_| { - match std::str::from_utf8(data) { - Ok(res) => ModelsError::AddressParseError(res.to_string()), - Err(e) => { - ModelsError::AddressParseError(format!("Error on retrieve address : {}", e)) - } - } - })?; - Ok(res) + /// Deserialize the address without considering the version byte + fn from_bytes_without_version(data: &[u8]) -> Result<Self, ModelsError> { + Ok(SCAddress(Hash::from_bytes(&data.try_into().map_err( + |_| { + ModelsError::BufferError(format!( + "expected a buffer of size {}, but found a size of {}", + HASH_SIZE_BYTES, + &data.len() + )) + }, + )?))) } } +/* /!\ SCAddressV1 not prehashed! */ impl PreHashed for Address {} /// Serializer for `Address` #[derive(Default, Clone)] -pub struct AddressSerializer; +pub struct AddressSerializer { + type_serializer: U64VarIntSerializer, + version_serializer: U64VarIntSerializer, +} impl AddressSerializer { /// Serializes an `Address` into a `Vec<u8>` pub fn new() -> Self { - Self + Self { + type_serializer: U64VarIntSerializer::new(), + version_serializer: U64VarIntSerializer::new(), + } } }
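// Illustration (not part of the patch): both address families now serialize to the
// same prefixed layout, produced by the impls that follow:
//
//   [ type varint: USER_PREFIX = 0 / SC_PREFIX = 1 | version varint | 32-byte hash ]
//
// with the type tag written by the outer impls and the version written by the
// transition-versioned inner impls.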
impl Serializer<Address> for AddressSerializer { - /// # Example - /// ```rust - /// # use massa_signature::{PublicKey, KeyPair, Signature}; - /// # use serde::{Deserialize, Serialize}; - /// # use massa_models::address::{UserAddress, Address, AddressSerializer}; - /// # use massa_serialization::{Serializer, SerializeError}; - /// use massa_hash::Hash; - /// let bytes = &[0; 32]; - /// // Make a hard-coded 0-byte container - /// let ref_addr = Address::User(UserAddress(Hash::from_bytes(bytes))); - /// let mut vec = vec![]; - /// AddressSerializer::new().serialize(&ref_addr, &mut vec).unwrap(); - /// // the deser adds the prefix value '0' in a single byte - /// assert_eq!(vec, [[0].as_slice(), bytes.as_slice()].concat()); - /// ``` - fn serialize( - &self, - value: &Address, - buffer: &mut Vec<u8>, - ) -> Result<(), massa_serialization::SerializeError> { - buffer.extend_from_slice(&value.prefixed_bytes()); + fn serialize(&self, value: &Address, buffer: &mut Vec<u8>) -> Result<(), SerializeError> { + match value { + Address::User(addr) => self.serialize(addr, buffer), + Address::SC(addr) => self.serialize(addr, buffer), + } + } +} + +impl Serializer<UserAddress> for AddressSerializer { + fn serialize(&self, value: &UserAddress, buffer: &mut Vec<u8>) -> Result<(), SerializeError> { + self.type_serializer.serialize(&USER_PREFIX, buffer)?; + match value { + UserAddress::UserAddressV0(addr) => self.serialize(addr, buffer), + UserAddress::UserAddressV1(addr) => self.serialize(addr, buffer), + } + } +} + +#[transition::impl_version(versions("0", "1"), structures("UserAddress"))] +impl Serializer<UserAddress> for AddressSerializer { + fn serialize(&self, value: &UserAddress, buffer: &mut Vec<u8>) -> Result<(), SerializeError> { + self.version_serializer + .serialize(&value.get_version(), buffer)?; + buffer.extend_from_slice(&value.0.into_bytes()); + Ok(()) + } +} + +impl Serializer<SCAddress> for AddressSerializer { + fn serialize(&self, value: &SCAddress, buffer: &mut Vec<u8>) -> Result<(), SerializeError> { + self.type_serializer.serialize(&SC_PREFIX, buffer)?; + match value { + SCAddress::SCAddressV0(addr) => self.serialize(addr, buffer), + SCAddress::SCAddressV1(addr) => self.serialize(addr, buffer), + } + } +} + +#[transition::impl_version(versions("0", "1"), structures("SCAddress"))] +impl Serializer<SCAddress> for AddressSerializer { + fn serialize(&self, value: &SCAddress, buffer: &mut Vec<u8>) -> Result<(), SerializeError> { + self.version_serializer + .serialize(&value.get_version(), buffer)?; + buffer.extend_from_slice(&value.0.into_bytes()); Ok(()) } } @@ -300,86 +553,140 @@ impl Serializer<Address> for AddressSerializer {
for AddressSerializer { /// Deserializer for `Address` #[derive(Clone)] pub struct AddressDeserializer { + type_deserializer: U64VarIntDeserializer, + version_deserializer: U64VarIntDeserializer, hash_deserializer: HashDeserializer, - int_deserializer: U64VarIntDeserializer, } + +impl Default for AddressDeserializer { + fn default() -> Self { + AddressDeserializer::new() + } +} + impl AddressDeserializer { /// Creates a new deserializer for `Address` pub const fn new() -> Self { Self { + type_deserializer: U64VarIntDeserializer::new(Included(0), Included(1)), + version_deserializer: U64VarIntDeserializer::new(Included(0), Excluded(u64::MAX)), hash_deserializer: HashDeserializer::new(), - int_deserializer: U64VarIntDeserializer::new(Included(0), Included(1)), } } } impl Deserializer
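/* Editor's note (illustrative, not part of the patch): deserialization is now a two-stage dispatch. The top-level deserializer below reads the type varint and routes to the per-type impl, which reads the version varint and routes to the versioned impl that consumes the hash; conceptually:

       let (rest, ty) = type_deserializer.deserialize(buffer)?;    // USER_PREFIX or SC_PREFIX
       let (rest, vers) = version_deserializer.deserialize(rest)?; // 0 or 1 today
       let (rest, hash) = hash_deserializer.deserialize(rest)?;    // address payload
*/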
for AddressDeserializer { - /// # Example - /// ```rust - /// use massa_models::address::{UserAddress, Address, AddressDeserializer}; - /// use massa_serialization::{Deserializer, DeserializeError}; - /// use massa_hash::Hash; - /// // Make a hard-coded 0-byte container - /// let bytes = [[0].as_slice(), [0; 32].as_slice()].concat(); - /// let ref_addr = Address::User(UserAddress(Hash::from_bytes(bytes[1..33].try_into().unwrap()))); - /// let res_addr = AddressDeserializer::new().deserialize::(&bytes).unwrap(); - /// // the deser adds the prefix value '0' in a single byte - /// assert_eq!(ref_addr, res_addr.1); - /// assert_eq!(0, res_addr.0.len()); - /// ``` fn deserialize<'a, E: ParseError<&'a [u8]> + ContextError<&'a [u8]>>( &self, buffer: &'a [u8], ) -> IResult<&'a [u8], Address, E> { - context("Address Variant", |input| { - alt(( - |input| user_parser(&self.int_deserializer, &self.hash_deserializer, input), - |input| sc_parser(&self.int_deserializer, &self.hash_deserializer, input), - )) - .parse(input) + if buffer.len() < 2 { + return Err(nom::Err::Error(E::from_error_kind(buffer, ErrorKind::Eof))); + } + let (rest, addr_type) = + self.type_deserializer + .deserialize(buffer) + .map_err(|_: nom::Err| { + nom::Err::Error(E::from_error_kind(buffer, ErrorKind::Eof)) + })?; + match addr_type { + USER_PREFIX => { + let (rest, addr) = self.deserialize(rest)?; + Ok((rest, Address::User(addr))) + } + SC_PREFIX => { + let (rest, addr) = self.deserialize(rest)?; + Ok((rest, Address::SC(addr))) + } + _ => Err(nom::Err::Error(E::from_error_kind(buffer, ErrorKind::Eof))), + } + } +} + +impl Deserializer for AddressDeserializer { + fn deserialize<'a, E: ParseError<&'a [u8]> + ContextError<&'a [u8]>>( + &self, + buffer: &'a [u8], + ) -> IResult<&'a [u8], UserAddress, E> { + if buffer.len() < 2 { + return Err(nom::Err::Error(E::from_error_kind(buffer, ErrorKind::Eof))); + } + let (rest, addr_vers) = + self.version_deserializer + .deserialize(buffer) + .map_err(|_: nom::Err| { + nom::Err::Error(E::from_error_kind(buffer, ErrorKind::Eof)) + })?; + match addr_vers { + ::VERSION => { + let (rest, addr) = self.deserialize(rest)?; + Ok((rest, UserAddressVariant!["0"](addr))) + } + ::VERSION => { + let (rest, addr) = self.deserialize(rest)?; + Ok((rest, UserAddressVariant!["1"](addr))) + } + _ => Err(nom::Err::Error(E::from_error_kind(buffer, ErrorKind::Eof))), + } + } +} + +#[transition::impl_version(versions("0", "1"), structures("UserAddress"))] +impl Deserializer for AddressDeserializer { + fn deserialize<'a, E: ParseError<&'a [u8]> + ContextError<&'a [u8]>>( + &self, + buffer: &'a [u8], + ) -> IResult<&'a [u8], UserAddress, E> { + context("Failed UserAddress deserialization", |input| { + self.hash_deserializer.deserialize(input) }) + .map(UserAddress) .parse(buffer) } } -// used to make the `alt(...)` more readable -fn user_parser<'a, E: ParseError<&'a [u8]> + ContextError<&'a [u8]>>( - pref_deser: &U64VarIntDeserializer, - deser: &HashDeserializer, - input: &'a [u8], -) -> IResult<&'a [u8], Address, E> { - context( - "Failed attempt to deserialise User Address", - preceded( - verify( - |input| pref_deser.deserialize(input), - |val| *val == USER_PREFIX, - ), - |input| deser.deserialize(input), - ), - ) - .map(|hash| Address::User(UserAddress(hash))) - .parse(input) -} -// used to make the `alt(...)` more readable. 
Will be usefull when the SCAddress will be deserialised differently -fn sc_parser<'a, E: ParseError<&'a [u8]> + ContextError<&'a [u8]>>( - pref_deser: &U64VarIntDeserializer, - deser: &HashDeserializer, - input: &'a [u8], -) -> IResult<&'a [u8], Address, E> { - context( - "Failed attempt to deserialise SC Address", - preceded( - verify( - |input| pref_deser.deserialize(input), - |val| *val == SC_PREFIX, - ), - |input| deser.deserialize(input), - ), - ) - .map(|hash| Address::SC(SCAddress(hash))) - .parse(input) +impl Deserializer<SCAddress> for AddressDeserializer { + fn deserialize<'a, E: ParseError<&'a [u8]> + ContextError<&'a [u8]>>( + &self, + buffer: &'a [u8], + ) -> IResult<&'a [u8], SCAddress, E> { + if buffer.len() < 2 { + return Err(nom::Err::Error(E::from_error_kind(buffer, ErrorKind::Eof))); + } + let (rest, addr_vers) = + self.version_deserializer + .deserialize(buffer) + .map_err(|_: nom::Err<E>| { + nom::Err::Error(E::from_error_kind(buffer, ErrorKind::Eof)) + })?; + match addr_vers { + ::VERSION => { + let (rest, addr) = self.deserialize(rest)?; + Ok((rest, SCAddressVariant!["0"](addr))) + } + ::VERSION => { + let (rest, addr) = self.deserialize(rest)?; + Ok((rest, SCAddressVariant!["1"](addr))) + } + _ => Err(nom::Err::Error(E::from_error_kind(buffer, ErrorKind::Eof))), + } + } +} + +#[transition::impl_version(versions("0", "1"), structures("SCAddress"))] +impl Deserializer<SCAddress> for AddressDeserializer { + fn deserialize<'a, E: ParseError<&'a [u8]> + ContextError<&'a [u8]>>( + &self, + buffer: &'a [u8], + ) -> IResult<&'a [u8], SCAddress, E> { + context("Failed SCAddress deserialization", |input| { + self.hash_deserializer.deserialize(input) + }) + .map(SCAddress) + .parse(buffer) + } } + /// Info for a given address on a given cycle #[derive(Clone, Debug, Serialize, Deserialize)] pub struct ExecutionAddressCycleInfo { @@ -400,13 +707,29 @@ mod test { use super::*; #[test] - fn test_address_str_format() { - use massa_signature::KeyPair; - - let keypair = KeyPair::generate(); - let address = Address::from_public_key(&keypair.get_public_key()); - let a = address.to_string(); - let b = Address::from_str(&a).unwrap(); - assert_eq!(address, b); + fn test_address() { + let hash = massa_hash::Hash::compute_from(&"ADDR".as_bytes()); + + let user_addr_0 = Address::User(UserAddress::UserAddressV0(UserAddressV0(hash))); + let user_addr_1 = Address::User(UserAddress::UserAddressV1(UserAddressV1(hash))); + let sc_addr_0 = Address::SC(SCAddress::SCAddressV0(SCAddressV0(hash))); + let sc_addr_1 = Address::SC(SCAddress::SCAddressV1(SCAddressV1(hash))); + + println!("user_addr_0: {}", user_addr_0); + println!("user_addr_1: {}", user_addr_1); + println!("sc_addr_0: {}", sc_addr_0); + println!("sc_addr_1: {}", sc_addr_1); + + // let v1 = "AU12M3AQqs7JH7mSe1UZyEA5NQ7nGQHXaqqxe1TGEpkimcRhsQ4eF"; + let v2 = "AU4cJWyjpBetGwaRqFDXyrHiQuGB3QKrwjzGiGSzQPGeAARB9AY4"; + let addr = Address::from_str(v2).unwrap(); + + let mut buffer: Vec<u8> = vec![]; + let _ = AddressSerializer::new().serialize(&addr, &mut buffer); + let (_rest, addr2): (&[u8], Address) = AddressDeserializer::new() + .deserialize::<DeserializeError>(&buffer) + .unwrap(); + + assert_eq!(addr, addr2); } } diff --git a/massa-models/src/block.rs b/massa-models/src/block.rs index 7b7d3ab646d..67b460d7db5 100644 --- a/massa-models/src/block.rs +++ b/massa-models/src/block.rs @@ -150,7 +150,7 @@ impl Serializer<Block> for BlockSerializer { /// use massa_hash::Hash; /// use massa_signature::KeyPair; /// use massa_serialization::{Serializer, Deserializer, DeserializeError}; - /// let 
keypair = KeyPair::generate(); + /// let keypair = KeyPair::generate(0).unwrap(); /// let parents = (0..THREAD_COUNT) /// .map(|i| BlockId(Hash::compute_from(&[i]))) /// .collect(); @@ -158,6 +158,8 @@ impl Serializer for BlockSerializer { /// // create block header /// let orig_header = BlockHeader::new_verifiable( /// BlockHeader { + /// current_version: 0, + /// announced_version: 0, /// slot: Slot::new(1, 1), /// parents, /// operation_merkle_root: Hash::compute_from("mno".as_bytes()), @@ -251,7 +253,7 @@ impl Deserializer for BlockDeserializer { /// use massa_hash::Hash; /// use massa_signature::KeyPair; /// use massa_serialization::{Serializer, Deserializer, DeserializeError}; - /// let keypair = KeyPair::generate(); + /// let keypair = KeyPair::generate(0).unwrap(); /// let parents: Vec = (0..THREAD_COUNT) /// .map(|i| BlockId(Hash::compute_from(&[i]))) /// .collect(); @@ -259,6 +261,8 @@ impl Deserializer for BlockDeserializer { /// // create block header /// let orig_header = BlockHeader::new_verifiable( /// BlockHeader { + /// current_version: 0, + /// announced_version: 0, /// slot: Slot::new(1, 1), /// parents: parents.clone(), /// operation_merkle_root: Hash::compute_from("mno".as_bytes()), @@ -471,6 +475,8 @@ mod test { // create block header let orig_header = BlockHeader::new_verifiable( BlockHeader { + current_version: 0, + announced_version: 0, slot: Slot::new(1, 0), parents, operation_merkle_root: Hash::compute_from("mno".as_bytes()), @@ -547,12 +553,14 @@ mod test { #[test] #[serial] fn test_genesis_block_serialization() { - let keypair = KeyPair::generate(); + let keypair = KeyPair::generate(0).unwrap(); let parents: Vec = vec![]; // create block header let orig_header = BlockHeader::new_verifiable( BlockHeader { + current_version: 0, + announced_version: 0, slot: Slot::new(0, 1), parents, operation_merkle_root: Hash::compute_from("mno".as_bytes()), @@ -626,7 +634,7 @@ mod test { #[test] #[serial] fn test_invalid_genesis_block_serialization_with_endorsements() { - let keypair = KeyPair::generate(); + let keypair = KeyPair::generate(0).unwrap(); let parents: Vec = vec![]; // Genesis block do not have any parents and thus cannot embed endorsements @@ -639,6 +647,8 @@ mod test { // create block header let orig_header = BlockHeader::new_verifiable( BlockHeader { + current_version: 0, + announced_version: 0, slot: Slot::new(0, 1), parents, operation_merkle_root: Hash::compute_from("mno".as_bytes()), @@ -691,7 +701,7 @@ mod test { #[test] #[serial] fn test_invalid_genesis_block_serialization_with_parents() { - let keypair = KeyPair::generate(); + let keypair = KeyPair::generate(0).unwrap(); let parents = (0..THREAD_COUNT) .map(|i| BlockId(Hash::compute_from(&[i]))) .collect(); @@ -699,6 +709,8 @@ mod test { // create block header let orig_header = BlockHeader::new_verifiable( BlockHeader { + current_version: 0, + announced_version: 0, slot: Slot::new(0, 1), parents, operation_merkle_root: Hash::compute_from("mno".as_bytes()), @@ -743,12 +755,14 @@ mod test { #[test] #[serial] fn test_invalid_block_serialization_no_parents() { - let keypair = KeyPair::generate(); + let keypair = KeyPair::generate(0).unwrap(); // Non genesis block must have THREAD_COUNT parents // create block header let orig_header = BlockHeader::new_verifiable( BlockHeader { + current_version: 0, + announced_version: 0, slot: Slot::new(1, 1), parents: vec![], operation_merkle_root: Hash::compute_from("mno".as_bytes()), @@ -793,7 +807,7 @@ mod test { #[test] #[serial] fn 
test_invalid_block_serialization_obo_high_parent_count() { - let keypair = KeyPair::generate(); + let keypair = KeyPair::generate(0).unwrap(); // Non genesis block must have THREAD_COUNT parents let parents = (0..=THREAD_COUNT) .map(|i| BlockId(Hash::compute_from(&[i]))) @@ -802,6 +816,8 @@ mod test { // create block header let orig_header = BlockHeader::new_verifiable( BlockHeader { + current_version: 0, + announced_version: 0, slot: Slot::new(1, 1), parents, operation_merkle_root: Hash::compute_from("mno".as_bytes()), @@ -877,6 +893,8 @@ mod test { // create block header let orig_header = BlockHeader::new_verifiable( BlockHeader { + current_version: 0, + announced_version: 0, slot: Slot::new(1, 0), parents, operation_merkle_root: Hash::compute_from("mno".as_bytes()), @@ -923,7 +941,7 @@ mod test { #[test] #[serial] fn test_invalid_block_serialization_obo_low_parent_count() { - let keypair = KeyPair::generate(); + let keypair = KeyPair::generate(0).unwrap(); // Non genesis block must have THREAD_COUNT parents let parents = (1..THREAD_COUNT) .map(|i| BlockId(Hash::compute_from(&[i]))) @@ -932,6 +950,8 @@ mod test { // create block header let orig_header = BlockHeader::new_verifiable( BlockHeader { + current_version: 0, + announced_version: 0, slot: Slot::new(1, 1), parents, operation_merkle_root: Hash::compute_from("mno".as_bytes()), @@ -976,7 +996,7 @@ mod test { #[test] #[serial] fn test_invalid_block_serialization_obo_high_endo_count() { - let keypair = KeyPair::generate(); + let keypair = KeyPair::generate(0).unwrap(); // Non genesis block must have THREAD_COUNT parents let parents = (0..THREAD_COUNT) .map(|i| BlockId(Hash::compute_from(&[i]))) @@ -999,6 +1019,8 @@ mod test { // create block header let orig_header = BlockHeader::new_verifiable( BlockHeader { + current_version: 0, + announced_version: 0, slot: Slot::new(1, 1), parents, operation_merkle_root: Hash::compute_from("mno".as_bytes()), @@ -1071,6 +1093,8 @@ mod test { // create block header let orig_header = BlockHeader::new_verifiable( BlockHeader { + current_version: 0, + announced_version: 0, slot: Slot::new(1, 0), parents, operation_merkle_root: Hash::compute_from("mno".as_bytes()), @@ -1155,6 +1179,8 @@ mod test { // create block header let orig_header = BlockHeader::new_verifiable( BlockHeader { + current_version: 0, + announced_version: 0, slot: Slot::new(1, 0), parents, operation_merkle_root: Hash::compute_from("mno".as_bytes()), diff --git a/massa-models/src/block_header.rs b/massa-models/src/block_header.rs index 9cb1e499eaf..e0204ca5a03 100644 --- a/massa-models/src/block_header.rs +++ b/massa-models/src/block_header.rs @@ -27,6 +27,10 @@ use std::fmt::Formatter; /// block header #[derive(Debug, Clone, Serialize, Deserialize)] pub struct BlockHeader { + /// current network version + pub current_version: u32, + /// announced network version + pub announced_version: u32, /// slot pub slot: Slot, /// parents @@ -42,6 +46,9 @@ pub struct BlockHeader { // TODO: gh-issue #3398 #[cfg(any(test, feature = "testing"))] impl BlockHeader { + /// This is an intentional duplication of invariant checks. In production code, + /// these checks are dispersed throughout the deserialization process. This test-only function + /// allows all the checks to be in one place. 
fn assert_invariants( &self, thread_count: u8, @@ -164,11 +171,13 @@ impl Serializer for BlockHeaderSerializer { /// use massa_signature::KeyPair; /// use massa_serialization::Serializer; /// - /// let keypair = KeyPair::generate(); + /// let keypair = KeyPair::generate(0).unwrap(); /// let parents = (0..THREAD_COUNT) /// .map(|i| BlockId(Hash::compute_from(&[i]))) /// .collect(); /// let header = BlockHeader { + /// current_version: 0, + /// announced_version: 0, /// slot: Slot::new(1, 1), /// parents, /// operation_merkle_root: Hash::compute_from("mno".as_bytes()), @@ -200,7 +209,15 @@ impl Serializer for BlockHeaderSerializer { /// BlockHeaderSerializer::new().serialize(&header, &mut buffer).unwrap(); /// ``` fn serialize(&self, value: &BlockHeader, buffer: &mut Vec) -> Result<(), SerializeError> { + // network versions + self.u32_serializer + .serialize(&value.current_version, buffer)?; + self.u32_serializer + .serialize(&value.announced_version, buffer)?; + + // slot self.slot_serializer.serialize(&value.slot, buffer)?; + // parents (note: there should be none if slot period=0) if value.parents.is_empty() { buffer.push(0); @@ -254,6 +271,7 @@ pub struct BlockHeaderDeserializer { last_start_period: Option, denunciation_len_deserializer: U32VarIntDeserializer, denunciation_deserializer: DenunciationDeserializer, + network_versions_deserializer: U32VarIntDeserializer, } impl BlockHeaderDeserializer { @@ -280,6 +298,10 @@ impl BlockHeaderDeserializer { Included(0), Included(max_denunciations_in_block_header), ), + network_versions_deserializer: U32VarIntDeserializer::new( + Included(0), + Included(u32::MAX), + ), denunciation_deserializer: DenunciationDeserializer::new( thread_count, endorsement_count, @@ -302,11 +324,13 @@ impl Deserializer for BlockHeaderDeserializer { /// use massa_signature::KeyPair; /// use massa_serialization::{Serializer, Deserializer, DeserializeError}; /// - /// let keypair = KeyPair::generate(); + /// let keypair = KeyPair::generate(0).unwrap(); /// let parents: Vec = (0..THREAD_COUNT) /// .map(|i| BlockId(Hash::compute_from(&[i]))) /// .collect(); /// let header = BlockHeader { + /// current_version: 0, + /// announced_version: 0, /// slot: Slot::new(1, 1), /// parents: parents.clone(), /// operation_merkle_root: Hash::compute_from("mno".as_bytes()), @@ -342,65 +366,79 @@ impl Deserializer for BlockHeaderDeserializer { /// BlockHeaderSerializer::new().serialize(&deserialized_header, &mut buffer2).unwrap(); /// assert_eq!(buffer, buffer2); /// ``` + #[allow(clippy::type_complexity)] fn deserialize<'a, E: ParseError<&'a [u8]> + ContextError<&'a [u8]>>( &self, buffer: &'a [u8], ) -> IResult<&'a [u8], BlockHeader, E> { - let (rest, (slot, parents, operation_merkle_root)): (&[u8], (Slot, Vec, Hash)) = - context("Failed BlockHeader deserialization", |input| { - let (rest, (slot, parents)) = tuple(( - context("Failed slot deserialization", |input| { - self.slot_deserializer.deserialize(input) - }), - context( - "Failed parents deserialization", - alt(( - preceded(tag(&[0]), |input| Ok((input, Vec::new()))), - preceded( - tag(&[1]), - count( - context("Failed block_id deserialization", |input| { - self.hash_deserializer - .deserialize(input) - .map(|(rest, hash)| (rest, BlockId(hash))) - }), - self.thread_count as usize, - ), + let (rest, (current_version, announced_version, slot, parents, operation_merkle_root)): ( + &[u8], + (u32, u32, Slot, Vec, Hash), + ) = context("Failed BlockHeader deserialization", |input| { + let (rest, (current_version, announced_version, 
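/* Editor's note (illustrative, not part of the patch): the serializer above writes the two network-version varints first, so a serialized header is now laid out as

       [current_version varint][announced_version varint]
       [slot][parents: 0x00, or 0x01 followed by thread_count block ids]
       [operation_merkle_root][endorsements and denunciations...]

   and the tuple parser below consumes the fields in that same order. */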
slot, parents)) = tuple(( + context("Failed current_version deserialization", |input| { + self.network_versions_deserializer.deserialize(input) + }), + context("Failed announced_version deserialization", |input| { + self.network_versions_deserializer.deserialize(input) + }), + context("Failed slot deserialization", |input| { + self.slot_deserializer.deserialize(input) + }), + context( + "Failed parents deserialization", + alt(( + preceded(tag(&[0]), |input| Ok((input, Vec::new()))), + preceded( + tag(&[1]), + count( + context("Failed block_id deserialization", |input| { + self.hash_deserializer + .deserialize(input) + .map(|(rest, hash)| (rest, BlockId(hash))) + }), + self.thread_count as usize, ), - )), - ), - )) - .parse(input)?; - - // validate the parent/slot invariants before moving on to other fields - if let Some(last_start_period) = self.last_start_period { - if slot.period == last_start_period && !parents.is_empty() { - return Err(nom::Err::Failure(ContextError::add_context( - rest, - "Genesis block cannot contain parents", - ParseError::from_error_kind(rest, nom::error::ErrorKind::Fail), - ))); - } else if slot.period != last_start_period - && parents.len() != self.thread_count as usize - { - return Err(nom::Err::Failure(ContextError::add_context( - rest, - "Non-genesis block must have same numbers of parents as threads count", - ParseError::from_error_kind(rest, nom::error::ErrorKind::Fail), - ))); - } + ), + )), + ), + )) + .parse(input)?; + + // validate the parent/slot invariants before moving on to other fields + if let Some(last_start_period) = self.last_start_period { + if slot.period == last_start_period && !parents.is_empty() { + return Err(nom::Err::Failure(ContextError::add_context( + rest, + "Genesis block cannot contain parents", + ParseError::from_error_kind(rest, nom::error::ErrorKind::Fail), + ))); + } else if slot.period != last_start_period + && parents.len() != self.thread_count as usize + { + return Err(nom::Err::Failure(ContextError::add_context( + rest, + "Non-genesis block must have same numbers of parents as threads count", + ParseError::from_error_kind(rest, nom::error::ErrorKind::Fail), + ))); } + } - let (rest, merkle) = context("Failed operation_merkle_root", |input| { - self.hash_deserializer.deserialize(input) - }) - .parse(rest)?; - Ok((rest, (slot, parents, merkle))) + let (rest, merkle) = context("Failed operation_merkle_root", |input| { + self.hash_deserializer.deserialize(input) }) - .parse(buffer)?; + .parse(rest)?; + Ok(( + rest, + (current_version, announced_version, slot, parents, merkle), + )) + }) + .parse(buffer)?; if parents.is_empty() { let res = BlockHeader { + current_version, + announced_version, slot, parents, operation_merkle_root, @@ -478,6 +516,8 @@ impl Deserializer for BlockHeaderDeserializer { .parse(rest)?; let header = BlockHeader { + current_version, + announced_version, slot, parents, operation_merkle_root, @@ -577,7 +617,7 @@ mod test { #[test] fn test_block_header_ser_der() { - let keypair = KeyPair::generate(); + let keypair = KeyPair::generate(0).unwrap(); let slot = Slot::new(7, 1); let parents_1: Vec = (0..THREAD_COUNT) @@ -604,6 +644,8 @@ mod test { let de_b = Denunciation::try_from((&s_endo_1, &s_endo_2)).unwrap(); let block_header_1 = BlockHeader { + current_version: 0, + announced_version: 0, slot, parents: parents_1, operation_merkle_root: Hash::compute_from("mno".as_bytes()), diff --git a/massa-models/src/config/compact_config.rs b/massa-models/src/config/compact_config.rs index 97ae34dbe44..76f3350b055 100644 
--- a/massa-models/src/config/compact_config.rs +++ b/massa-models/src/config/compact_config.rs @@ -51,10 +51,10 @@ impl Display for CompactConfig { writeln!( f, " Genesis time: {}", - self.genesis_timestamp.to_utc_string() + self.genesis_timestamp.format_instant() )?; if let Some(end) = self.end_timestamp { - writeln!(f, " End time: {}", end.to_utc_string())?; + writeln!(f, " End time: {}", end.format_instant())?; } writeln!(f, " Thread count: {}", self.thread_count)?; writeln!(f, " t0: {}", self.t0)?; diff --git a/massa-models/src/config/constants.rs b/massa-models/src/config/constants.rs index e279d13e97f..ec9da3d3181 100644 --- a/massa-models/src/config/constants.rs +++ b/massa-models/src/config/constants.rs @@ -15,14 +15,21 @@ //! (`default_testing.rs`) But as for the current file you shouldn't modify it. use std::str::FromStr; -use crate::{ - address::ADDRESS_SIZE_BYTES, amount::Amount, serialization::u32_be_bytes_min_length, - version::Version, -}; +use crate::{amount::Amount, serialization::u32_be_bytes_min_length, version::Version}; use massa_signature::KeyPair; use massa_time::MassaTime; use num::rational::Ratio; +/// Downtime simulation start timestamp +pub const DOWNTIME_START_TIMESTAMP: MassaTime = MassaTime::from_millis(1686312000000); // Friday 9 June 2023 12:00:00 UTC +/// Downtime simulation end timestamp +pub const DOWNTIME_END_TIMESTAMP: MassaTime = MassaTime::from_millis(1686319200000); // Friday 9 June 2023 14:00:00 UTC +/// Downtime simulation end timestamp for bootstrap servers +pub const DOWNTIME_END_TIMESTAMP_BOOTSTRAP: MassaTime = MassaTime::from_millis(1686312060000); // Friday 9 June 2023 12:01:00 UTC + +/// IMPORTANT TODO: should be removed after the bootstrap messages refactor +pub const SIGNATURE_DESER_SIZE: usize = 64 + 1; + /// Limit on the number of peers we advertise to others. pub const MAX_ADVERTISE_LENGTH: u32 = 10000; /// Maximum message length in bytes @@ -44,7 +51,7 @@ lazy_static::lazy_static! { /// In sandbox mode, the value depends on starting time and on the --restart-from-snapshot-at-period argument in CLI, /// so that the network starts or restarts 10 seconds after launch pub static ref GENESIS_TIMESTAMP: MassaTime = if cfg!(feature = "sandbox") { - std::env::var("GENESIS_TIMESTAMP").map(|timestamp| timestamp.parse::<u64>().unwrap().into()).unwrap_or_else(|_| + std::env::var("GENESIS_TIMESTAMP").map(|timestamp| MassaTime::from_millis(timestamp.parse::<u64>().unwrap())).unwrap_or_else(|_| MassaTime::now() .unwrap() .saturating_sub( @@ -54,14 +61,14 @@ lazy_static::lazy_static! ) ) } else { - 1683498600000.into() // Sunday, May 7, 2023 10:30:00 PM UTC + MassaTime::from_millis(1685955600000) // Monday, June 5, 2023 9:00:00 AM UTC }; /// TESTNET: time when the blockclique is ended. pub static ref END_TIMESTAMP: Option<MassaTime> = if cfg!(feature = "sandbox") { None } else { - Some(1685556000000.into()) // Sunday, May 30, 2023 06:00:00 PM UTC + Some(MassaTime::from_millis(1688140800000)) // Friday, June 30, 2023 04:00:00 PM UTC }; /// `KeyPair` to sign genesis blocks. pub static ref GENESIS_KEY: KeyPair = KeyPair::from_str("S1UxdCJv5ckDK8z87E5Jq5fEfSVLi2cTHgtpfZy7iURs3KpPns8") @@ -71,9 +78,9 @@ lazy_static::lazy_static! 
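/* Editor's note (illustrative, not from the patch): in sandbox builds the genesis time above can be overridden through an environment variable holding a millisecond UNIX timestamp, now parsed strictly as MassaTime::from_millis(timestamp.parse::<u64>().unwrap()). A hypothetical invocation (the command line is an assumption):

       GENESIS_TIMESTAMP=1685955600000 cargo run --features sandbox
*/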
{ /// node version pub static ref VERSION: Version = { if cfg!(feature = "sandbox") { - "SAND.22.1" + "SAND.23.0" } else { - "TEST.22.2" + "TEST.23.0" } .parse() .unwrap() }; } @@ -101,8 +108,10 @@ pub const ROLL_PRICE: Amount = Amount::from_mantissa_scale(100, 0); pub const BLOCK_REWARD: Amount = Amount::from_mantissa_scale(3, 1); /// Cost to store one byte in the ledger pub const LEDGER_COST_PER_BYTE: Amount = Amount::from_mantissa_scale(25, 5); -/// Cost for a base entry (address + balance (5 bytes constant)) -pub const LEDGER_ENTRY_BASE_SIZE: usize = ADDRESS_SIZE_BYTES + 8; +/// Address size in bytes +pub const ADDRESS_SIZE_BYTES: usize = 32; +/// Cost for a base ledger entry (Amount::from_mantissa_scale(1, 2) = 1 x 10^-2 = 0.01 MASSA) +pub const LEDGER_ENTRY_BASE_COST: Amount = Amount::from_mantissa_scale(1, 2); /// Cost for a base entry datastore 10 bytes constant to avoid paying more for longer keys pub const LEDGER_ENTRY_DATASTORE_BASE_SIZE: usize = 10; /// Time between the periods in the same thread. @@ -127,6 +136,8 @@ pub const MAX_ASYNC_MESSAGE_DATA: u64 = 1_000_000; pub const OPERATION_VALIDITY_PERIODS: u64 = 10; /// cycle duration in periods pub const PERIODS_PER_CYCLE: u64 = 128; +/// number of periods between two consecutive DB backups +pub const PERIODS_BETWEEN_BACKUPS: u64 = 128; /// Number of cycles saved in `PoSFinalState` /// /// 6 for PoS itself so we can check denuncations on selections at C-2 after a bootstrap @@ -138,14 +149,6 @@ pub const POS_SAVED_CYCLES: usize = 7; /// 5 to have a C-2 to C+2 range (6 cycles post-bootstrap give 5 cycle draws) /// 1 for margin pub const SELECTOR_DRAW_CACHE_SIZE: usize = 6; -/// Maximum size batch of data in a part of the ledger -pub const LEDGER_PART_SIZE_MESSAGE_BYTES: u64 = 1_000_000; -/// Maximum async messages in a batch of the bootstrap of the async pool -pub const ASYNC_POOL_BOOTSTRAP_PART_SIZE: u64 = 100; -/// Maximum proof-of-stake deferred credits in a bootstrap batch -pub const DEFERRED_CREDITS_BOOTSTRAP_PART_SIZE: u64 = 100; -/// Maximum executed ops per slot in a bootstrap batch -pub const EXECUTED_OPS_BOOTSTRAP_PART_SIZE: u64 = 10; /// Maximum number of consensus blocks in a bootstrap batch pub const CONSENSUS_BOOTSTRAP_PART_SIZE: u64 = 50; /// Maximum number of consensus block ids when sending a bootstrap cursor from the client @@ -206,12 +209,12 @@ pub const MAX_BOOTSTRAP_DEPS: u32 = 1000; pub const MAX_BOOTSTRAP_CHILDREN: u32 = 1000; /// Max number of cycles in PoS bootstrap pub const MAX_BOOTSTRAP_POS_CYCLES: u32 = 5; -/// Max number of address and random entries for PoS bootstrap -pub const MAX_BOOTSTRAP_POS_ENTRIES: u32 = 1000000000; /// Max async pool changes pub const MAX_BOOTSTRAP_ASYNC_POOL_CHANGES: u64 = 100_000; /// Max bytes in final states parts pub const MAX_BOOTSTRAP_FINAL_STATE_PARTS_SIZE: u64 = 1_000_000_000; +/// Max number of new elements in a bootstrap batch +pub const MAX_BOOTSTRAPPED_NEW_ELEMENTS: u64 = 500; /// Max size of the IP list pub const IP_LIST_MAX_SIZE: usize = 10000; /// Size of the random bytes array used for the bootstrap, safe to import @@ -223,8 +226,12 @@ pub const MAX_BOOTSTRAP_ERROR_LENGTH: u64 = 10000; pub const PROTOCOL_CONTROLLER_CHANNEL_SIZE: usize = 1024; /// Protocol event channel size pub const PROTOCOL_EVENT_CHANNEL_SIZE: usize = 1024; -/// Pool controller channel size -pub const POOL_CONTROLLER_CHANNEL_SIZE: usize = 1024; +/// Pool controller operations channel size +pub const POOL_CONTROLLER_OPERATIONS_CHANNEL_SIZE: usize = 1024; +/// Pool controller endorsements channel size +pub const POOL_CONTROLLER_ENDORSEMENTS_CHANNEL_SIZE: usize = 1024; +/// Pool controller 
denunciations channel size +pub const POOL_CONTROLLER_DENUNCIATIONS_CHANNEL_SIZE: usize = 1024; // *********************** // Constants used for execution module (injected from ConsensusConfig) diff --git a/massa-models/src/denunciation.rs b/massa-models/src/denunciation.rs index c032d7a2d85..547e82291e6 100644 --- a/massa-models/src/denunciation.rs +++ b/massa-models/src/denunciation.rs @@ -745,7 +745,7 @@ impl Deserializer for DenunciationDeserializer { // Denunciation Index -#[derive(Debug, Clone, Hash, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, Serialize, Deserialize)] /// Index for Denunciations in collections (e.g. like a HashMap...) pub enum DenunciationIndex { /// Variant for Block header denunciation index @@ -869,6 +869,7 @@ impl From<&DenunciationIndex> for DenunciationIndexTypeId { } } +#[derive(Clone)] /// Serializer for `DenunciationIndex` pub struct DenunciationIndexSerializer { u32_serializer: U32VarIntSerializer, @@ -916,6 +917,7 @@ impl Serializer for DenunciationIndexSerializer { } } +#[derive(Clone)] /// Deserializer for `DenunciationIndex` pub struct DenunciationIndexDeserializer { id_deserializer: U32VarIntDeserializer, @@ -1295,7 +1297,7 @@ mod tests { #[test] fn test_forge_invalid_denunciation() { - let keypair = KeyPair::generate(); + let keypair = KeyPair::generate(0).unwrap(); let slot_1 = Slot::new(4, 2); let slot_2 = Slot::new(3, 7); diff --git a/massa-models/src/endorsement.rs b/massa-models/src/endorsement.rs index d405c6fe76e..83772d1d361 100644 --- a/massa-models/src/endorsement.rs +++ b/massa-models/src/endorsement.rs @@ -430,7 +430,7 @@ mod tests { #[test] #[serial] fn test_endorsement_serialization() { - let sender_keypair = KeyPair::generate(); + let sender_keypair = KeyPair::generate(0).unwrap(); let content = Endorsement { slot: Slot::new(10, 1), index: 0, @@ -455,7 +455,7 @@ mod tests { #[test] #[serial] fn test_endorsement_lightweight_serialization() { - let sender_keypair = KeyPair::generate(); + let sender_keypair = KeyPair::generate(0).unwrap(); let content = Endorsement { slot: Slot::new(10, 1), index: 0, @@ -486,7 +486,7 @@ mod tests { fn test_verify_sig_batch() { // test verify_signature_batch as we override SecureShareEndorsements compute_hash - let sender_keypair = KeyPair::generate(); + let sender_keypair = KeyPair::generate(0).unwrap(); let content_1 = Endorsement { slot: Slot::new(10, 1), index: 0, @@ -503,7 +503,7 @@ mod tests { SecureShareDeserializer::new(EndorsementDeserializer::new(32, 32)) .deserialize::(&serialized) .unwrap(); - let sender_keypair = KeyPair::generate(); + let sender_keypair = KeyPair::generate(0).unwrap(); let content_2 = Endorsement { slot: Slot::new(2, 5), index: 0, diff --git a/massa-models/src/mapping_grpc.rs b/massa-models/src/mapping_grpc.rs index 48fd700c49a..ad87de58cf8 100644 --- a/massa-models/src/mapping_grpc.rs +++ b/massa-models/src/mapping_grpc.rs @@ -330,7 +330,7 @@ pub fn secure_share_to_vec(value: grpc::SecureShare) -> Result, ModelsEr Vec::with_capacity(value.signature.len() + pub_key_b.len() + value.serialized_data.len()); serialized_content .extend_from_slice(&Signature::from_str(&value.signature).map(|value| value.to_bytes())?); - serialized_content.extend_from_slice(pub_key_b); + serialized_content.extend_from_slice(&pub_key_b); serialized_content.extend_from_slice(&value.serialized_data); Ok(serialized_content) diff --git a/massa-models/src/node.rs b/massa-models/src/node.rs index 3935412ab9d..bcc45261d4b 100644 --- a/massa-models/src/node.rs 
+++ b/massa-models/src/node.rs @@ -1,12 +1,8 @@ // Copyright (c) 2022 MASSA LABS use crate::error::ModelsError; -use massa_serialization::{ - DeserializeError, Deserializer, Serializer, U64VarIntDeserializer, U64VarIntSerializer, -}; use massa_signature::PublicKey; use serde_with::{DeserializeFromStr, SerializeDisplay}; -use std::ops::Bound::Included; /// `NodeId` wraps a public key to uniquely identify a node. #[derive( @@ -15,7 +11,6 @@ use std::ops::Bound::Included; pub struct NodeId(PublicKey); const NODEID_PREFIX: char = 'N'; -const NODEID_VERSION: u64 = 0; impl NodeId { /// Create a new `NodeId` from a public key. @@ -31,18 +26,11 @@ impl NodeId { impl std::fmt::Display for NodeId { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - let u64_serializer = U64VarIntSerializer::new(); - // might want to allocate the vector with capacity in order to avoid re-allocation - let mut bytes: Vec = Vec::new(); - u64_serializer - .serialize(&NODEID_VERSION, &mut bytes) - .map_err(|_| std::fmt::Error)?; - bytes.extend(self.0.to_bytes()); write!( f, "{}{}", NODEID_PREFIX, - bs58::encode(bytes).with_check().into_string() + bs58::encode(self.0.to_bytes()).with_check().into_string() ) } } @@ -62,10 +50,11 @@ impl std::str::FromStr for NodeId { /// # use serde::{Deserialize, Serialize}; /// # use std::str::FromStr; /// # use massa_models::node::NodeId; - /// # let keypair = KeyPair::generate(); + /// # let keypair = KeyPair::generate(0).unwrap(); /// # let node_id = NodeId::new(keypair.get_public_key()); /// let ser = node_id.to_string(); /// let res_node_id = NodeId::from_str(&ser).unwrap(); + /// let from_raw = NodeId::from_str("N12UbyLJDS7zimGWf3LTHe8hYY67RdLke1iDRZqJbQQLHQSKPW8j").unwrap(); /// assert_eq!(node_id, res_node_id); /// ``` fn from_str(s: &str) -> Result { @@ -77,13 +66,7 @@ impl std::str::FromStr for NodeId { .with_check(None) .into_vec() .map_err(|_| ModelsError::NodeIdParseError)?; - let u64_deserializer = U64VarIntDeserializer::new(Included(0), Included(u64::MAX)); - let (rest, _version) = u64_deserializer - .deserialize::(&decoded_bs58_check[..]) - .map_err(|_| ModelsError::NodeIdParseError)?; - Ok(NodeId(PublicKey::from_bytes( - rest.try_into().map_err(|_| ModelsError::NodeIdParseError)?, - )?)) + Ok(NodeId(PublicKey::from_bytes(&decoded_bs58_check)?)) } _ => Err(ModelsError::NodeIdParseError), } diff --git a/massa-models/src/operation.rs b/massa-models/src/operation.rs index 1383666ac5f..94c337e8aa7 100644 --- a/massa-models/src/operation.rs +++ b/massa-models/src/operation.rs @@ -301,7 +301,7 @@ impl Serializer for OperationSerializer { /// use massa_serialization::Serializer; /// use std::str::FromStr; /// - /// let keypair = KeyPair::generate(); + /// let keypair = KeyPair::generate(0).unwrap(); /// let op = OperationType::Transaction { /// recipient_address: Address::from_public_key(&keypair.get_public_key()), /// amount: Amount::from_str("300").unwrap(), @@ -366,7 +366,7 @@ impl Deserializer for OperationDeserializer { /// use massa_serialization::{Serializer, Deserializer, DeserializeError}; /// use std::str::FromStr; /// - /// let keypair = KeyPair::generate(); + /// let keypair = KeyPair::generate(0).unwrap(); /// let op = OperationType::Transaction { /// recipient_address: Address::from_public_key(&keypair.get_public_key()), /// amount: Amount::from_str("300").unwrap(), @@ -558,7 +558,7 @@ impl Serializer for OperationTypeSerializer { /// use massa_serialization::{Deserializer, Serializer, DeserializeError}; /// use std::str::FromStr; /// - /// let 
keypair = KeyPair::generate(); + /// let keypair = KeyPair::generate(0).unwrap(); /// let op = OperationType::ExecuteSC { /// data: vec![0x01, 0x02, 0x03], /// max_gas: 100, @@ -686,7 +686,7 @@ impl Deserializer for OperationTypeDeserializer { /// use massa_serialization::{Deserializer, Serializer, DeserializeError}; /// use std::str::FromStr; /// - /// let keypair = KeyPair::generate(); + /// let keypair = KeyPair::generate(0).unwrap(); /// let op = OperationType::ExecuteSC { /// data: vec![0x01, 0x02, 0x03], /// max_gas: 100, @@ -1177,7 +1177,7 @@ impl Serializer> for OperationsSerializer { /// use massa_serialization::Serializer; /// use std::str::FromStr; /// - /// let keypair = KeyPair::generate(); + /// let keypair = KeyPair::generate(0).unwrap(); /// let op = OperationType::Transaction { /// recipient_address: Address::from_public_key(&keypair.get_public_key()), /// amount: Amount::from_str("300").unwrap(), @@ -1250,7 +1250,7 @@ impl Deserializer> for OperationsDeserializer { /// use massa_serialization::{Serializer, Deserializer, DeserializeError}; /// use std::str::FromStr; /// - /// let keypair = KeyPair::generate(); + /// let keypair = KeyPair::generate(0).unwrap(); /// let op = OperationType::Transaction { /// recipient_address: Address::from_public_key(&keypair.get_public_key()), /// amount: Amount::from_str("300").unwrap(), @@ -1308,8 +1308,8 @@ mod tests { #[test] #[serial] fn test_transaction() { - let sender_keypair = KeyPair::generate(); - let recv_keypair = KeyPair::generate(); + let sender_keypair = KeyPair::generate(0).unwrap(); + let recv_keypair = KeyPair::generate(0).unwrap(); let op = OperationType::Transaction { recipient_address: Address::from_public_key(&recv_keypair.get_public_key()), @@ -1381,7 +1381,7 @@ mod tests { #[test] #[serial] fn test_executesc() { - let sender_keypair = KeyPair::generate(); + let sender_keypair = KeyPair::generate(0).unwrap(); let op = OperationType::ExecuteSC { max_gas: 123, @@ -1456,9 +1456,9 @@ mod tests { #[test] #[serial] fn test_callsc() { - let sender_keypair = KeyPair::generate(); + let sender_keypair = KeyPair::generate(0).unwrap(); - let target_keypair = KeyPair::generate(); + let target_keypair = KeyPair::generate(0).unwrap(); let target_addr = Address::from_public_key(&target_keypair.get_public_key()); let op = OperationType::CallSC { diff --git a/massa-models/src/secure_share.rs b/massa-models/src/secure_share.rs index b523651a877..de9d7a3771b 100644 --- a/massa-models/src/secure_share.rs +++ b/massa-models/src/secure_share.rs @@ -5,7 +5,6 @@ use massa_hash::Hash; use massa_serialization::{Deserializer, SerializeError, Serializer}; use massa_signature::{ KeyPair, PublicKey, PublicKeyDeserializer, Signature, SignatureDeserializer, - PUBLIC_KEY_SIZE_BYTES, SIGNATURE_SIZE_BYTES, }; use nom::{ error::{context, ContextError, ParseError}, @@ -116,8 +115,8 @@ where serialized_content: &[u8], buffer: &mut Vec, ) -> Result<(), SerializeError> { - buffer.extend(signature.into_bytes()); - buffer.extend(creator_public_key.into_bytes()); + buffer.extend(signature.to_bytes()); + buffer.extend(creator_public_key.to_bytes()); buffer.extend(serialized_content); Ok(()) } @@ -227,8 +226,8 @@ where pub fn serialized_size(&self) -> usize { self.serialized_data .len() - .saturating_add(SIGNATURE_SIZE_BYTES) - .saturating_add(PUBLIC_KEY_SIZE_BYTES) + .saturating_add(self.signature.get_ser_len()) + .saturating_add(self.content_creator_pub_key.get_ser_len()) } } @@ -373,7 +372,7 @@ where /// index: 0, /// endorsed_block: 
BlockId(Hash::compute_from("blk".as_bytes())), /// }; - /// let keypair = KeyPair::generate(); + /// let keypair = KeyPair::generate(0).unwrap(); /// let secured: SecureShare = Endorsement::new_verifiable( /// content, /// EndorsementSerializer::new(), diff --git a/massa-models/src/serialization.rs b/massa-models/src/serialization.rs index 7691fd5e97b..76ce34835c7 100644 --- a/massa-models/src/serialization.rs +++ b/massa-models/src/serialization.rs @@ -577,6 +577,7 @@ where } } +#[derive(Clone)] /// `BitVec` Serializer pub struct BitVecSerializer { u32_serializer: U32VarIntSerializer, @@ -611,6 +612,7 @@ impl Serializer> for BitVecSerializer { } } +#[derive(Clone)] /// `BitVec` Deserializer pub struct BitVecDeserializer { u32_deserializer: U32VarIntDeserializer, diff --git a/massa-models/src/stats.rs b/massa-models/src/stats.rs index c40e4b63538..c3e647c04a5 100644 --- a/massa-models/src/stats.rs +++ b/massa-models/src/stats.rs @@ -26,12 +26,12 @@ impl std::fmt::Display for ExecutionStats { writeln!( f, "\tStart stats timespan time: {}", - self.time_window_start.to_utc_string() + self.time_window_start.format_instant() )?; writeln!( f, "\tEnd stats timespan time: {}", - self.time_window_end.to_utc_string() + self.time_window_end.format_instant() )?; writeln!( f, @@ -96,12 +96,12 @@ impl std::fmt::Display for ConsensusStats { writeln!( f, "\tStart stats timespan time: {}", - self.start_timespan.to_utc_string() + self.start_timespan.format_instant() )?; writeln!( f, "\tEnd stats timespan time: {}", - self.end_timespan.to_utc_string() + self.end_timespan.format_instant() )?; writeln!(f, "\tFinal block count: {}", self.final_block_count)?; writeln!(f, "\tStale block count: {}", self.stale_block_count)?; diff --git a/massa-models/src/test_exports/data.rs b/massa-models/src/test_exports/data.rs index 8f490058017..a01a31eab9e 100644 --- a/massa-models/src/test_exports/data.rs +++ b/massa-models/src/test_exports/data.rs @@ -21,7 +21,7 @@ pub fn gen_endorsements_for_denunciation( SecureShareEndorsement, SecureShareEndorsement, ) { - let keypair = with_keypair.unwrap_or(KeyPair::generate()); + let keypair = with_keypair.unwrap_or(KeyPair::generate(0).unwrap()); let slot = with_slot.unwrap_or(Slot::new(3, 7)); let endorsement_1 = Endorsement { @@ -64,7 +64,7 @@ pub fn gen_block_headers_for_denunciation( with_slot: Option, with_keypair: Option, ) -> (Slot, KeyPair, SecuredHeader, SecuredHeader, SecuredHeader) { - let keypair = with_keypair.unwrap_or(KeyPair::generate()); + let keypair = with_keypair.unwrap_or(KeyPair::generate(0).unwrap()); let slot = with_slot.unwrap_or(Slot::new(2, 1)); let parents_1: Vec = (0..THREAD_COUNT) @@ -87,6 +87,8 @@ pub fn gen_block_headers_for_denunciation( .unwrap(); let block_header_1 = BlockHeader { + current_version: 0, + announced_version: 0, slot, parents: parents_1, operation_merkle_root: Hash::compute_from("mno".as_bytes()), @@ -103,6 +105,8 @@ pub fn gen_block_headers_for_denunciation( .expect("error while producing block header"); let block_header_2 = BlockHeader { + current_version: 0, + announced_version: 0, slot, parents: parents_2, operation_merkle_root: Hash::compute_from("mno".as_bytes()), @@ -119,6 +123,8 @@ pub fn gen_block_headers_for_denunciation( .expect("error while producing block header"); let block_header_3 = BlockHeader { + current_version: 0, + announced_version: 0, slot, parents: parents_3, operation_merkle_root: Hash::compute_from("mno".as_bytes()), diff --git a/massa-models/src/timeslots.rs b/massa-models/src/timeslots.rs index 
c898de3b037..2e1471bd0bd 100644 --- a/massa-models/src/timeslots.rs +++ b/massa-models/src/timeslots.rs @@ -230,8 +230,8 @@ mod tests { #[serial] fn test_time_range_to_slot_range() { let thread_count = 3u8; - let t0: MassaTime = 30.into(); - let genesis_timestamp: MassaTime = 100.into(); + let t0: MassaTime = MassaTime::from_millis(30); + let genesis_timestamp: MassaTime = MassaTime::from_millis(100); /* slots: (0, 0) (0, 1) (0, 2) (1, 0) (1, 1) (1, 2) (2, 0) (2, 1) (2, 2) time: 100 110 120 130 140 150 160 170 180 */ @@ -241,8 +241,8 @@ mod tests { thread_count, t0, genesis_timestamp, - Some(111.into()), - Some(115.into()), + Some(MassaTime::from_millis(111)), + Some(MassaTime::from_millis(115)), ) .unwrap(); assert_eq!(out_start, out_end); @@ -252,8 +252,8 @@ mod tests { thread_count, t0, genesis_timestamp, - Some(10.into()), - Some(100.into()), + Some(MassaTime::from_millis(10)), + Some(MassaTime::from_millis(100)), ) .unwrap(); assert_eq!(out_start, out_end); @@ -263,8 +263,8 @@ mod tests { thread_count, t0, genesis_timestamp, - Some(115.into()), - Some(145.into()), + Some(MassaTime::from_millis(115)), + Some(MassaTime::from_millis(145)), ) .unwrap(); assert_eq!(out_start, Some(Slot::new(0, 2))); @@ -275,8 +275,8 @@ mod tests { thread_count, t0, genesis_timestamp, - Some(110.into()), - Some(160.into()), + Some(MassaTime::from_millis(110)), + Some(MassaTime::from_millis(160)), ) .unwrap(); assert_eq!(out_start, Some(Slot::new(0, 1))); @@ -287,13 +287,17 @@ mod tests { #[serial] fn test_get_closest_slot_to_timestamp() { let thread_count = 3u8; - let t0: MassaTime = 30.into(); - let genesis_timestamp: MassaTime = 100.into(); + let t0: MassaTime = MassaTime::from_millis(30); + let genesis_timestamp: MassaTime = MassaTime::from_millis(100); /* slots: (0, 0) (0, 1) (0, 2) (1, 0) (1, 1) (1, 2) (2, 0) (2, 1) (2, 2) time: 100 110 120 130 140 150 160 170 180 */ - let out_slot = - get_closest_slot_to_timestamp(thread_count, t0, genesis_timestamp, 150.into()); + let out_slot = get_closest_slot_to_timestamp( + thread_count, + t0, + genesis_timestamp, + MassaTime::from_millis(150), + ); assert_eq!(out_slot, Slot::new(1, 2)); } } diff --git a/massa-models/src/version.rs b/massa-models/src/version.rs index b033e5fed36..1b20894e1dc 100644 --- a/massa-models/src/version.rs +++ b/massa-models/src/version.rs @@ -191,10 +191,7 @@ impl Deserializer for VersionDeserializer { impl Version { /// true if instance and major are the same pub fn is_compatible(&self, other: &Version) -> bool { - self.instance == other.instance - && self.major == other.major - && self.minor >= 1 - && other.minor >= 1 + self.instance == other.instance && self.major == other.major } } diff --git a/massa-module-cache/Cargo.toml b/massa-module-cache/Cargo.toml index a25673df634..82b5a02d63b 100644 --- a/massa-module-cache/Cargo.toml +++ b/massa-module-cache/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "massa_module_cache" -version = "0.1.0" +version = "0.23.0" edition = "2021" [dependencies] @@ -9,7 +9,7 @@ rocksdb = "0.20" serial_test = "1.0.0" rand = "0.8.5" num_enum = "0.5" -nom = "7.1" +nom = "=7.1" displaydoc = "0.2" thiserror = "1.0" anyhow = "1.0" diff --git a/massa-node/Cargo.toml b/massa-node/Cargo.toml index a58a41bb657..2459e06a8a0 100644 --- a/massa-node/Cargo.toml +++ b/massa-node/Cargo.toml @@ -1,28 +1,29 @@ [package] name = "massa-node" -version = "0.1.0" +version = "0.23.0" authors = ["Massa Labs "] edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + 
[dependencies] crossbeam-channel = "0.5.6" anyhow = "1.0" lazy_static = "1.4" parking_lot = { version = "0.12", features = ["deadlock_detection"] } serde = { version = "1.0", features = ["derive"] } -serde_json = "1.0" tokio = { version = "1.23", features = ["full"] } tracing = { version = "0.1", features = [ "max_level_debug", "release_max_level_debug", ] } -peernet = { git = "https://github.com/massalabs/PeerNet", rev = "1bb1f452bf63b78a89eb9542fb019b88d894c664" } +peernet = { git = "https://github.com/massalabs/PeerNet", rev = "bf8adf5" } tracing-subscriber = "0.3" paw = "1.0" structopt = { version = "0.3", features = ["paw"] } dialoguer = "0.10" +ctrlc = "3.2.5" # custom modules massa_api_exports = { path = "../massa-api-exports" } massa_api = { path = "../massa-api" } @@ -50,13 +51,12 @@ massa_wallet = { path = "../massa-wallet" } massa_factory_exports = { path = "../massa-factory-exports" } massa_factory_worker = { path = "../massa-factory-worker" } massa_grpc = { path = "../massa-grpc" } -massa_versioning_worker = { path = "../massa-versioning-worker" } -ctrlc = "3.2.5" +massa_versioning = { path = "../massa-versioning" } +massa_db = { path = "../massa-db" } # for more information on what are the following features used for, see the cargo.toml at workspace level [features] beta = [] deadlock_detection = [] -bootstrap_server = [] +bootstrap_server = ["massa_consensus_worker/bootstrap_server"] sandbox = ["massa_bootstrap/sandbox", "massa_consensus_worker/sandbox", "massa_execution_worker/sandbox", "massa_final_state/sandbox", "massa_models/sandbox"] -create_snapshot = ["massa_final_state/create_snapshot"] diff --git a/massa-node/base_config/config.toml b/massa-node/base_config/config.toml index 6db4851a16a..e9e3460fd31 100644 --- a/massa-node/base_config/config.toml +++ b/massa-node/base_config/config.toml @@ -40,7 +40,7 @@ [grpc] # whether to enable gRPC - enabled = false + enabled = true # whether to add HTTP 1 layer accept_http1 = false # whether to enable CORS. works only if `accept_http1` is true @@ -52,7 +52,7 @@ # whether to enable mTLS enable_mtls = false # bind for the Massa gRPC API - bind = "0.0.0.0:33037" + bind = "127.0.0.1:33037" # which compression encodings does the server accept for requests accept_compressed = "Gzip" # which compression encodings might the server use for responses @@ -127,24 +127,20 @@ [consensus] # max number of previously discarded blocks kept in RAM max_discarded_blocks = 100 - # if a block is at least future_block_processing_max_periods periods in the future, it is just discarded - future_block_processing_max_periods = 100 # max number of blocks in the future kept in RAM max_future_processing_blocks = 400 # max number of blocks waiting for dependencies max_dependency_blocks = 2048 - # number of final periods that must be kept at all times (increase to more resilience to short network disconnections, high values will increase RAM usage.) + # number of final periods that must be kept without operations (increasing this improves the bootstrap process; high values will increase RAM usage) + force_keep_final_periods_without_ops = 128 + # number of final periods that must be kept with operations (increase for more resilience to short network disconnections; high values will increase RAM usage)
force_keep_final_periods = 10 - # max milliseconds to wait while sending an event before dropping it - max_send_wait = 0 # useless blocks are pruned every block_db_prune_interval ms block_db_prune_interval = 5000 # considered timespan for stats info stats_timespan = 60000 - # max number of item returned per query - max_item_return_count = 100 # blocks headers channel capacity broadcast_blocks_headers_channel_capacity = 128 diff --git a/massa-node/base_config/initial_ledger.json b/massa-node/base_config/initial_ledger.json index 997efb0ad9d..1400a23a3ab 100644 --- a/massa-node/base_config/initial_ledger.json +++ b/massa-node/base_config/initial_ledger.json @@ -1,10 +1,10 @@ { - "AU12dhs6CsQk8AXFTYyUpc1P9e8GDf65ozU6RcigW68qfJV7vdbNf": { + "AU12Cyu2f7C7isA3ADAhoNuq9ZUFPKP24jmiGj3sh9D1pHoAWKDYY": { "balance": "1000000000", "datastore": {}, "bytecode": [] }, - "AU12WQRoxQJKMjNG8hVjkyh4YgBwaYeUH4BsqJEEdTUJda37GhSx9": { + "AU12BTfZ7k1z6PsLEUZeHYNirz6WJ3NdrWto9H4TkVpkV9xE2TJg2": { "balance": "1000000000", "datastore": {}, "bytecode": [] @@ -14,37 +14,32 @@ "datastore": {}, "bytecode": [] }, - "AU12p3neq9Caq8idS33jrWuRZgfoBL3wAAfG2NdZEBxNdfujVtCLq": { + "AU1jDpoDo1p4PU4KwFMWbP1xktueEEmkn9MVFbom5oxfXLCyT7BR": { "balance": "1000000000", "datastore": {}, "bytecode": [] }, - "AU122tXU6uhDfGP1BxCtvLQTvyascwsjX5NVo3vv1fssmdeKSeytM": { + "AU12FHsSoETq67gF3eVAecRR8cmoLGhFErUm4Q524tEsuoaCg4tSH": { "balance": "1000000000", "datastore": {}, "bytecode": [] }, - "AU1xfaL8CTrZWTBY79JDMEokwYjc2U4gUFJqqgtbahj7gwYn2s6Y": { + "AU1r5GxEpBBGmtK7JY4LVHghjtr87jof5WkPh68oyddHRU32qct3": { "balance": "1000000000", "datastore": {}, "bytecode": [] }, - "AU1xgxVCw4Vnr2s8JSnFYzz8UGuqZdMH7wHBDUapndgRCdDteWS": { + "AU1EQeYwEWzixmLY2MLbSdp4QsWTzYTeN3XqH2t9yqrjBeXN3Mug": { "balance": "1000000000", "datastore": {}, "bytecode": [] }, - "AU12DPHgthL9JJGrASquAmsdef9oYpLCXzZRvyiXzY3TG87Hgcs4o": { + "AU12r1iM79EcS3sa4dmtUp28TiaPxK1weQcLsATcFoynPdukjdMqM": { "balance": "1000000000", "datastore": {}, "bytecode": [] }, - "AU124Lusm3gJFwkCkY13FKXae2z2cV4pvhepJfxPHM2LJz8fEjU3C": { - "balance": "1000000000", - "datastore": {}, - "bytecode": [] - }, - "AU12spryWgt7VUoT9LRi8Roe5WVgUPLeaQ6GXMd9ABUQyfv62Ab18": { + "AU1LzG14pkp87tKDpbQPCjCnSEdww8KPeX1qaD4cxPzjJcd2fQWH": { "balance": "1000000000", "datastore": {}, "bytecode": [] diff --git a/massa-node/base_config/initial_rolls.json b/massa-node/base_config/initial_rolls.json index 0a6b4cb74a2..dfd3632776f 100644 --- a/massa-node/base_config/initial_rolls.json +++ b/massa-node/base_config/initial_rolls.json @@ -1,3 +1,11 @@ { - "AU12dhs6CsQk8AXFTYyUpc1P9e8GDf65ozU6RcigW68qfJV7vdbNf": 100000 + "AU12Cyu2f7C7isA3ADAhoNuq9ZUFPKP24jmiGj3sh9D1pHoAWKDYY": 1000, + "AU12BTfZ7k1z6PsLEUZeHYNirz6WJ3NdrWto9H4TkVpkV9xE2TJg2": 1000, + "AU1226KGgzq425xzpNmrUCggKcru4yMkFSUXGxYnTwCt6vso5PVbn": 1000, + "AU1jDpoDo1p4PU4KwFMWbP1xktueEEmkn9MVFbom5oxfXLCyT7BR": 1000, + "AU12FHsSoETq67gF3eVAecRR8cmoLGhFErUm4Q524tEsuoaCg4tSH": 1000, + "AU1r5GxEpBBGmtK7JY4LVHghjtr87jof5WkPh68oyddHRU32qct3": 1000, + "AU1EQeYwEWzixmLY2MLbSdp4QsWTzYTeN3XqH2t9yqrjBeXN3Mug": 1000, + "AU12r1iM79EcS3sa4dmtUp28TiaPxK1weQcLsATcFoynPdukjdMqM": 1000, + "AU1LzG14pkp87tKDpbQPCjCnSEdww8KPeX1qaD4cxPzjJcd2fQWH": 1000 } diff --git a/massa-node/base_config/openrpc.json b/massa-node/base_config/openrpc.json index 2e351ec4afa..a70e631c1df 100644 --- a/massa-node/base_config/openrpc.json +++ b/massa-node/base_config/openrpc.json @@ -2,7 +2,7 @@ "openrpc": "1.2.4", "info": { "title": "Massa OpenRPC Specification", - "version": "TEST.22.2", + "version": 
"TEST.23.0", "description": "Massa OpenRPC Specification document. Find more information on https://docs.massa.net/en/latest/technical-doc/api.html", "termsOfService": "https://open-rpc.org", "contact": { diff --git a/massa-node/src/main.rs b/massa-node/src/main.rs index 3a00a64071d..d6edceb1bf0 100644 --- a/massa-node/src/main.rs +++ b/massa-node/src/main.rs @@ -9,6 +9,7 @@ extern crate massa_logging; use crate::settings::SETTINGS; use crossbeam_channel::{Receiver, TryRecvError}; +use ctrlc as _; use dialoguer::Password; use massa_api::{ApiServer, ApiV2, Private, Public, RpcServer, StopHandle, API}; use massa_api_exports::config::APIConfig; @@ -21,6 +22,7 @@ use massa_bootstrap::{ use massa_consensus_exports::events::ConsensusEvent; use massa_consensus_exports::{ConsensusChannels, ConsensusConfig, ConsensusManager}; use massa_consensus_worker::start_consensus_worker; +use massa_db::{MassaDB, MassaDBConfig}; use massa_executed_ops::{ExecutedDenunciationsConfig, ExecutedOpsConfig}; use massa_execution_exports::{ ExecutionChannels, ExecutionConfig, ExecutionManager, GasCosts, StorageCostsConstants, @@ -36,25 +38,23 @@ use massa_ledger_worker::FinalLedger; use massa_logging::massa_trace; use massa_models::address::Address; use massa_models::config::constants::{ - ASYNC_POOL_BOOTSTRAP_PART_SIZE, BLOCK_REWARD, BOOTSTRAP_RANDOMNESS_SIZE_BYTES, CHANNEL_SIZE, - CONSENSUS_BOOTSTRAP_PART_SIZE, DEFERRED_CREDITS_BOOTSTRAP_PART_SIZE, DELTA_F0, - DENUNCIATION_EXPIRE_PERIODS, ENDORSEMENT_COUNT, END_TIMESTAMP, - EXECUTED_OPS_BOOTSTRAP_PART_SIZE, GENESIS_KEY, GENESIS_TIMESTAMP, INITIAL_DRAW_SEED, - LEDGER_COST_PER_BYTE, LEDGER_ENTRY_BASE_SIZE, LEDGER_ENTRY_DATASTORE_BASE_SIZE, - LEDGER_PART_SIZE_MESSAGE_BYTES, MAX_ADVERTISE_LENGTH, MAX_ASK_BLOCKS_PER_MESSAGE, + BLOCK_REWARD, BOOTSTRAP_RANDOMNESS_SIZE_BYTES, CHANNEL_SIZE, CONSENSUS_BOOTSTRAP_PART_SIZE, + DELTA_F0, DENUNCIATION_EXPIRE_PERIODS, ENDORSEMENT_COUNT, END_TIMESTAMP, GENESIS_KEY, + GENESIS_TIMESTAMP, INITIAL_DRAW_SEED, LEDGER_COST_PER_BYTE, LEDGER_ENTRY_BASE_COST, + LEDGER_ENTRY_DATASTORE_BASE_SIZE, MAX_ADVERTISE_LENGTH, MAX_ASK_BLOCKS_PER_MESSAGE, MAX_ASYNC_GAS, MAX_ASYNC_MESSAGE_DATA, MAX_ASYNC_POOL_LENGTH, MAX_BLOCK_SIZE, MAX_BOOTSTRAP_ASYNC_POOL_CHANGES, MAX_BOOTSTRAP_BLOCKS, MAX_BOOTSTRAP_ERROR_LENGTH, - MAX_BOOTSTRAP_FINAL_STATE_PARTS_SIZE, MAX_BYTECODE_LENGTH, MAX_CONSENSUS_BLOCKS_IDS, - MAX_DATASTORE_ENTRY_COUNT, MAX_DATASTORE_KEY_LENGTH, MAX_DATASTORE_VALUE_LENGTH, - MAX_DEFERRED_CREDITS_LENGTH, MAX_DENUNCIATIONS_PER_BLOCK_HEADER, - MAX_DENUNCIATION_CHANGES_LENGTH, MAX_ENDORSEMENTS_PER_MESSAGE, MAX_EXECUTED_OPS_CHANGES_LENGTH, - MAX_EXECUTED_OPS_LENGTH, MAX_FUNCTION_NAME_LENGTH, MAX_GAS_PER_BLOCK, MAX_LEDGER_CHANGES_COUNT, - MAX_LISTENERS_PER_PEER, MAX_OPERATIONS_PER_BLOCK, MAX_OPERATIONS_PER_MESSAGE, - MAX_OPERATION_DATASTORE_ENTRY_COUNT, MAX_OPERATION_DATASTORE_KEY_LENGTH, - MAX_OPERATION_DATASTORE_VALUE_LENGTH, MAX_OPERATION_STORAGE_TIME, MAX_PARAMETERS_SIZE, - MAX_PEERS_IN_ANNOUNCEMENT_LIST, MAX_PRODUCTION_STATS_LENGTH, MAX_ROLLS_COUNT_LENGTH, - MAX_SIZE_CHANNEL_COMMANDS_CONNECTIVITY, MAX_SIZE_CHANNEL_COMMANDS_PEERS, - MAX_SIZE_CHANNEL_COMMANDS_PEER_TESTERS, MAX_SIZE_CHANNEL_COMMANDS_PROPAGATION_BLOCKS, + MAX_BYTECODE_LENGTH, MAX_CONSENSUS_BLOCKS_IDS, MAX_DATASTORE_ENTRY_COUNT, + MAX_DATASTORE_KEY_LENGTH, MAX_DATASTORE_VALUE_LENGTH, MAX_DEFERRED_CREDITS_LENGTH, + MAX_DENUNCIATIONS_PER_BLOCK_HEADER, MAX_DENUNCIATION_CHANGES_LENGTH, + MAX_ENDORSEMENTS_PER_MESSAGE, MAX_EXECUTED_OPS_CHANGES_LENGTH, MAX_EXECUTED_OPS_LENGTH, + 
MAX_FUNCTION_NAME_LENGTH, MAX_GAS_PER_BLOCK, MAX_LEDGER_CHANGES_COUNT, MAX_LISTENERS_PER_PEER, + MAX_OPERATIONS_PER_BLOCK, MAX_OPERATIONS_PER_MESSAGE, MAX_OPERATION_DATASTORE_ENTRY_COUNT, + MAX_OPERATION_DATASTORE_KEY_LENGTH, MAX_OPERATION_DATASTORE_VALUE_LENGTH, + MAX_OPERATION_STORAGE_TIME, MAX_PARAMETERS_SIZE, MAX_PEERS_IN_ANNOUNCEMENT_LIST, + MAX_PRODUCTION_STATS_LENGTH, MAX_ROLLS_COUNT_LENGTH, MAX_SIZE_CHANNEL_COMMANDS_CONNECTIVITY, + MAX_SIZE_CHANNEL_COMMANDS_PEERS, MAX_SIZE_CHANNEL_COMMANDS_PEER_TESTERS, + MAX_SIZE_CHANNEL_COMMANDS_PROPAGATION_BLOCKS, MAX_SIZE_CHANNEL_COMMANDS_PROPAGATION_ENDORSEMENTS, MAX_SIZE_CHANNEL_COMMANDS_PROPAGATION_OPERATIONS, MAX_SIZE_CHANNEL_COMMANDS_RETRIEVAL_BLOCKS, MAX_SIZE_CHANNEL_COMMANDS_RETRIEVAL_ENDORSEMENTS, @@ -62,11 +62,15 @@ use massa_models::config::constants::{ MAX_SIZE_CHANNEL_NETWORK_TO_ENDORSEMENT_HANDLER, MAX_SIZE_CHANNEL_NETWORK_TO_OPERATION_HANDLER, MAX_SIZE_CHANNEL_NETWORK_TO_PEER_HANDLER, MIP_STORE_STATS_BLOCK_CONSIDERED, MIP_STORE_STATS_COUNTERS_MAX, OPERATION_VALIDITY_PERIODS, PERIODS_PER_CYCLE, - POOL_CONTROLLER_CHANNEL_SIZE, POS_MISS_RATE_DEACTIVATION_THRESHOLD, POS_SAVED_CYCLES, - PROTOCOL_CONTROLLER_CHANNEL_SIZE, PROTOCOL_EVENT_CHANNEL_SIZE, - ROLL_COUNT_TO_SLASH_ON_DENUNCIATION, ROLL_PRICE, SELECTOR_DRAW_CACHE_SIZE, T0, THREAD_COUNT, - VERSION, + POS_MISS_RATE_DEACTIVATION_THRESHOLD, POS_SAVED_CYCLES, PROTOCOL_CONTROLLER_CHANNEL_SIZE, + PROTOCOL_EVENT_CHANNEL_SIZE, ROLL_COUNT_TO_SLASH_ON_DENUNCIATION, ROLL_PRICE, + SELECTOR_DRAW_CACHE_SIZE, T0, THREAD_COUNT, VERSION, }; +use massa_models::config::{ + MAX_BOOTSTRAPPED_NEW_ELEMENTS, MAX_MESSAGE_SIZE, POOL_CONTROLLER_DENUNCIATIONS_CHANNEL_SIZE, + POOL_CONTROLLER_ENDORSEMENTS_CHANNEL_SIZE, POOL_CONTROLLER_OPERATIONS_CHANNEL_SIZE, +}; +use massa_models::slot::Slot; use massa_pool_exports::{PoolChannels, PoolConfig, PoolManager}; use massa_pool_worker::start_pool_controller; use massa_pos_exports::{PoSConfig, SelectorConfig, SelectorManager}; @@ -75,7 +79,10 @@ use massa_protocol_exports::{ProtocolConfig, ProtocolManager}; use massa_protocol_worker::{create_protocol_controller, start_protocol_controller}; use massa_storage::Storage; use massa_time::MassaTime; -use massa_versioning_worker::versioning::{MipStatsConfig, MipStore}; +use massa_versioning::{ + mips::MIP_LIST, + versioning::{MipStatsConfig, MipStore}, +}; use massa_wallet::Wallet; use parking_lot::RwLock; use peernet::transports::TransportType; @@ -97,6 +104,7 @@ mod settings; async fn launch( args: &Args, node_wallet: Arc>, + sig_int_toggled: Arc<(Mutex, Condvar)>, ) -> ( Receiver, Option, @@ -136,6 +144,36 @@ async fn launch( } } + use massa_models::config::constants::DOWNTIME_END_TIMESTAMP; + use massa_models::config::constants::DOWNTIME_START_TIMESTAMP; + + // Simulate downtime + // last_start_period should be set to trigger after the DOWNTIME_END_TIMESTAMP + #[cfg(not(feature = "bootstrap_server"))] + if now >= DOWNTIME_START_TIMESTAMP && now <= DOWNTIME_END_TIMESTAMP { + let (days, hours, mins, secs) = DOWNTIME_END_TIMESTAMP + .saturating_sub(now) + .days_hours_mins_secs() + .unwrap(); + + if let Ok(Some(end_period)) = massa_models::timeslots::get_latest_block_slot_at_timestamp( + THREAD_COUNT, + T0, + *GENESIS_TIMESTAMP, + DOWNTIME_END_TIMESTAMP, + ) { + panic!( + "We are in downtime! {} days, {} hours, {} minutes, {} seconds remaining to the end of the downtime. Downtime end period: {}", + days, hours, mins, secs, end_period.period + ); + } + + panic!( + "We are in downtime! 
{} days, {} hours, {} minutes, {} seconds remaining to the end of the downtime", + days, hours, mins, secs, + ); + } + // Storage shared by multiple components. let shared_storage: Storage = Storage::create_root(); @@ -145,28 +183,29 @@ async fn launch( initial_ledger_path: SETTINGS.ledger.initial_ledger_path.clone(), disk_ledger_path: SETTINGS.ledger.disk_ledger_path.clone(), max_key_length: MAX_DATASTORE_KEY_LENGTH, - max_ledger_part_size: LEDGER_PART_SIZE_MESSAGE_BYTES, max_datastore_value_length: MAX_DATASTORE_VALUE_LENGTH, }; let async_pool_config = AsyncPoolConfig { max_length: MAX_ASYNC_POOL_LENGTH, thread_count: THREAD_COUNT, - bootstrap_part_size: ASYNC_POOL_BOOTSTRAP_PART_SIZE, max_async_message_data: MAX_ASYNC_MESSAGE_DATA, + max_key_length: MAX_DATASTORE_KEY_LENGTH as u32, }; let pos_config = PoSConfig { periods_per_cycle: PERIODS_PER_CYCLE, thread_count: THREAD_COUNT, cycle_history_length: POS_SAVED_CYCLES, - credits_bootstrap_part_size: DEFERRED_CREDITS_BOOTSTRAP_PART_SIZE, + max_rolls_length: MAX_ROLLS_COUNT_LENGTH, + max_production_stats_length: MAX_PRODUCTION_STATS_LENGTH, + max_credit_length: MAX_DEFERRED_CREDITS_LENGTH, }; let executed_ops_config = ExecutedOpsConfig { thread_count: THREAD_COUNT, - bootstrap_part_size: EXECUTED_OPS_BOOTSTRAP_PART_SIZE, }; let executed_denunciations_config = ExecutedDenunciationsConfig { denunciation_expire_periods: DENUNCIATION_EXPIRE_PERIODS, - bootstrap_part_size: EXECUTED_OPS_BOOTSTRAP_PART_SIZE, + thread_count: THREAD_COUNT, + endorsement_count: ENDORSEMENT_COUNT, }; let final_state_config = FinalStateConfig { ledger_config: ledger_config.clone(), @@ -182,6 +221,8 @@ async fn launch( endorsement_count: ENDORSEMENT_COUNT, max_executed_denunciations_length: MAX_DENUNCIATION_CHANGES_LENGTH, max_denunciations_per_block_header: MAX_DENUNCIATIONS_PER_BLOCK_HEADER, + t0: T0, + genesis_timestamp: *GENESIS_TIMESTAMP, }; // Remove current disk ledger if there is one and we don't want to restart from snapshot @@ -199,11 +240,16 @@ async fn launch( } } + let db_config = MassaDBConfig { + path: SETTINGS.ledger.disk_ledger_path.clone(), + max_history_length: SETTINGS.ledger.final_history_length, + max_new_elements: MAX_BOOTSTRAPPED_NEW_ELEMENTS as usize, + thread_count: THREAD_COUNT, + }; + let db = Arc::new(RwLock::new(MassaDB::new(db_config))); + // Create final ledger - let ledger = FinalLedger::new( - ledger_config.clone(), - args.restart_from_snapshot_at_period.is_some() || cfg!(feature = "create_snapshot"), - ); + let ledger = FinalLedger::new(ledger_config.clone(), db.clone()); // launch selector worker let (selector_manager, selector_controller) = start_selector_worker(SelectorConfig { @@ -216,38 +262,38 @@ async fn launch( }) .expect("could not start selector worker"); + // Creates an empty default store + let mip_stats_config = MipStatsConfig { + block_count_considered: MIP_STORE_STATS_BLOCK_CONSIDERED, + counters_max: MIP_STORE_STATS_COUNTERS_MAX, + }; + let mip_store = + MipStore::try_from((MIP_LIST, mip_stats_config)).expect("mip store creation failed"); + // Create final state, either from a snapshot, or from scratch let final_state = Arc::new(parking_lot::RwLock::new( match args.restart_from_snapshot_at_period { Some(last_start_period) => FinalState::new_derived_from_snapshot( + db.clone(), final_state_config, Box::new(ledger), selector_controller.clone(), + mip_store.clone(), last_start_period, ) .expect("could not init final state"), None => FinalState::new( + db.clone(), final_state_config, Box::new(ledger), 
selector_controller.clone(), + mip_store.clone(), + true, ) .expect("could not init final state"), }, )); - // interrupt signal listener - let interupted = Arc::new((Mutex::new(false), Condvar::new())); - let handler_clone = Arc::clone(&interupted); - - // currently used by the bootstrap client to break out of the to preempt the retry wait - ctrlc::set_handler(move || { - *handler_clone - .0 - .lock() - .expect("double-lock on interupt bool in ctrl-c handler") = true; - handler_clone.1.notify_all(); - }) - .expect("Error setting Ctrl-C handler"); let bootstrap_config: BootstrapConfig = BootstrapConfig { bootstrap_list: SETTINGS.bootstrap.bootstrap_list.clone(), bootstrap_protocol: SETTINGS.bootstrap.bootstrap_protocol, @@ -278,7 +324,7 @@ async fn launch( max_advertise_length: MAX_ADVERTISE_LENGTH, max_bootstrap_blocks_length: MAX_BOOTSTRAP_BLOCKS, max_bootstrap_error_length: MAX_BOOTSTRAP_ERROR_LENGTH, - max_bootstrap_final_state_parts_size: MAX_BOOTSTRAP_FINAL_STATE_PARTS_SIZE, + max_new_elements: MAX_BOOTSTRAPPED_NEW_ELEMENTS, max_async_pool_changes: MAX_BOOTSTRAP_ASYNC_POOL_CHANGES, max_async_pool_length: MAX_ASYNC_POOL_LENGTH, max_async_message_data: MAX_ASYNC_MESSAGE_DATA, @@ -313,7 +359,7 @@ async fn launch( *GENESIS_TIMESTAMP, *END_TIMESTAMP, args.restart_from_snapshot_at_period, - interupted, + sig_int_toggled, ) { Ok(vals) => vals, Err(BootstrapError::Interupted(msg)) => { @@ -323,9 +369,13 @@ async fn launch( Err(err) => panic!("critical error detected in the bootstrap process: {}", err), }; + if !final_state.read().is_db_valid() { + // TODO: Bootstrap again instead of panicking + panic!("critical: db is not valid after bootstrap"); + } + if args.restart_from_snapshot_at_period.is_none() { - let last_start_period = final_state.read().last_start_period; - final_state.write().init_ledger_hash(last_start_period); + final_state.write().recompute_caches(); // give the controller to final state in order for it to feed the cycles final_state @@ -334,30 +384,42 @@ async fn launch( .expect("could not compute initial draws"); // TODO: this might just mean a bad bootstrap, no need to panic, just reboot } + let last_slot_before_downtime_ = final_state.read().last_slot_before_downtime; + if let Some(last_slot_before_downtime) = last_slot_before_downtime_ { + let last_shutdown_start = last_slot_before_downtime + .get_next_slot(THREAD_COUNT) + .unwrap(); + let last_shutdown_end = Slot::new(final_state.read().last_start_period, 0) + .get_prev_slot(THREAD_COUNT) + .unwrap(); + if !final_state + .read() + .mip_store + .is_coherent_with_shutdown_period( + last_shutdown_start, + last_shutdown_end, + THREAD_COUNT, + T0, + *GENESIS_TIMESTAMP, + ) + .unwrap_or(false) + { + panic!( + "MIP store is not coherent with last shutdown period ({} - {})", + last_shutdown_start, last_shutdown_end + ); + } + } + // Storage costs constants let storage_costs_constants = StorageCostsConstants { ledger_cost_per_byte: LEDGER_COST_PER_BYTE, - ledger_entry_base_cost: LEDGER_COST_PER_BYTE - .checked_mul_u64(LEDGER_ENTRY_BASE_SIZE as u64) - .expect("Overflow when creating constant ledger_entry_base_cost"), + ledger_entry_base_cost: LEDGER_ENTRY_BASE_COST, ledger_entry_datastore_base_cost: LEDGER_COST_PER_BYTE .checked_mul_u64(LEDGER_ENTRY_DATASTORE_BASE_SIZE as u64) .expect("Overflow when creating constant ledger_entry_datastore_base_size"), }; - // Creates an empty default store - let mip_stats_config = MipStatsConfig { - block_count_considered: MIP_STORE_STATS_BLOCK_CONSIDERED, - counters_max: 
MIP_STORE_STATS_COUNTERS_MAX, - }; - let mut mip_store = - MipStore::try_from(([], mip_stats_config)).expect("Cannot create an empty MIP store"); - if let Some(bootstrap_mip_store) = bootstrap_state.mip_store { - mip_store - .update_with(&bootstrap_mip_store) - .expect("Cannot update MIP store with bootstrap mip store"); - } - // launch execution module let execution_config = ExecutionConfig { max_final_events: SETTINGS.execution.max_final_events, @@ -425,7 +487,9 @@ async fn launch( max_operations_per_block: MAX_OPERATIONS_PER_BLOCK, max_operation_pool_size_per_thread: SETTINGS.pool.max_pool_size_per_thread, max_endorsements_pool_size_per_thread: SETTINGS.pool.max_pool_size_per_thread, - channels_size: POOL_CONTROLLER_CHANNEL_SIZE, + operations_channel_size: POOL_CONTROLLER_OPERATIONS_CHANNEL_SIZE, + endorsements_channel_size: POOL_CONTROLLER_ENDORSEMENTS_CHANNEL_SIZE, + denunciations_channel_size: POOL_CONTROLLER_DENUNCIATIONS_CHANNEL_SIZE, broadcast_enabled: SETTINGS.api.enable_broadcast, broadcast_endorsements_channel_capacity: SETTINGS .pool @@ -485,6 +549,7 @@ async fn launch( genesis_timestamp: *GENESIS_TIMESTAMP, t0: T0, endorsement_count: ENDORSEMENT_COUNT, + max_message_size: MAX_MESSAGE_SIZE as usize, max_operations_propagation_time: SETTINGS.protocol.max_operations_propagation_time, max_endorsements_propagation_time: SETTINGS.protocol.max_endorsements_propagation_time, last_start_period: final_state.read().last_start_period, @@ -550,18 +615,15 @@ async fn launch( t0: T0, genesis_key: GENESIS_KEY.clone(), max_discarded_blocks: SETTINGS.consensus.max_discarded_blocks, - future_block_processing_max_periods: SETTINGS.consensus.future_block_processing_max_periods, max_future_processing_blocks: SETTINGS.consensus.max_future_processing_blocks, max_dependency_blocks: SETTINGS.consensus.max_dependency_blocks, delta_f0: DELTA_F0, operation_validity_periods: OPERATION_VALIDITY_PERIODS, periods_per_cycle: PERIODS_PER_CYCLE, stats_timespan: SETTINGS.consensus.stats_timespan, - max_send_wait: SETTINGS.consensus.max_send_wait, force_keep_final_periods: SETTINGS.consensus.force_keep_final_periods, endorsement_count: ENDORSEMENT_COUNT, block_db_prune_interval: SETTINGS.consensus.block_db_prune_interval, - max_item_return_count: SETTINGS.consensus.max_item_return_count, max_gas_per_block: MAX_GAS_PER_BLOCK, channel_size: CHANNEL_SIZE, bootstrap_part_size: CONSENSUS_BOOTSTRAP_PART_SIZE, @@ -574,6 +636,9 @@ async fn launch( .consensus .broadcast_filled_blocks_channel_capacity, last_start_period: final_state.read().last_start_period, + force_keep_final_periods_without_ops: SETTINGS + .consensus + .force_keep_final_periods_without_ops, }; let (consensus_event_sender, consensus_event_receiver) = @@ -609,6 +674,7 @@ async fn launch( pool_controller.clone(), shared_storage.clone(), protocol_channels, + mip_store.clone(), ) .expect("could not start protocol controller"); @@ -632,7 +698,12 @@ async fn launch( protocol: protocol_controller.clone(), storage: shared_storage.clone(), }; - let factory_manager = start_factory(factory_config, node_wallet.clone(), factory_channels); + let factory_manager = start_factory( + factory_config, + node_wallet.clone(), + factory_channels, + mip_store.clone(), + ); let bootstrap_manager = bootstrap_config.listen_addr.map(|addr| { let (waker, listener) = BootstrapTcpListener::new(&addr).unwrap_or_else(|_| { @@ -649,7 +720,6 @@ async fn launch( bootstrap_config, keypair.clone(), *VERSION, - mip_store.clone(), ) .expect("Could not start bootstrap server"); 
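A note on the `sig_int_toggled` pair that `launch` now receives instead of building its own `interupted` flag: it is the standard `Mutex<bool>` + `Condvar` interrupt pattern. A Ctrl-C handler flips the boolean and notifies the condvar, so a bootstrap retry loop sleeping on it wakes immediately instead of sitting out its full delay. A minimal, self-contained sketch of the idea (the helper names here are illustrative, not from the codebase):

    use std::sync::{Arc, Condvar, Mutex};
    use std::time::Duration;

    /// Sleep for `retry_delay`, returning early with `true` if the flag is raised.
    fn interruptible_wait(flag: &Arc<(Mutex<bool>, Condvar)>, retry_delay: Duration) -> bool {
        let (lock, cvar) = &**flag;
        let guard = lock.lock().expect("interrupt flag poisoned");
        // Wait while the flag is still false; a notification or the timeout wakes us.
        let (guard, _timeout) = cvar
            .wait_timeout_while(guard, retry_delay, |interrupted| !*interrupted)
            .expect("interrupt flag poisoned");
        *guard
    }

    fn main() {
        let sig_int_toggled = Arc::new((Mutex::new(false), Condvar::new()));
        let handler_flag = Arc::clone(&sig_int_toggled);
        // Stand-in for the ctrlc::set_handler callback: raise the flag, wake waiters.
        std::thread::spawn(move || {
            *handler_flag.0.lock().expect("interrupt flag poisoned") = true;
            handler_flag.1.notify_all();
        });
        // Without the notification this would block for the full five seconds.
        let interrupted = interruptible_wait(&sig_int_toggled, Duration::from_secs(5));
        println!("interrupted: {}", interrupted);
    }

Because the pair is shared with `Arc::clone`, the single handler installed in `run` can serve every restart of `launch`.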
manager.set_listener_stopper(waker); @@ -782,6 +852,7 @@ async fn launch( storage: shared_storage.clone(), grpc_config: grpc_config.clone(), version: *VERSION, + mip_store: mip_store.clone(), }; // HACK maybe should remove timeout later @@ -834,6 +905,7 @@ async fn launch( *VERSION, node_id, shared_storage.clone(), + mip_store.clone(), ); let api_public_handle = api_public .serve(&SETTINGS.api.bind_public, &api_config) @@ -1067,6 +1139,21 @@ async fn run(args: Args) -> anyhow::Result<()> { &SETTINGS.factory.staking_wallet_path, )?; + // interrupt signal listener + let sig_int_toggled = Arc::new((Mutex::new(false), Condvar::new())); + + // TODO: re-enable and fix this (remove use ctrlc as _; when done) + // let sig_int_toggled_clone = Arc::clone(&sig_int_toggled); + // currently used by the bootstrap client to preempt the retry wait + // ctrlc::set_handler(move || { + // *sig_int_toggled_clone + // .0 + // .lock() + // .expect("double-lock on interrupt bool in ctrl-c handler") = true; + // sig_int_toggled_clone.1.notify_all(); + // }) + // .expect("Error setting Ctrl-C handler"); + loop { let ( consensus_event_receiver, @@ -1082,7 +1169,7 @@ async fn run(args: Args) -> anyhow::Result<()> { api_public_handle, api_handle, grpc_handle, - ) = launch(&cur_args, node_wallet.clone()).await; + ) = launch(&cur_args, node_wallet.clone(), Arc::clone(&sig_int_toggled)).await; // interrupt signal listener let (tx, rx) = crossbeam_channel::bounded(1); diff --git a/massa-node/src/settings.rs b/massa-node/src/settings.rs index fb2c4691ca7..19d94c68356 100644 --- a/massa-node/src/settings.rs +++ b/massa-node/src/settings.rs @@ -141,22 +141,18 @@ pub struct Settings { pub struct ConsensusSettings { /// Maximum number of blocks allowed in discarded blocks. pub max_discarded_blocks: usize, - /// If a block is `future_block_processing_max_periods` periods in the future, it is just discarded. - pub future_block_processing_max_periods: u64, /// Maximum number of blocks allowed in `FutureIncomingBlocks`. pub max_future_processing_blocks: usize, /// Maximum number of blocks allowed in `DependencyWaitingBlocks`. 
pub max_dependency_blocks: usize, /// stats time span pub stats_timespan: MassaTime, - /// max event send wait - pub max_send_wait: MassaTime, /// force keep at least this number of final periods in RAM for each thread pub force_keep_final_periods: u64, + /// force keep at least this number of final periods without operations in RAM for each thread + pub force_keep_final_periods_without_ops: u64, /// old blocks are pruned every `block_db_prune_interval` pub block_db_prune_interval: MassaTime, - /// max number of items returned while querying - pub max_item_return_count: usize, /// blocks headers channel capacity pub broadcast_blocks_headers_channel_capacity: usize, /// blocks channel capacity diff --git a/massa-node/src/tests/config.toml b/massa-node/src/tests/config.toml index 38dd973140d..f99e3276b36 100644 --- a/massa-node/src/tests/config.toml +++ b/massa-node/src/tests/config.toml @@ -14,15 +14,12 @@ [consensus] max_discarded_blocks = 100 - future_block_processing_max_periods = 100 max_future_processing_blocks = 400 max_dependency_blocks = 2048 - max_send_wait = 500 force_keep_final_periods = 20 staking_wallet_path = "../massa-node/config/staking_keys.json" stats_timespan = 60000 block_db_prune_interval = 5000 - max_item_return_count = 100 genesis_timestamp = 1638931299263 end_timestammp = 1638931299263 diff --git a/massa-pool-exports/Cargo.toml b/massa-pool-exports/Cargo.toml index ad55ec55bf7..925dc5a737f 100644 --- a/massa-pool-exports/Cargo.toml +++ b/massa-pool-exports/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "massa_pool_exports" -version = "0.1.0" +version = "0.23.0" authors = ["Massa Labs "] edition = "2021" diff --git a/massa-pool-exports/src/config.rs b/massa-pool-exports/src/config.rs index 2fb6298cc5e..2a72cba04f9 100644 --- a/massa-pool-exports/src/config.rs +++ b/massa-pool-exports/src/config.rs @@ -25,8 +25,12 @@ pub struct PoolConfig { pub max_endorsements_pool_size_per_thread: usize, /// max number of endorsements per block pub max_block_endorsement_count: u32, - /// operations and endorsements communication channels size - pub channels_size: usize, + /// operations channel capacity + pub operations_channel_size: usize, + /// endorsements channel capacity + pub endorsements_channel_size: usize, + /// denunciations channel capacity + pub denunciations_channel_size: usize, /// whether operations broadcast is enabled pub broadcast_enabled: bool, /// endorsements channel capacity diff --git a/massa-pool-exports/src/test_exports/config.rs b/massa-pool-exports/src/test_exports/config.rs index 75079879e7f..b2dd058cce2 100644 --- a/massa-pool-exports/src/test_exports/config.rs +++ b/massa-pool-exports/src/test_exports/config.rs @@ -20,7 +20,9 @@ impl Default for PoolConfig { max_endorsements_pool_size_per_thread: 1000, max_operations_per_block: MAX_OPERATIONS_PER_BLOCK, max_block_endorsement_count: ENDORSEMENT_COUNT, - channels_size: 1024, + operations_channel_size: 1024, + endorsements_channel_size: 1024, + denunciations_channel_size: 1024, broadcast_enabled: false, broadcast_endorsements_channel_capacity: 2000, broadcast_operations_channel_capacity: 5000, diff --git a/massa-pool-worker/Cargo.toml b/massa-pool-worker/Cargo.toml index 6df06039603..a7fc65b5e4f 100644 --- a/massa-pool-worker/Cargo.toml +++ b/massa-pool-worker/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "massa_pool_worker" -version = "0.1.0" +version = "0.23.0" authors = ["Massa Labs "] edition = "2021" @@ -18,6 +18,7 @@ massa_pos_exports = { path = "../massa-pos-exports" } [dev-dependencies] tokio = { version 
= "1.23", features = ["sync"] } +mockall = "0.11.4" massa_signature = { path = "../massa-signature" } massa_hash = { path = "../massa-hash" } massa_pool_exports = { path = "../massa-pool-exports", features = [ "testing" ] } @@ -26,8 +27,4 @@ crossbeam-channel = { version = "0.5" } # for more information on what are the following features used for, see the cargo.toml at workspace level [features] -testing = [ - "massa_pool_exports/testing", - "massa_execution_exports/testing", - "massa_pos_exports/testing", -] +testing = ["massa_pool_exports/testing", "massa_execution_exports/testing", "massa_pos_exports/testing"] diff --git a/massa-pool-worker/src/denunciation_pool.rs b/massa-pool-worker/src/denunciation_pool.rs index 57a29ae8d83..42c9d68d45e 100644 --- a/massa-pool-worker/src/denunciation_pool.rs +++ b/massa-pool-worker/src/denunciation_pool.rs @@ -115,8 +115,9 @@ impl DenunciationPool { match selected { Ok(selection) => { if let Some(address) = selection.endorsements.get(de_p.index as usize) { - if *address != Address::from_public_key(&de_p.public_key) { - debug!("Denunciation pool received a secure share endorsement but address was not selected"); + let a = Address::from_public_key(&de_p.public_key); + if *address != a { + debug!("Denunciation pool received a secure share endorsement but address was not selected: received {} but expected {} ({})", a, address, de_p.public_key); return; } } else { diff --git a/massa-pool-worker/src/tests/operation_pool_tests.rs b/massa-pool-worker/src/tests/operation_pool_tests.rs index 7e2dde8f742..5b5b9c77925 100644 --- a/massa-pool-worker/src/tests/operation_pool_tests.rs +++ b/massa-pool-worker/src/tests/operation_pool_tests.rs @@ -19,10 +19,12 @@ //! use crate::tests::tools::OpGenerator; -use super::tools::{create_some_operations, operation_pool_test, pool_test}; -use massa_execution_exports::test_exports::MockExecutionControllerMessage; +use super::tools::{create_some_operations, operation_pool_test, PoolTestBoilerPlate}; +use massa_execution_exports::MockExecutionController; use massa_models::{amount::Amount, operation::OperationId, slot::Slot}; use massa_pool_exports::PoolConfig; +use massa_pos_exports::MockSelectorController; +use mockall::Sequence; use std::time::Duration; #[test] @@ -58,167 +60,178 @@ fn test_add_irrelevant_operation() { #[test] fn test_pool() { let pool_config = PoolConfig::default(); - pool_test( - pool_config, - |mut pool_manager, mut pool, execution_receiver, _selector_receiver, storage_base| { - // generate (id, transactions, range of validity) by threads - let mut thread_tx_lists = vec![Vec::new(); pool_config.thread_count as usize]; - - let mut storage = storage_base.clone_without_refs(); - for i in 0..18 { - let expire_period: u64 = 10 + i; - let op = OpGenerator::default() - .expirery(expire_period) - .fee(Amount::from_raw(40 + i)) - .generate(); //get_transaction(expire_period, fee); - - storage.store_operations(vec![op.clone()]); - - //TODO: compare - // assert_eq!(storage.get_op_refs(), &Set::::default()); - - // duplicate - // let mut storage = storage_base.clone_without_refs(); - // storage.store_operations(vec![op.clone()]); - // pool.add_operations(storage); - //TODO: compare - //assert_eq!(storage.get_op_refs(), &ops.keys().copied().collect::>()); - - let op_thread = op - .content_creator_address - .get_thread(pool_config.thread_count); - - let start_period = - expire_period.saturating_sub(pool_config.operation_validity_periods); - - thread_tx_lists[op_thread as usize].push((op, start_period..=expire_period)); 
- } - - pool.add_operations(storage); - // Allow some time for the pool to add the operations - std::thread::sleep(Duration::from_millis(200)); - - // sort from bigger fee to smaller and truncate - for lst in thread_tx_lists.iter_mut() { - lst.reverse(); - lst.truncate(pool_config.max_operation_pool_size_per_thread); - } - - std::thread::spawn(move || loop { - match execution_receiver.recv_timeout(Duration::from_millis(2000)) { - // forward on the operations - Ok(MockExecutionControllerMessage::UnexecutedOpsAmong { - ops, - response_tx, - .. - }) => { - response_tx.send(ops).unwrap(); - } - // we want the operations to be paid for... - Ok(MockExecutionControllerMessage::GetFinalAndCandidateBalance { - response_tx, - .. - }) => response_tx - .send(vec![( + let execution_controller = { + let mut res = Box::new(MockExecutionController::new()); + res.expect_clone_box().return_once(|| { + let mut story = MockExecutionController::new(); + let mut seq = Sequence::new(); + for _ in 0..198 { + story + .expect_unexecuted_ops_among() + .times(1) + .returning(|ops, _| ops.clone()) + .in_sequence(&mut seq); + story + .expect_get_final_and_candidate_balance() + .times(1) + .returning(|_| { + vec![( + // Operations need to be paid for Some(Amount::from_raw(60 * 1_000_000_000)), Some(Amount::from_raw(60 * 1_000_000_000)), - )]) - .unwrap(), - _ => {} - } - }); - - // checks ops are the expected ones for thread 0 and 1 and various periods - for thread in 0u8..pool_config.thread_count { - for period in 0u64..70 { - let target_slot = Slot::new(period, thread); - let (ids, storage) = pool.get_block_operations(&target_slot); - - assert_eq!( - ids.iter() - .map(|id| ( - *id, - storage - .read_operations() - .get(id) - .unwrap() - .serialized_data - .clone() - )) - .collect::)>>(), - thread_tx_lists[target_slot.thread as usize] - .iter() - .filter(|(_, r)| r.contains(&target_slot.period)) - .map(|(op, _)| (op.id, op.serialized_data.clone())) - .collect::)>>() - ); - } - } - - // op ending before or at period 45 won't appear in the block due to incompatible validity range - // we don't keep them as expected ops - let final_period = 45u64; - pool.notify_final_cs_periods(&vec![final_period; pool_config.thread_count as usize]); - // Wait for pool to manage the above command - std::thread::sleep(Duration::from_millis(200)); - for lst in thread_tx_lists.iter_mut() { - lst.retain(|(op, _)| op.content.expire_period > final_period); - } - - // checks ops are the expected ones for thread 0 and 1 and various periods - for thread in 0u8..pool_config.thread_count { - for period in 0u64..70 { - let target_slot = Slot::new(period, thread); - let max_count = 4; - let (ids, storage) = pool.get_block_operations(&target_slot); - assert_eq!( - ids.iter() - .map(|id| ( - *id, - storage - .read_operations() - .get(id) - .unwrap() - .serialized_data - .clone() - )) - .collect::)>>(), - thread_tx_lists[target_slot.thread as usize] - .iter() - .filter(|(_, r)| r.contains(&target_slot.period)) - .take(max_count) - .map(|(op, _)| (op.id, op.serialized_data.clone())) - .collect::)>>() - ); - } - } - - // add transactions with a high fee but too much in the future: should be ignored - { - //TODO: update current slot - //pool.update_current_slot(Slot::new(10, 0)); - let expire_period: u64 = 300; - let op = OpGenerator::default() - .expirery(expire_period) - .fee(Amount::from_raw(1000)) - .generate(); - let mut storage = storage_base.clone_without_refs(); - storage.store_operations(vec![op.clone()]); - pool.add_operations(storage); - // Allow 
some time for the pool to add the operations - std::thread::sleep(Duration::from_millis(100)); - //TODO: compare - //assert_eq!(storage.get_op_refs(), &Set::::default()); - let op_thread = op - .content_creator_address - .get_thread(pool_config.thread_count); - let (ids, _) = pool.get_block_operations(&Slot::new( - expire_period - pool_config.operation_validity_periods - 1, - op_thread, - )); - assert!(ids.is_empty()); + )] + }) + .in_sequence(&mut seq); } - pool_manager.stop(); - }, - ); + Box::new(story) + }); + res + }; + + let selector_controller = { + let mut res = Box::new(MockSelectorController::new()); + res.expect_clone_box() + .times(3) + .returning(|| Box::new(MockSelectorController::new())); + res + }; + + let PoolTestBoilerPlate { + mut pool_manager, + mut pool_controller, + storage: storage_base, + } = PoolTestBoilerPlate::pool_test(pool_config, execution_controller, selector_controller); + + // generate (id, transactions, range of validity) by threads + let mut thread_tx_lists = vec![Vec::new(); pool_config.thread_count as usize]; + + let mut storage = storage_base.clone_without_refs(); + for i in 0..18 { + let expire_period: u64 = 10 + i; + let op = OpGenerator::default() + .expirery(expire_period) + .fee(Amount::from_raw(40 + i)) + .generate(); //get_transaction(expire_period, fee); + + storage.store_operations(vec![op.clone()]); + + //TODO: compare + // assert_eq!( + // storage.get_op_refs(), + // &massa_models::prehash::PreHashSet::::default() + // ); + + // duplicate + // let mut storage = storage_base.clone_without_refs(); + // storage.store_operations(vec![op.clone()]); + // pool.add_operations(storage); + //TODO: compare + //assert_eq!(storage.get_op_refs(), &ops.keys().copied().collect::>()); + + let op_thread = op + .content_creator_address + .get_thread(pool_config.thread_count); + + let start_period = expire_period.saturating_sub(pool_config.operation_validity_periods); + + thread_tx_lists[op_thread as usize].push((op, start_period..=expire_period)); + } + + pool_controller.add_operations(storage); + + // sort from bigger fee to smaller and truncate + for lst in thread_tx_lists.iter_mut() { + lst.reverse(); + lst.truncate(pool_config.max_operation_pool_size_per_thread); + } + + // checks ops are the expected ones for thread 0 and 1 and various periods + for thread in 0u8..pool_config.thread_count { + for period in 0u64..70 { + let target_slot = Slot::new(period, thread); + let (ids, storage) = pool_controller.get_block_operations(&target_slot); + + assert_eq!( + ids.iter() + .map(|id| ( + *id, + storage + .read_operations() + .get(id) + .unwrap() + .serialized_data + .clone() + )) + .collect::)>>(), + thread_tx_lists[target_slot.thread as usize] + .iter() + .filter(|(_, r)| r.contains(&target_slot.period)) + .map(|(op, _)| (op.id, op.serialized_data.clone())) + .collect::)>>() + ); + } + } + + // op ending before or at period 45 won't appear in the block due to incompatible validity range + // we don't keep them as expected ops + let final_period = 45u64; + pool_controller.notify_final_cs_periods(&vec![final_period; pool_config.thread_count as usize]); + + for lst in thread_tx_lists.iter_mut() { + lst.retain(|(op, _)| op.content.expire_period > final_period); + } + + // checks ops are the expected ones for thread 0 and 1 and various periods + for thread in 0u8..pool_config.thread_count { + for period in 0u64..70 { + let target_slot = Slot::new(period, thread); + let max_count = 4; + let (ids, storage) = pool_controller.get_block_operations(&target_slot); + 
assert_eq!( + ids.iter() + .map(|id| ( + *id, + storage + .read_operations() + .get(id) + .unwrap() + .serialized_data + .clone() + )) + .collect::)>>(), + thread_tx_lists[target_slot.thread as usize] + .iter() + .filter(|(_, r)| r.contains(&target_slot.period)) + .take(max_count) + .map(|(op, _)| (op.id, op.serialized_data.clone())) + .collect::)>>() + ); + } + } + + // add transactions with a high fee but too much in the future: should be ignored + { + //TODO: update current slot + //pool_controller.update_current_slot(Slot::new(10, 0)); + let expire_period: u64 = 300; + let op = OpGenerator::default() + .expirery(expire_period) + .fee(Amount::from_raw(1000)) + .generate(); + let mut storage = storage_base.clone_without_refs(); + storage.store_operations(vec![op.clone()]); + pool_controller.add_operations(storage); + + //TODO: compare + //assert_eq!(storage.get_op_refs(), &Set::::default()); + let op_thread = op + .content_creator_address + .get_thread(pool_config.thread_count); + let (ids, _) = pool_controller.get_block_operations(&Slot::new( + expire_period - pool_config.operation_validity_periods - 1, + op_thread, + )); + assert!(ids.is_empty()); + } + pool_manager.stop(); } diff --git a/massa-pool-worker/src/tests/scenario.rs b/massa-pool-worker/src/tests/scenario.rs index 8b11d84df48..1474ae6ef6b 100644 --- a/massa-pool-worker/src/tests/scenario.rs +++ b/massa-pool-worker/src/tests/scenario.rs @@ -12,16 +12,13 @@ //! Same as the previous test with a low limit of size to check if //! configurations are taken into account. -use std::sync::mpsc::Receiver; -use std::thread; +use mockall::Sequence; +use std::rc::Rc; use std::time::Duration; use crate::tests::tools::create_some_operations; -use crate::tests::tools::pool_test; use crate::tests::tools::OpGenerator; -use massa_execution_exports::test_exports::{ - MockExecutionControllerMessage as ControllerMsg, MockExecutionControllerMessage, -}; +use massa_execution_exports::MockExecutionController; use massa_models::address::Address; use massa_models::amount::Amount; use massa_models::denunciation::{Denunciation, DenunciationIndex, DenunciationPrecursor}; @@ -32,113 +29,107 @@ use massa_models::test_exports::{ gen_block_headers_for_denunciation, gen_endorsements_for_denunciation, }; use massa_pool_exports::PoolConfig; -use massa_pos_exports::test_exports::MockSelectorControllerMessage; +use massa_pos_exports::MockSelectorController; use massa_pos_exports::{PosResult, Selection}; use massa_signature::KeyPair; +use super::tools::PoolTestBoilerPlate; + /// # Test simple get operation /// Just try to get some operations stored in pool /// /// ## Initialization /// Insert multiple operations in the pool. (10) /// -/// Start mocked execution controller thread. (expected 2 calls of `unexecuted_ops_among` -/// that return the full storage) -/// The execution thread will response that no operations had been executed. -/// -/// ## Expected results -/// The execution controller is expected to be asked 2 times for the first interaction: -/// - to check the already executed operations -/// - to check the final and candidate balances of the creator address -/// And one time for the 9 next to check the executed operations. +/// Create a mock-execution-controller story: +/// 1. unexecuted_ops_among, returning the op-ids of what gets inserted into the pool +/// 2. get_final_and_candidate_balance, returning 1, 1 +/// 3. 
repeat #1 9 times /// /// The block operation storage built for all threads is expected to have the /// same length than those added previously. #[test] fn test_simple_get_operations() { + // Set up the execution story. + let keypair = KeyPair::generate(0).unwrap(); + let addr = Address::from_public_key(&keypair.get_public_key()).clone(); + + // setup operations + let op_gen = OpGenerator::default().creator(keypair.clone()).expirery(1); + let ops = create_some_operations(10, &op_gen); + let mock_owned_ops = ops.iter().map(|op| op.id).collect(); + + let mut execution_controller = Box::new(MockExecutionController::new()); + execution_controller.expect_clone_box().returning(move || { + Box::new(create_basic_get_block_operation_execution_mock( + 10, + addr, + vec![(Some(Amount::from_raw(1)), Some(Amount::from_raw(1)))], + &mock_owned_ops, + )) + }); + + // Provide the selector boilerplate + let mut selector_controller = Box::new(MockSelectorController::new()); + selector_controller + .expect_clone_box() + .returning(|| Box::new(MockSelectorController::new())); + + // Set up the pool controller let config = PoolConfig::default(); - pool_test( - config, - |mut pool_manager, - mut pool_controller, - execution_receiver, - _selector_receiver, - mut storage| { - //setup meta-data - let keypair = KeyPair::generate(); - let op_gen = OpGenerator::default().creator(keypair.clone()).expirery(1); - let creator_address = Address::from_public_key(&keypair.get_public_key()); - let creator_thread = creator_address.get_thread(config.thread_count); - - // setup storage - storage.store_operations(create_some_operations(10, &op_gen)); - let unexecuted_ops = storage.get_op_refs().clone(); - pool_controller.add_operations(storage); - // Allow some time for the pool to add the operations - std::thread::sleep(Duration::from_millis(100)); - - // Start mock execution thread. - // Provides the data for `pool_controller.get_block_operations` - launch_basic_get_block_operation_execution_mock( - 10, - unexecuted_ops, - execution_receiver, - creator_address, - vec![(Some(Amount::from_raw(1)), Some(Amount::from_raw(1)))], - ); - // This is what we are testing.... - let block_operations_storage = pool_controller - .get_block_operations(&Slot::new(1, creator_thread)) - .1; + let PoolTestBoilerPlate { + mut pool_manager, + mut pool_controller, + mut storage, + } = PoolTestBoilerPlate::pool_test(config, execution_controller, selector_controller); - pool_manager.stop(); + // setup storage + storage.store_operations(ops); + pool_controller.add_operations(storage); - assert_eq!(block_operations_storage.get_op_refs().len(), 10); - }, - ); + // Allow some time for the pool to add the operations + std::thread::sleep(Duration::from_millis(100)); + + let creator_thread = { + let creator_address = Address::from_public_key(&keypair.get_public_key()); + creator_address.get_thread(config.thread_count) + }; + // This is what we are testing.... + let block_operations_storage = pool_controller + .get_block_operations(&Slot::new(1, creator_thread)) + .1; + + pool_manager.stop(); + + assert_eq!(block_operations_storage.get_op_refs().len(), 10); } -/// Launch a default mock for execution controller on call `get_block_operation` API. -pub fn launch_basic_get_block_operation_execution_mock( 
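These rewritten tests script each controller up front in the `mockall` style: every `expect_*` call enrolls one step of a story, `withf` pins the expected arguments, `times` bounds how often the step may run, and a `Sequence` enforces ordering, with verification happening when the mock is dropped. A small self-contained sketch of the idiom against an invented `Ledger` trait (nothing below comes from the Massa codebase):

    use mockall::{automock, Sequence};

    #[automock]
    trait Ledger {
        fn balance_of(&self, addr: u64) -> u64;
        fn debit(&mut self, addr: u64, amount: u64);
    }

    fn main() {
        let mut mock = MockLedger::new();
        let mut seq = Sequence::new();

        // Step 1 of the story: exactly one balance query, for address 42 only.
        mock.expect_balance_of()
            .times(1)
            .withf(|addr| *addr == 42)
            .returning(|_| 100)
            .in_sequence(&mut seq);

        // Step 2: exactly one debit, and only after the balance query.
        mock.expect_debit()
            .times(1)
            .returning(|_, _| ())
            .in_sequence(&mut seq);

        let balance = mock.balance_of(42);
        mock.debit(42, balance / 2);
        // Dropping `mock` verifies that the whole story was consumed.
    }

+/// Create a default mock-story for the execution controller, used on calls to the `get_block_operations` API. 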
+pub fn create_basic_get_block_operation_execution_mock( operations_len: usize, - unexecuted_ops: PreHashSet, - recvr: Receiver, creator_address: Address, balance_vec: Vec<(Option, Option)>, -) { - let receive = |er: &Receiver| er.recv_timeout(Duration::from_millis(100)); - std::thread::spawn(move || { - match receive(&recvr) { - Ok(ControllerMsg::UnexecutedOpsAmong { response_tx, .. }) => { - response_tx.send(unexecuted_ops.clone()).unwrap() - } - Ok(op) => panic!("Expected `ControllerMsg::UnexecutedOpsAmong`, got {:?}", op), - Err(_) => panic!("execution never called"), - } - match receive(&recvr) { - Ok(ControllerMsg::GetFinalAndCandidateBalance { - addresses, - response_tx, - .. - }) => { - assert_eq!(addresses.len(), 1); - assert_eq!(addresses[0], creator_address); - response_tx.send(balance_vec).unwrap(); - } - Ok(op) => panic!( - "Expected `ControllerMsg::GetFinalAndCandidateBalance`, got {:?}", - op - ), - Err(_) => panic!("execution never called"), - } - - (1..operations_len).for_each(|_| { - if let Ok(ControllerMsg::UnexecutedOpsAmong { response_tx, .. }) = receive(&recvr) { - response_tx.send(unexecuted_ops.clone()).unwrap(); - } - }) - }); + ops: &PreHashSet, +) -> MockExecutionController { + let mut res = MockExecutionController::new(); + let mut seq = Sequence::new(); + let ops1 = ops.clone(); + let ops2 = Rc::new(ops.clone()); + res.expect_unexecuted_ops_among() + .times(1) + .return_once(|_, _| ops1) + .in_sequence(&mut seq); + res.expect_get_final_and_candidate_balance() + .times(1) + .return_once(|_| balance_vec) + .withf(move |addrs| addrs.len() == 1 && addrs[0] == creator_address) + .in_sequence(&mut seq); + res.expect_unexecuted_ops_among() + .times(operations_len - 1) + .returning_st(move |_, _| (&*ops2).clone()) + .in_sequence(&mut seq); + res } /// # Test get block operation with overflow @@ -160,52 +151,54 @@ fn test_get_operations_overflow() { // setup metadata static OP_LEN: usize = 10; static MAX_OP_LEN: usize = 5; - let mut max_block_size = 0; - let keypair = KeyPair::generate(); + let keypair = KeyPair::generate(0).unwrap(); let creator_address = Address::from_public_key(&keypair.get_public_key()); let op_gen = OpGenerator::default().expirery(1).creator(keypair); let operations = create_some_operations(OP_LEN, &op_gen); - operations + let max_block_size = operations .iter() .take(MAX_OP_LEN) - .for_each(|op| max_block_size += op.serialized_size() as u32); + .fold(0, |acc, op| acc + op.serialized_size() as u32); let config = PoolConfig { max_block_size, ..Default::default() }; let creator_thread = creator_address.get_thread(config.thread_count); - pool_test( - config, - |mut pool_manager, - mut pool_controller, - execution_receiver, - _selector_receiver, - mut storage| { - // setup storage - storage.store_operations(operations); - let unexecuted_ops = storage.get_op_refs().clone(); - pool_controller.add_operations(storage); - // Allow some time for the pool to add the operations - std::thread::sleep(Duration::from_millis(100)); - - // start mock execution thread - launch_basic_get_block_operation_execution_mock( - OP_LEN, - unexecuted_ops, - execution_receiver, - creator_address, - vec![(Some(Amount::from_raw(1)), Some(Amount::from_raw(1)))], - ); - let block_operations_storage = pool_controller - .get_block_operations(&Slot::new(1, creator_thread)) - .1; - - pool_manager.stop(); + let mut execution_controller = Box::new(MockExecutionController::new()); + execution_controller.expect_clone_box().returning(move || { + 
Box::new(create_basic_get_block_operation_execution_mock( + MAX_OP_LEN, + creator_address, + vec![(Some(Amount::from_raw(1)), Some(Amount::from_raw(1)))], + &operations.iter().take(MAX_OP_LEN).map(|op| op.id).collect(), + )) + }); - // Provide the selector boilerplate + let mut selector_controller = Box::new(MockSelectorController::new()); + selector_controller + .expect_clone_box() + .returning(|| Box::new(MockSelectorController::new())); + + let PoolTestBoilerPlate { + mut pool_manager, + mut pool_controller, + mut storage, + } = PoolTestBoilerPlate::pool_test(config, execution_controller, selector_controller); + + storage.store_operations(create_some_operations(10, &op_gen)); + pool_controller.add_operations(storage); + // Allow some time for the pool to add the operations + std::thread::sleep(Duration::from_millis(100)); + + // This is what we are testing.... + let block_operations_storage = pool_controller + .get_block_operations(&Slot::new(1, creator_thread)) + .1; + pool_manager.stop(); + + assert_eq!(block_operations_storage.get_op_refs().len(), MAX_OP_LEN); } #[test] @@ -221,42 +214,34 @@ fn test_block_header_denunciation_creation() { let denunciation_orig = Denunciation::try_from((&secured_header_1, &secured_header_2)).unwrap(); let config = PoolConfig::default(); - pool_test( - config, - |mut pool_manager, pool_controller, _execution_receiver, selector_receiver, _storage| { - pool_controller.add_denunciation_precursor(de_p_1); - pool_controller.add_denunciation_precursor(de_p_2); - // Allow some time for the pool to add the operations - loop { - match selector_receiver.recv_timeout(Duration::from_millis(100)) { - Ok(MockSelectorControllerMessage::GetProducer { - slot: _slot, - response_tx, - }) => { - response_tx.send(PosResult::Ok(address)).unwrap(); - } - Ok(msg) => { - panic!( - "Received an unexpected message from mock selector: {:?}", - msg - ); - } - Err(_e) => { - // timeout - break; - } - } - } - - assert_eq!(pool_controller.get_denunciation_count(), 1); - assert_eq!( - pool_controller.contains_denunciation(&denunciation_orig), - true - ); - - pool_manager.stop(); - }, + // let mut selector_controller = Box::new(MockSelectorController::new()); + let mut res = MockSelectorController::new(); + res.expect_get_producer() + .times(2) + .returning(move |_| PosResult::Ok(address)); + let selector_controller = pool_test_mock_selector_controller(res); + + let mut execution_controller = Box::new(MockExecutionController::new()); + execution_controller + .expect_clone_box() + .returning(move || Box::new(MockExecutionController::new())); + let PoolTestBoilerPlate { + mut pool_manager, + pool_controller, + storage: _storage, + } = PoolTestBoilerPlate::pool_test(config, execution_controller, selector_controller); + + pool_controller.add_denunciation_precursor(de_p_1); + pool_controller.add_denunciation_precursor(de_p_2); + std::thread::sleep(Duration::from_millis(100)); + + assert_eq!(pool_controller.get_denunciation_count(), 1); + assert_eq!( + pool_controller.contains_denunciation(&denunciation_orig), + true ); + + pool_manager.stop(); } #[test] @@ -272,37 +257,30 @@ fn test_endorsement_denunciation_creation() { let denunciation_orig = Denunciation::try_from((&s_endorsement_1, &s_endorsement_2)).unwrap(); let config = PoolConfig::default(); - pool_test( - config, - |mut pool_manager, pool_controller, _execution_receiver, selector_receiver, _storage| { + { + let mut execution_controller = 
Box::new(MockExecutionController::new()); + execution_controller + .expect_clone_box() + .returning(move || Box::new(MockExecutionController::new())); + + let mut res = MockSelectorController::new(); + res.expect_get_selection().times(2).returning(move |_| { + PosResult::Ok(Selection { + endorsements: vec![address; usize::from(config.thread_count)], + producer: address, + }) + }); + let selector_controller = pool_test_mock_selector_controller(res); + let PoolTestBoilerPlate { + mut pool_manager, + pool_controller, + storage: _storage, + } = PoolTestBoilerPlate::pool_test(config, execution_controller, selector_controller); + + { pool_controller.add_denunciation_precursor(de_p_1); pool_controller.add_denunciation_precursor(de_p_2); - // Allow some time for the pool to add the operations - loop { - match selector_receiver.recv_timeout(Duration::from_millis(100)) { - Ok(MockSelectorControllerMessage::GetSelection { - slot: _slot, - response_tx, - }) => { - let selection = Selection { - endorsements: vec![address; usize::from(config.thread_count)], - producer: address, - }; - - response_tx.send(PosResult::Ok(selection)).unwrap(); - } - Ok(msg) => { - panic!( - "Received an unexpected message from mock selector: {:?}", - msg - ); - } - Err(_e) => { - // timeout - break; - } - } - } + std::thread::sleep(Duration::from_millis(200)); assert_eq!(pool_controller.get_denunciation_count(), 1); assert_eq!( @@ -311,8 +289,8 @@ fn test_endorsement_denunciation_creation() { ); pool_manager.stop(); - }, - ); + } + }; } #[test] @@ -338,94 +316,79 @@ fn test_denunciation_pool_get() { let de_idx_2 = DenunciationIndex::from(&denunciation_orig_2); let config = PoolConfig::default(); - pool_test( - config, - |mut pool_manager, pool_controller, execution_receiver, selector_receiver, _storage| { - // ~ random order (but need to keep the precursor order otherwise Denunciation::PartialEq will fail) - pool_controller.add_denunciation_precursor(de_p_3); - pool_controller.add_denunciation_precursor(de_p_1); - pool_controller.add_denunciation_precursor(de_p_4); - pool_controller.add_denunciation_precursor(de_p_2); + let execution_controller = { + let mut res = Box::new(MockExecutionController::new()); + res.expect_clone_box() + .return_once(move || Box::new(MockExecutionController::new())); + + res.expect_is_denunciation_executed() + .times(2) + .returning(move |de_idx| de_idx != &de_idx_2); + res + }; - // Allow some time for the pool to add the operations - loop { - match selector_receiver.recv_timeout(Duration::from_millis(100)) { - Ok(MockSelectorControllerMessage::GetProducer { - slot: _slot, - response_tx, - }) => { - response_tx.send(PosResult::Ok(address_2)).unwrap(); - } - Ok(MockSelectorControllerMessage::GetSelection { - slot: _slot, - response_tx, - }) => { - let selection = Selection { - endorsements: vec![address_1; usize::from(config.thread_count)], - producer: address_1, - }; - - response_tx.send(PosResult::Ok(selection)).unwrap(); - } - Ok(msg) => { - panic!( - "Received an unexpected message from mock selector: {:?}", - msg - ); - } - Err(_) => { - // timeout, exit the loop - break; - } - } - } - - assert_eq!(pool_controller.get_denunciation_count(), 2); - assert_eq!( - pool_controller.contains_denunciation(&denunciation_orig_1), - true - ); - assert_eq!( - pool_controller.contains_denunciation(&denunciation_orig_2), - true - ); + let selector_controller = { + let mut res = MockSelectorController::new(); + res.expect_get_producer() + .times(2) + .returning(move |_| PosResult::Ok(address_2)); + 
res.expect_get_selection().times(2).returning(move |_| { + PosResult::Ok(Selection { + endorsements: vec![address_1; usize::from(config.thread_count)], + producer: address_1, + }) + }); + pool_test_mock_selector_controller(res) + }; - // Now ask for denunciations - // Note that we need 2 threads as the get_block_denunciations call will wait for - // the mock execution controller to return - - let target_slot_1 = Slot::new(4, 0); - let thread_1 = thread::spawn(move || loop { - match execution_receiver.recv_timeout(Duration::from_millis(100)) { - Ok(MockExecutionControllerMessage::IsDenunciationExecuted { - de_idx, - response_tx, - }) => { - // Note: this should prevent denunciation_orig_1 to be included - if de_idx == de_idx_2 { - response_tx.send(false).unwrap(); - } else { - response_tx.send(true).unwrap(); - } - } - Ok(msg) => { - panic!( - "Received an unexpected message from mock execution: {:?}", - msg - ); - } - Err(_) => break, - } - }); - let thread_2 = - thread::spawn(move || pool_controller.get_block_denunciations(&target_slot_1)); - - thread_1.join().unwrap(); - let denunciations = thread_2.join().unwrap(); - - assert_eq!(denunciations, vec![denunciation_orig_2]); + let PoolTestBoilerPlate { + mut pool_manager, + pool_controller, + storage: _storage, + } = PoolTestBoilerPlate::pool_test(config, execution_controller, selector_controller); - pool_manager.stop(); - }, - ) + // And so begins the test + { + // ~ random order (but need to keep the precursor order otherwise Denunciation::PartialEq will fail) + pool_controller.add_denunciation_precursor(de_p_3); + pool_controller.add_denunciation_precursor(de_p_1); + pool_controller.add_denunciation_precursor(de_p_4); + pool_controller.add_denunciation_precursor(de_p_2); + + std::thread::sleep(Duration::from_millis(200)); + + assert_eq!(pool_controller.get_denunciation_count(), 2); + assert_eq!( + pool_controller.contains_denunciation(&denunciation_orig_1), + true + ); + assert_eq!( + pool_controller.contains_denunciation(&denunciation_orig_2), + true + ); + + let target_slot_1 = Slot::new(4, 0); + + let denunciations = pool_controller.get_block_denunciations(&target_slot_1); + + assert_eq!(denunciations, vec![denunciation_orig_2]); + + pool_manager.stop(); + } +} + +// The _actual_ story of the mock involves some clones that we don't want to worry about. +// This helper method means that tests need only concern themselves with the actual story. 
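Why the helper below exists: the pool hands clones of the selector controller to several components, and only one of those clones should carry the scripted expectations. The shape of the trick, reduced to a toy `Controller` trait (all names illustrative, not from the codebase):

    use mockall::{mock, Sequence};

    trait Controller {
        fn ping(&self) -> u64;
        fn clone_box(&self) -> Box<dyn Controller>;
    }

    mock! {
        Ctl {}
        impl Controller for Ctl {
            fn ping(&self) -> u64;
            fn clone_box(&self) -> Box<dyn Controller>;
        }
    }

    fn main() {
        let mut seq = Sequence::new();

        // The story: what the component under test should observe.
        let mut story = MockCtl::new();
        story.expect_ping().times(1).returning(|| 42);

        // The wrapper only answers clone_box: the first clone is an empty mock
        // (for a component we do not care about), the second hands out the story.
        let mut outer = MockCtl::new();
        outer
            .expect_clone_box()
            .times(1)
            .returning(|| Box::new(MockCtl::new()))
            .in_sequence(&mut seq);
        outer
            .expect_clone_box()
            .times(1)
            .return_once(move || Box::new(story))
            .in_sequence(&mut seq);

        let _idle = outer.clone_box(); // consumed by some uninterested component
        let active = outer.clone_box(); // the clone the test exercises
        assert_eq!(active.ping(), 42);
    }
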
+pub fn pool_test_mock_selector_controller( + story: MockSelectorController, +) -> Box { + let mut selector_controller = Box::new(MockSelectorController::new()); + selector_controller + .expect_clone_box() + .times(2) + .returning(move || Box::new(MockSelectorController::new())); + selector_controller + .expect_clone_box() + .return_once(move || Box::new(story)); + selector_controller } diff --git a/massa-pool-worker/src/tests/tools.rs b/massa-pool-worker/src/tests/tools.rs index a6034ffebd6..6196d6fbef7 100644 --- a/massa-pool-worker/src/tests/tools.rs +++ b/massa-pool-worker/src/tests/tools.rs @@ -2,9 +2,7 @@ use crate::{operation_pool::OperationPool, start_pool_controller}; use crossbeam_channel as _; -use massa_execution_exports::test_exports::{ - MockExecutionController, MockExecutionControllerMessage, -}; +use massa_execution_exports::MockExecutionController; use massa_hash::Hash; use massa_models::{ address::Address, @@ -16,10 +14,9 @@ use massa_models::{ slot::Slot, }; use massa_pool_exports::{PoolChannels, PoolConfig, PoolController, PoolManager}; -use massa_pos_exports::test_exports::{MockSelectorController, MockSelectorControllerMessage}; +use massa_pos_exports::MockSelectorController as AutoMockSelectorController; use massa_signature::KeyPair; use massa_storage::Storage; -use std::sync::mpsc::Receiver; use tokio::sync::broadcast; #[derive(Default)] @@ -60,8 +57,14 @@ impl OpGenerator { } pub(crate) fn generate(&self) -> SecureShareOperation { - let creator = self.creator.clone().unwrap_or_else(KeyPair::generate); - let receiver = self.receiver.clone().unwrap_or_else(KeyPair::generate); + let creator = self + .creator + .clone() + .unwrap_or_else(|| KeyPair::generate(0).unwrap()); + let receiver = self + .receiver + .clone() + .unwrap_or_else(|| KeyPair::generate(0).unwrap()); let fee = self.fee.unwrap_or_default(); let amount = self.amount.unwrap_or_default(); let expirery = self.expirery.unwrap_or_default(); @@ -84,40 +87,38 @@ pub(crate) fn create_some_operations(n: usize, op_gen: &OpGenerator) -> Vec(cfg: PoolConfig, test: F) -where - F: FnOnce( - Box, - Box, - Receiver, - crossbeam_channel::Receiver, - Storage, - ), -{ - let storage: Storage = Storage::create_root(); - let endorsement_sender = broadcast::channel(2000).0; - let operation_sender = broadcast::channel(5000).0; - let (execution_controller, execution_receiver) = MockExecutionController::new_with_receiver(); - let (selector_controller, selector_receiver) = MockSelectorController::new_with_receiver(); - let (pool_manager, pool_controller) = start_pool_controller( - cfg, - &storage, - execution_controller, - PoolChannels { - endorsement_sender, - operation_sender, - selector: selector_controller, - }, - ); +pub struct PoolTestBoilerPlate { + pub pool_manager: Box, + pub pool_controller: Box, + pub storage: Storage, +} +impl PoolTestBoilerPlate { + /// Sets up a pool system that can be run, using the mock stories provided + pub fn pool_test( + cfg: PoolConfig, + execution_story: Box, + selector_story: Box, + ) -> Self { + let storage: Storage = Storage::create_root(); + let endorsement_sender = broadcast::channel(2000).0; + let operation_sender = broadcast::channel(5000).0; + let (pool_manager, pool_controller) = start_pool_controller( + cfg, + &storage, + execution_story, + PoolChannels { + endorsement_sender, + operation_sender, + selector: selector_story, + }, + ); - test( - pool_manager, - pool_controller, - execution_receiver, - selector_receiver, - storage, - ) + Self { + pool_manager, + pool_controller, 
storage, + } + } } pub fn operation_pool_test(cfg: PoolConfig, test: F) @@ -126,8 +127,8 @@ where { let endorsement_sender = broadcast::channel(2000).0; let operation_sender = broadcast::channel(5000).0; - let (execution_controller, _) = MockExecutionController::new_with_receiver(); - let (selector_controller, _selector_receiver) = MockSelectorController::new_with_receiver(); + let execution_controller = Box::new(MockExecutionController::new()); + let selector_controller = Box::new(AutoMockSelectorController::new()); let storage = Storage::create_root(); test( OperationPool::init( @@ -146,7 +147,7 @@ where /// Creates an endorsement for use in pool tests. pub fn _create_endorsement(slot: Slot) -> SecureShareEndorsement { - let sender_keypair = KeyPair::generate(); + let sender_keypair = KeyPair::generate(0).unwrap(); let content = Endorsement { slot, diff --git a/massa-pool-worker/src/worker.rs b/massa-pool-worker/src/worker.rs index d24f2b78529..c9290c313ee 100644 --- a/massa-pool-worker/src/worker.rs +++ b/massa-pool-worker/src/worker.rs @@ -182,11 +182,12 @@ pub fn start_pool_controller( // denunciation_factory_tx: Sender, // denunciation_factory_rx: Receiver, ) -> (Box, Box) { - let (operations_input_sender, operations_input_receiver) = sync_channel(config.channels_size); + let (operations_input_sender, operations_input_receiver) = + sync_channel(config.operations_channel_size); let (endorsements_input_sender, endorsements_input_receiver) = - sync_channel(config.channels_size); + sync_channel(config.endorsements_channel_size); let (denunciations_input_sender, denunciations_input_receiver) = - sync_channel(config.channels_size); + sync_channel(config.denunciations_channel_size); let operation_pool = Arc::new(RwLock::new(OperationPool::init( config, storage, diff --git a/massa-pos-exports/Cargo.toml b/massa-pos-exports/Cargo.toml index a59f1bea4ad..da2514bcf00 100644 --- a/massa-pos-exports/Cargo.toml +++ b/massa-pos-exports/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "massa_pos_exports" -version = "0.1.0" +version = "0.23.0" authors = ["Massa Labs "] edition = "2021" @@ -9,27 +9,28 @@ edition = "2021" [dependencies] bitvec = { version = "1.0", features = ["serde"] } displaydoc = "0.2" -nom = "7.1" +nom = "=7.1" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" thiserror = "1.0" -tokio = { version = "1.23", features = ["full"] } tracing = "0.1" -anyhow = "1.0" num = { version = "0.4", features = ["serde"] } parking_lot = { version = "0.12", features = ["deadlock_detection"] } crossbeam-channel = { version = "0.5", optional = true } +rocksdb = "0.20" +mockall = { version = "0.11.4", optional = true } + # custom modules massa_hash = { path = "../massa-hash" } massa_models = { path = "../massa-models" } -massa_time = { path = "../massa-time" } -massa_ledger_exports = { path = "../massa-ledger-exports" } massa_serialization = { path = "../massa-serialization" } massa_signature = { path = "../massa-signature" } +massa_db = { path = "../massa-db" } + +[dev-dependencies] +mockall = "0.11.4" +tempfile = "3.3" # for more information on what are the following features used for, see the cargo.toml at workspace level [features] -testing = [ - "crossbeam-channel", - "massa_models/testing" -] +testing = ["crossbeam-channel", "massa_models/testing", "mockall"] diff --git a/massa-pos-exports/src/config.rs b/massa-pos-exports/src/config.rs index 5b31788cea1..364d4f0e5a4 100644 --- a/massa-pos-exports/src/config.rs +++ b/massa-pos-exports/src/config.rs @@ -9,6 +9,10 @@ pub struct 
PoSConfig { pub thread_count: u8, /// number of saved cycle pub cycle_history_length: usize, - /// maximum size of a deferred credits bootstrap part - pub credits_bootstrap_part_size: u64, + /// maximum rolls length + pub max_rolls_length: u64, + /// maximum production stats length + pub max_production_stats_length: u64, + /// maximum deferred credits length + pub max_credit_length: u64, } diff --git a/massa-pos-exports/src/controller_traits.rs b/massa-pos-exports/src/controller_traits.rs index 1860a775d1f..d002fc66e44 100644 --- a/massa-pos-exports/src/controller_traits.rs +++ b/massa-pos-exports/src/controller_traits.rs @@ -24,6 +24,7 @@ pub struct Selection { pub producer: Address, } +#[cfg_attr(any(test, feature = "testing"), mockall::automock)] /// interface that communicates with the selector worker thread pub trait SelectorController: Send + Sync { /// Waits for draws to reach at least a given cycle number. diff --git a/massa-pos-exports/src/cycle_info.rs b/massa-pos-exports/src/cycle_info.rs index b558034b6cb..3b84e579693 100644 --- a/massa-pos-exports/src/cycle_info.rs +++ b/massa-pos-exports/src/cycle_info.rs @@ -4,7 +4,6 @@ use massa_models::{ address::{Address, AddressDeserializer, AddressSerializer}, prehash::PreHashMap, serialization::{BitVecDeserializer, BitVecSerializer}, - slot::Slot, }; use massa_serialization::{ Deserializer, OptionDeserializer, OptionSerializer, SerializeError, Serializer, @@ -24,8 +23,6 @@ use serde::{Deserialize, Serialize}; use std::collections::{BTreeMap, VecDeque}; use std::ops::Bound::Included; -use crate::PoSChanges; - const CYCLE_INFO_HASH_INITIAL_BYTES: &[u8; 32] = &[0; HASH_SIZE_BYTES]; struct CycleInfoHashComputer { @@ -155,185 +152,17 @@ impl CycleInfo { final_state_hash_snapshot: None, } } - - /// Apply every part of a `PoSChanges` to a cycle info, except for `deferred_credits` - pub(crate) fn apply_changes( - &mut self, - changes: PoSChanges, - slot: Slot, - periods_per_cycle: u64, - thread_count: u8, - ) -> bool { - let hash_computer = CycleInfoHashComputer::new(); - let slots_per_cycle = periods_per_cycle.saturating_mul(thread_count as u64); - let mut hash_concat: Vec = Vec::new(); - - // compute cycle hash and concat - let cycle_hash = hash_computer.compute_cycle_hash(self.cycle); - hash_concat.extend(cycle_hash.to_bytes()); - - // check for completion - self.complete = slot.is_last_of_cycle(periods_per_cycle, thread_count); - let complete_hash = hash_computer.compute_complete_hash(self.complete); - hash_concat.extend(complete_hash.to_bytes()); - - // extend seed_bits with changes.seed_bits - self.rng_seed.extend(changes.seed_bits); - let rng_seed_hash = hash_computer.compute_seed_hash(&self.rng_seed); - hash_concat.extend(rng_seed_hash.to_bytes()); - - // extend roll counts - for (addr, roll_count) in changes.roll_changes { - if roll_count == 0 && let Some(removed_count) = self.roll_counts.remove(&addr) { - self.roll_counts_hash ^= - hash_computer.compute_roll_entry_hash(&addr, removed_count); - } else { - if let Some(replaced_count) = self.roll_counts.insert(addr, roll_count) { - self.roll_counts_hash ^= - hash_computer.compute_roll_entry_hash(&addr, replaced_count); - } - self.roll_counts_hash ^= hash_computer.compute_roll_entry_hash(&addr, roll_count); - } - } - hash_concat.extend(self.roll_counts_hash.to_bytes()); - - // extend production stats - for (addr, stats) in changes.production_stats { - self.production_stats - .entry(addr) - .and_modify(|current_stats| { - self.production_stats_hash ^= - 
hash_computer.compute_prod_stats_entry_hash(&addr, current_stats); - current_stats.extend(&stats); - self.production_stats_hash ^= - hash_computer.compute_prod_stats_entry_hash(&addr, current_stats); - }) - .or_insert_with(|| { - self.production_stats_hash ^= - hash_computer.compute_prod_stats_entry_hash(&addr, &stats); - stats - }); - } - hash_concat.extend(self.production_stats_hash.to_bytes()); - - // if the cycle just completed, check that it has the right number of seed bits - if self.complete && self.rng_seed.len() as u64 != slots_per_cycle { - panic!( - "cycle completed with incorrect number of seed bits: {} instead of {}", - self.rng_seed.len(), - slots_per_cycle - ); - } - - // compute the global hash - self.cycle_global_hash = Hash::compute_from(&hash_concat); - - // return the completion status - self.complete - } -} - -#[test] -fn test_cycle_info_hash_computation() { - use crate::DeferredCredits; - use bitvec::prelude::*; - - // cycle and address - let mut cycle_a = CycleInfo::new_with_hash( - 0, - false, - BTreeMap::default(), - BitVec::default(), - PreHashMap::default(), - ); - let addr = Address::from_prefixed_bytes(&[0; 33].as_slice()).unwrap(); - - // add changes - let mut roll_changes = PreHashMap::default(); - roll_changes.insert(addr, 10); - let mut production_stats = PreHashMap::default(); - production_stats.insert( - addr, - ProductionStats { - block_success_count: 4, - block_failure_count: 0, - }, - ); - let changes = PoSChanges { - seed_bits: bitvec![u8, Lsb0; 0, 10], - roll_changes: roll_changes.clone(), - production_stats: production_stats.clone(), - deferred_credits: DeferredCredits::new_with_hash(), - }; - cycle_a.apply_changes(changes, Slot::new(0, 0), 2, 2); - - // update changes once - roll_changes.clear(); - roll_changes.insert(addr, 20); - production_stats.clear(); - production_stats.insert( - addr, - ProductionStats { - block_success_count: 4, - block_failure_count: 6, - }, - ); - let changes = PoSChanges { - seed_bits: bitvec![u8, Lsb0; 0, 20], - roll_changes: roll_changes.clone(), - production_stats: production_stats.clone(), - deferred_credits: DeferredCredits::new_with_hash(), - }; - cycle_a.apply_changes(changes, Slot::new(0, 1), 2, 2); - - // update changes twice - roll_changes.clear(); - roll_changes.insert(addr, 0); - production_stats.clear(); - production_stats.insert( - addr, - ProductionStats { - block_success_count: 4, - block_failure_count: 12, - }, - ); - let changes = PoSChanges { - seed_bits: bitvec![u8, Lsb0; 0, 30], - roll_changes, - production_stats, - deferred_credits: DeferredCredits::new_with_hash(), - }; - cycle_a.apply_changes(changes, Slot::new(1, 0), 2, 2); - - // create a seconde cycle from same value and match hash - let cycle_b = CycleInfo::new_with_hash( - 0, - cycle_a.complete, - cycle_a.roll_counts, - cycle_a.rng_seed, - cycle_a.production_stats, - ); - assert_eq!( - cycle_a.roll_counts_hash, cycle_b.roll_counts_hash, - "roll_counts_hash mismatch" - ); - assert_eq!( - cycle_a.production_stats_hash, cycle_b.production_stats_hash, - "production_stats_hash mismatch" - ); - assert_eq!( - cycle_a.cycle_global_hash, cycle_b.cycle_global_hash, - "global_hash mismatch" - ); } +#[derive(Clone)] +#[allow(missing_docs)] /// Serializer for `CycleInfo` pub struct CycleInfoSerializer { - u64_ser: U64VarIntSerializer, - bitvec_ser: BitVecSerializer, - production_stats_ser: ProductionStatsSerializer, - address_ser: AddressSerializer, - opt_hash_ser: OptionSerializer, + pub u64_ser: U64VarIntSerializer, + pub bitvec_ser: BitVecSerializer, + 
pub production_stats_ser: ProductionStatsSerializer, + pub address_ser: AddressSerializer, + pub opt_hash_ser: OptionSerializer, } impl Default for CycleInfoSerializer { @@ -386,13 +215,15 @@ impl Serializer for CycleInfoSerializer { } } +#[derive(Clone)] +#[allow(missing_docs)] /// Deserializer for `CycleInfo` pub struct CycleInfoDeserializer { - u64_deser: U64VarIntDeserializer, - rolls_deser: RollsDeserializer, - bitvec_deser: BitVecDeserializer, - production_stats_deser: ProductionStatsDeserializer, - opt_hash_deser: OptionDeserializer, + pub u64_deser: U64VarIntDeserializer, + pub rolls_deser: RollsDeserializer, + pub bitvec_deser: BitVecDeserializer, + pub production_stats_deser: ProductionStatsDeserializer, + pub opt_hash_deser: OptionDeserializer, } impl CycleInfoDeserializer { @@ -486,9 +317,11 @@ impl ProductionStats { } } +#[derive(Clone)] +#[allow(missing_docs)] /// Serializer for `ProductionStats` pub struct ProductionStatsSerializer { - u64_ser: U64VarIntSerializer, + pub u64_ser: U64VarIntSerializer, address_ser: AddressSerializer, } @@ -531,11 +364,13 @@ impl Serializer> for ProductionStatsSeriali } } +#[derive(Clone)] +#[allow(missing_docs)] /// Deserializer for `ProductionStats` pub struct ProductionStatsDeserializer { length_deserializer: U64VarIntDeserializer, - address_deserializer: AddressDeserializer, - u64_deserializer: U64VarIntDeserializer, + pub address_deserializer: AddressDeserializer, + pub u64_deserializer: U64VarIntDeserializer, } impl ProductionStatsDeserializer { @@ -594,11 +429,13 @@ impl Deserializer> for ProductionStatsDeser } } +#[derive(Clone)] +#[allow(missing_docs)] /// Deserializer for rolls pub struct RollsDeserializer { length_deserializer: U64VarIntDeserializer, - address_deserializer: AddressDeserializer, - u64_deserializer: U64VarIntDeserializer, + pub address_deserializer: AddressDeserializer, + pub u64_deserializer: U64VarIntDeserializer, } impl RollsDeserializer { @@ -640,10 +477,12 @@ impl Deserializer> for RollsDeserializer { } } +#[derive(Clone)] +#[allow(missing_docs)] /// Serializer for cycle history pub struct CycleHistorySerializer { - u64_serializer: U64VarIntSerializer, - cycle_info_serializer: CycleInfoSerializer, + pub u64_serializer: U64VarIntSerializer, + pub cycle_info_serializer: CycleInfoSerializer, } impl CycleHistorySerializer { @@ -677,10 +516,12 @@ impl Serializer> for CycleHistorySerializer { } } +#[derive(Clone)] +#[allow(missing_docs)] /// Deserializer for cycle history, useful when restarting from a snapshot pub struct CycleHistoryDeserializer { - u64_deserializer: U64VarIntDeserializer, - cycle_info_deserializer: CycleInfoDeserializer, + pub u64_deserializer: U64VarIntDeserializer, + pub cycle_info_deserializer: CycleInfoDeserializer, } impl CycleHistoryDeserializer { diff --git a/massa-pos-exports/src/deferred_credits.rs b/massa-pos-exports/src/deferred_credits.rs index 7b1262a63cf..a48cc7a76bb 100644 --- a/massa-pos-exports/src/deferred_credits.rs +++ b/massa-pos-exports/src/deferred_credits.rs @@ -225,11 +225,13 @@ impl DeferredCredits { } } +#[derive(Clone)] +#[allow(missing_docs)] /// Serializer for `DeferredCredits` pub struct DeferredCreditsSerializer { - slot_ser: SlotSerializer, - u64_ser: U64VarIntSerializer, - credits_ser: CreditsSerializer, + pub slot_ser: SlotSerializer, + pub u64_ser: U64VarIntSerializer, + pub credits_ser: CreditsSerializer, } impl Default for DeferredCreditsSerializer { @@ -269,11 +271,13 @@ impl Serializer for DeferredCreditsSerializer { } } +#[derive(Clone)] +#[allow(missing_docs)] 
/// Deserializer for `DeferredCredits` pub struct DeferredCreditsDeserializer { - u64_deserializer: U64VarIntDeserializer, - slot_deserializer: SlotDeserializer, - credit_deserializer: CreditsDeserializer, + pub u64_deserializer: U64VarIntDeserializer, + pub slot_deserializer: SlotDeserializer, + pub credit_deserializer: CreditsDeserializer, enable_hash: bool, } @@ -334,11 +338,13 @@ impl Deserializer for DeferredCreditsDeserializer { } } +#[derive(Clone)] +#[allow(missing_docs)] /// Serializer for `Credits` pub struct CreditsSerializer { - u64_ser: U64VarIntSerializer, - address_ser: AddressSerializer, - amount_ser: AmountSerializer, + pub u64_ser: U64VarIntSerializer, + pub address_ser: AddressSerializer, + pub amount_ser: AmountSerializer, } impl Default for CreditsSerializer { @@ -377,11 +383,13 @@ impl Serializer> for CreditsSerializer { } } +#[derive(Clone)] +#[allow(missing_docs)] /// Deserializer for a single credit -struct CreditsDeserializer { +pub struct CreditsDeserializer { u64_deserializer: U64VarIntDeserializer, - address_deserializer: AddressDeserializer, - amount_deserializer: AmountDeserializer, + pub address_deserializer: AddressDeserializer, + pub amount_deserializer: AmountDeserializer, } impl CreditsDeserializer { diff --git a/massa-pos-exports/src/lib.rs b/massa-pos-exports/src/lib.rs index ef1562eb16a..d7e9d74e587 100644 --- a/massa-pos-exports/src/lib.rs +++ b/massa-pos-exports/src/lib.rs @@ -17,6 +17,8 @@ mod pos_final_state; mod settings; pub use config::PoSConfig; +#[cfg(any(test, feature = "testing"))] +pub use controller_traits::MockSelectorController; pub use controller_traits::{Selection, SelectorController, SelectorManager}; pub use cycle_info::*; pub use deferred_credits::*; diff --git a/massa-pos-exports/src/pos_final_state.rs b/massa-pos-exports/src/pos_final_state.rs index 219588f74ce..695a9088e53 100644 --- a/massa-pos-exports/src/pos_final_state.rs +++ b/massa-pos-exports/src/pos_final_state.rs @@ -1,37 +1,154 @@ -use crate::{CycleInfo, PoSChanges, PosError, PosResult, ProductionStats, SelectorController}; +use crate::{ + CycleHistoryDeserializer, CycleHistorySerializer, CycleInfo, DeferredCreditsDeserializer, + DeferredCreditsSerializer, PoSChanges, PosError, PosResult, ProductionStats, + SelectorController, +}; use crate::{DeferredCredits, PoSConfig}; use bitvec::vec::BitVec; +use massa_db::{ + DBBatch, MassaDB, CF_ERROR, CYCLE_HISTORY_DESER_ERROR, CYCLE_HISTORY_PREFIX, + CYCLE_HISTORY_SER_ERROR, DEFERRED_CREDITS_DESER_ERROR, DEFERRED_CREDITS_PREFIX, + DEFERRED_CREDITS_SER_ERROR, STATE_CF, +}; use massa_hash::Hash; -use massa_models::error::ModelsError; -use massa_models::streaming_step::StreamingStep; +use massa_models::amount::Amount; use massa_models::{address::Address, prehash::PreHashMap, slot::Slot}; -use massa_serialization::{Serializer, U64VarIntSerializer}; +use massa_serialization::{DeserializeError, Deserializer, Serializer, U64VarIntSerializer}; +use nom::AsBytes; +use parking_lot::RwLock; +use rocksdb::{Direction, IteratorMode}; use std::collections::VecDeque; +use std::ops::Bound::{Excluded, Included}; use std::ops::RangeBounds; -use std::{ - collections::BTreeMap, - ops::Bound::{Excluded, Unbounded}, - path::PathBuf, -}; +use std::sync::Arc; +use std::{collections::BTreeMap, path::PathBuf}; use tracing::debug; +// General cycle info idents +const COMPLETE_IDENT: u8 = 0u8; +const RNG_SEED_IDENT: u8 = 1u8; +const FINAL_STATE_HASH_SNAPSHOT_IDENT: u8 = 2u8; +const ROLL_COUNT_IDENT: u8 = 3u8; +const PROD_STATS_IDENT: u8 = 4u8; + +// 
Production stats idents +const PROD_STATS_FAIL_IDENT: u8 = 0u8; +const PROD_STATS_SUCCESS_IDENT: u8 = 1u8; + +/// Complete key formatting macro +#[macro_export] +macro_rules! complete_key { + ($cycle_prefix:expr) => { + [&$cycle_prefix[..], &[COMPLETE_IDENT]].concat() + }; +} + +/// Rng seed key formatting macro +#[macro_export] +macro_rules! rng_seed_key { + ($cycle_prefix:expr) => { + [&$cycle_prefix[..], &[RNG_SEED_IDENT]].concat() + }; +} + +/// Final state hash snapshot key formatting macro +#[macro_export] +macro_rules! final_state_hash_snapshot_key { + ($cycle_prefix:expr) => { + [&$cycle_prefix[..], &[FINAL_STATE_HASH_SNAPSHOT_IDENT]].concat() + }; +} + +/// Roll count key prefix macro +#[macro_export] +macro_rules! roll_count_prefix { + ($cycle_prefix:expr) => { + [&$cycle_prefix[..], &[ROLL_COUNT_IDENT]].concat() + }; +} + +/// Roll count key formatting macro +#[macro_export] +macro_rules! roll_count_key { + ($cycle_prefix:expr, $addr:expr) => { + [ + &$cycle_prefix[..], + &[ROLL_COUNT_IDENT], + &$addr.to_prefixed_bytes()[..], + ] + .concat() + }; +} + +/// Production stats prefix macro +#[macro_export] +macro_rules! prod_stats_prefix { + ($cycle_prefix:expr) => { + [&$cycle_prefix[..], &[PROD_STATS_IDENT]].concat() + }; +} + +/// Production stats fail key formatting macro +#[macro_export] +macro_rules! prod_stats_fail_key { + ($cycle_prefix:expr, $addr:expr) => { + [ + &$cycle_prefix[..], + &[PROD_STATS_IDENT], + &$addr.to_prefixed_bytes()[..], + &[PROD_STATS_FAIL_IDENT], + ] + .concat() + }; +} + +/// Production stats success key formatting macro +#[macro_export] +macro_rules! prod_stats_success_key { + ($cycle_prefix:expr, $addr:expr) => { + [ + &$cycle_prefix[..], + &[PROD_STATS_IDENT], + &$addr.to_prefixed_bytes()[..], + &[PROD_STATS_SUCCESS_IDENT], + ] + .concat() + }; +} + +/// Deferred credits key formatting macro +#[macro_export] +macro_rules! 
deferred_credits_key { + ($id:expr) => { + [&DEFERRED_CREDITS_PREFIX.as_bytes(), &$id[..]].concat() + }; +} + #[derive(Clone)] /// Final state of PoS pub struct PoSFinalState { /// proof-of-stake configuration pub config: PoSConfig, + /// Access to the RocksDB database + pub db: Arc>, /// contiguous cycle history, back = newest - pub cycle_history: VecDeque, - /// coins to be credited at the end of the slot - pub deferred_credits: DeferredCredits, + pub cycle_history_cache: VecDeque<(u64, bool)>, + /// rng_seed cache to get rng_seed for the current cycle + pub rng_seed_cache: Option<(u64, BitVec)>, /// selector controller pub selector: Box, /// initial rolls, used for negative cycle look back pub initial_rolls: BTreeMap, /// initial seeds, used for negative cycle look back (cycles -2, -1 in that order) pub initial_seeds: Vec, - /// initial ledger hash, used for seed computation - pub initial_ledger_hash: Hash, + /// deferred credits serializer + pub deferred_credits_serializer: DeferredCreditsSerializer, + /// deferred credits deserializer + pub deferred_credits_deserializer: DeferredCreditsDeserializer, + /// cycle info serializer + pub cycle_info_serializer: CycleHistorySerializer, + /// cycle info deserializer + pub cycle_info_deserializer: CycleHistoryDeserializer, } impl PoSFinalState { @@ -41,7 +158,7 @@ impl PoSFinalState { initial_seed_string: &str, initial_rolls_path: &PathBuf, selector: Box, - initial_ledger_hash: Hash, + db: Arc>, ) -> Result { // load get initial rolls from file let initial_rolls = serde_json::from_str::>( @@ -55,61 +172,57 @@ impl PoSFinalState { let init_seed = Hash::compute_from(initial_seed_string.as_bytes()); let initial_seeds = vec![Hash::compute_from(init_seed.to_bytes()), init_seed]; - Ok(Self { + let deferred_credits_deserializer = + DeferredCreditsDeserializer::new(config.thread_count, config.max_credit_length, true); + let cycle_info_deserializer = CycleHistoryDeserializer::new( + config.cycle_history_length as u64, + config.max_rolls_length, + config.max_production_stats_length, + ); + + let pos_state = Self { config, - cycle_history: Default::default(), - deferred_credits: DeferredCredits::new_with_hash(), + db, + cycle_history_cache: Default::default(), + rng_seed_cache: None, selector, initial_rolls, initial_seeds, - initial_ledger_hash, - }) + deferred_credits_serializer: DeferredCreditsSerializer::new(), + deferred_credits_deserializer, + cycle_info_serializer: CycleHistorySerializer::new(), + cycle_info_deserializer, + }; + + Ok(pos_state) } - /// create a `PoSFinalState` from an existing snapshot - pub fn from_snapshot( - config: PoSConfig, - cycle_history: VecDeque, - deferred_credits: DeferredCredits, - initial_seed_string: &str, - initial_rolls_path: &PathBuf, - selector: Box, - initial_ledger_hash: Hash, - ) -> Result { - // Seeds used as the initial seeds for negative cycles (-2 and -1 respectively) - let init_seed = Hash::compute_from(initial_seed_string.as_bytes()); - let initial_seeds = vec![Hash::compute_from(init_seed.to_bytes()), init_seed]; - // load get initial rolls from file - let initial_rolls = serde_json::from_str::>( - &std::fs::read_to_string(initial_rolls_path).map_err(|err| { - PosError::RollsFileLoadingError(format!("error while deserializing: {}", err)) - })?, - ) - .map_err(|err| PosError::RollsFileLoadingError(format!("error opening file: {}", err)))?; + /// After bootstrap or load from disk, recompute the caches + pub fn recompute_pos_state_caches(&mut self) { + self.cycle_history_cache = 
self.get_cycle_history_cycles().into(); - Ok(Self { - config, - cycle_history, - deferred_credits, - selector, - initial_rolls, - initial_seeds, - initial_ledger_hash, - }) + if let Some((cycle, _)) = self.cycle_history_cache.back() { + self.rng_seed_cache = Some((*cycle, self.get_cycle_history_rng_seed(*cycle))); + } else { + self.rng_seed_cache = None; + } } /// Reset the state of the PoS final state /// /// USED ONLY FOR BOOTSTRAP pub fn reset(&mut self) { - self.cycle_history.clear(); - self.deferred_credits = DeferredCredits::new_with_hash(); + let mut db = self.db.write(); + db.delete_prefix(CYCLE_HISTORY_PREFIX, STATE_CF, None); + db.delete_prefix(DEFERRED_CREDITS_PREFIX, STATE_CF, None); + self.cycle_history_cache = Default::default(); + self.rng_seed_cache = None; } /// Create the initial cycle based off the initial rolls. /// /// This should be called only if bootstrap did not happen. - pub fn create_initial_cycle(&mut self) { + pub fn create_initial_cycle(&mut self, batch: &mut DBBatch) { let mut rng_seed = BitVec::with_capacity( self.config .periods_per_cycle @@ -119,23 +232,27 @@ impl PoSFinalState { ); rng_seed.extend(vec![false; self.config.thread_count as usize]); - self.cycle_history.push_back(CycleInfo::new_with_hash( - 0, - false, - self.initial_rolls.clone(), - rng_seed, - PreHashMap::default(), - )); + self.put_new_cycle_info( + &CycleInfo::new_with_hash( + 0, + false, + self.initial_rolls.clone(), + rng_seed, + PreHashMap::default(), + ), + batch, + ); } - /// Create the a cycle based off of another cycle_info. Used for downtime interpolation, - /// when restarting from a snapshot. + /// Create a new cycle based on another cycle_info. /// + /// Used for downtime interpolation, when restarting from a snapshot. pub fn create_new_cycle_from_last( &mut self, last_cycle_info: &CycleInfo, first_slot: Slot, last_slot: Slot, + batch: &mut DBBatch, ) -> Result<(), PosError> { let mut rng_seed = if first_slot.is_first_of_cycle(self.config.periods_per_cycle) { BitVec::with_capacity( @@ -161,25 +278,43 @@ let complete = last_slot.is_last_of_cycle(self.config.periods_per_cycle, self.config.thread_count); - self.cycle_history.push_back(CycleInfo::new_with_hash( - cycle, - complete, - last_cycle_info.roll_counts.clone(), - rng_seed, - last_cycle_info.production_stats.clone(), - )); + self.put_new_cycle_info( + &CycleInfo::new_with_hash( + cycle, + complete, + last_cycle_info.roll_counts.clone(), + rng_seed, + last_cycle_info.production_stats.clone(), + ), + batch, + ); Ok(()) } + /// Deletes a given cycle from RocksDB + pub fn delete_cycle_info(&mut self, cycle: u64, batch: &mut DBBatch) { + let db = self.db.read(); + let handle = db.db.cf_handle(STATE_CF).expect(CF_ERROR); + + let prefix = self.cycle_history_cycle_prefix(cycle); + + for (serialized_key, _) in db.db.prefix_iterator_cf(handle, &prefix).flatten() { + if !serialized_key.starts_with(prefix.as_bytes()) { + break; + } + db.delete_key(batch, serialized_key.to_vec()); + } + } + /// Sends the current draw inputs (initial or bootstrapped) to the selector. /// Waits for the initial draws to be performed.
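///
/// A rough usage sketch of the startup path on a fresh node where bootstrap did
/// not happen, loosely mirroring the unit test at the end of this file
/// (`pos_state` is assumed to be a freshly constructed `PoSFinalState`):
///
/// ```ignore
/// let mut batch = DBBatch::new();
/// pos_state.create_initial_cycle(&mut batch);
/// pos_state.db.write().write_batch(batch, Default::default(), Some(Slot::new(0, 0)));
/// pos_state.compute_initial_draws()?;
/// ```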
pub fn compute_initial_draws(&mut self) -> PosResult<()> { // if cycle_history starts at a cycle that is strictly higher than 0, do not feed cycles 0, 1 to selector let history_starts_late = self - .cycle_history + .cycle_history_cache .front() - .map(|c_info| c_info.cycle > 0) + .map(|c_info| c_info.0 > 0) .unwrap_or(false); let mut max_cycle = None; @@ -193,8 +328,8 @@ impl PoSFinalState { } // feed cycles available from history - for (idx, hist_item) in self.cycle_history.iter().enumerate() { - if !hist_item.complete { + for (idx, hist_item) in self.cycle_history_cache.iter().enumerate() { + if !hist_item.1 { break; } if history_starts_late && idx == 0 { @@ -202,7 +337,7 @@ impl PoSFinalState { // because the roll distribution which should be provided by the previous element is absent. continue; } - let draw_cycle = hist_item.cycle.checked_add(2).ok_or_else(|| { + let draw_cycle = hist_item.0.checked_add(2).ok_or_else(|| { PosError::OverflowError("cycle overflow in give_selector_controller".into()) })?; self.feed_selector(draw_cycle)?; @@ -216,13 +351,14 @@ impl PoSFinalState { Ok(()) } - /// Technical specification of `apply_changes`: + /// Technical specification of `apply_changes_to_batch`: /// /// set `self.last_final_slot` = C - /// if cycle C is absent from `self.cycle_history`: - /// `push` a new empty `CycleInfo` at the back of `self.cycle_history` and set its cycle = C - /// `pop_front` from `cycle_history` until front() represents cycle C-4 or later (not C-3 because we might need older endorsement draws on the limit between 2 cycles) - /// for the cycle C entry of `cycle_history`: + /// if cycle C is absent from `self.cycle_history_cache`: + /// `push` a new empty `CycleInfo` on disk and reflect in `self.cycle_history_cache` and set its cycle = C + /// `pop_front` from `cycle_history_cache` until front() represents cycle C-4 or later (not C-3 because we might need older endorsement draws on the limit between 2 cycles) + /// delete the removed cycles from disk + /// for the cycle C entry in the db: /// extend `seed_bits` with `changes.seed_bits` /// extend `roll_counts` with `changes.roll_changes` /// delete all entries from `roll_counts` for which the roll count is zero @@ -234,11 +370,12 @@ impl PoSFinalState { /// set complete=true for cycle C in the history /// compute the seed hash and notifies the `PoSDrawer` for cycle `C+3` /// - pub fn apply_changes( + pub fn apply_changes_to_batch( &mut self, changes: PoSChanges, slot: Slot, feed_selector: bool, + batch: &mut DBBatch, ) -> PosResult<()> { let slots_per_cycle: usize = self .config @@ -254,20 +391,27 @@ impl PoSFinalState { // push a new empty CycleInfo at the back of self.cycle_history and set its cycle = C // pop_front from cycle_history until front() represents cycle C-4 or later // (not C-3 because we might need older endorsement draws on the limit between 2 cycles) - if let Some(info) = self.cycle_history.back() { - if cycle == info.cycle && !info.complete { + if let Some(info) = self.cycle_history_cache.back() { + if cycle == info.0 && !info.1 { // extend the last incomplete cycle - } else if info.cycle.checked_add(1) == Some(cycle) && info.complete { + } else if info.0.checked_add(1) == Some(cycle) && info.1 { // the previous cycle is complete, push a new incomplete/empty one to extend - self.cycle_history.push_back(CycleInfo::new_with_hash( - cycle, - false, - info.roll_counts.clone(), - BitVec::with_capacity(slots_per_cycle), - PreHashMap::default(), - )); - while self.cycle_history.len() > 
self.config.cycle_history_length { - self.cycle_history.pop_front(); + + let roll_counts = self.get_all_roll_counts(info.0); + self.put_new_cycle_info( + &CycleInfo::new_with_hash( + cycle, + false, + roll_counts, + BitVec::with_capacity(slots_per_cycle), + PreHashMap::default(), + ), + batch, + ); + while self.cycle_history_cache.len() > self.config.cycle_history_length { + if let Some((old_cycle, _)) = self.cycle_history_cache.pop_front() { + self.delete_cycle_info(old_cycle, batch); + } } } else { return Err(PosError::OverflowError( @@ -280,37 +424,62 @@ impl PoSFinalState { )); } - // get the last history cycle, should always be present because it was filled above - let current = self - .cycle_history - .back_mut() - .expect("cycle history should be non-empty"); + let complete: bool = + slot.is_last_of_cycle(self.config.periods_per_cycle, self.config.thread_count); + self.put_cycle_history_complete(cycle, complete, batch); - // apply changes to the current cycle - let cycle_completed = current.apply_changes( - changes.clone(), - slot, - self.config.periods_per_cycle, - self.config.thread_count, - ); + // OPTIM: we could avoid reading the previous seed bits with a cache or with an update function + let mut rng_seed = self.get_cycle_history_rng_seed(cycle); + rng_seed.extend(changes.seed_bits); + self.put_cycle_history_rng_seed(cycle, rng_seed.clone(), batch); - // extent deferred_credits with changes.deferred_credits - // remove zero-valued credits - self.deferred_credits.extend(changes.deferred_credits); - self.deferred_credits.remove_zeros(); + // extend roll counts + for (addr, roll_count) in changes.roll_changes { + self.put_cycle_history_address_entry(cycle, &addr, Some(&roll_count), None, batch); + } + + // extend production stats + for (addr, stats) in changes.production_stats { + if let Some(prev_production_stats) = self.get_production_stats_for_address(cycle, addr) + { + let mut new_production_stats = prev_production_stats; + new_production_stats.extend(&stats); + self.put_cycle_history_address_entry( + cycle, + &addr, + None, + Some(&new_production_stats), + batch, + ); + } else { + self.put_cycle_history_address_entry(cycle, &addr, None, Some(&stats), batch); + } + } + + // if the cycle just completed, check that it has the right number of seed bits + if complete && rng_seed.len() != slots_per_cycle { + panic!( + "cycle completed with incorrect number of seed bits: {} instead of {}", + rng_seed.len(), + slots_per_cycle + ); + } + + // extend deferred_credits with changes.deferred_credits and remove zeros + for (slot, credits) in changes.deferred_credits.credits.iter() { + for (address, amount) in credits.iter() { + self.put_deferred_credits_entry(slot, address, amount, batch); + } + } // feed the cycle if it is complete // notify the PoSDrawer about the newly ready draw data // to draw cycle + 2, we use the rng data from cycle - 1 and the seed from cycle debug!( "After slot {} PoS cycle list is {:?}", - slot, - self.cycle_history - .iter() - .map(|c| (c.cycle, c.complete)) - .collect::>() + slot, self.cycle_history_cache ); - if cycle_completed && feed_selector { + if complete && feed_selector { self.feed_selector(cycle.checked_add(2).ok_or_else(|| { PosError::OverflowError("cycle overflow when feeding selector".into()) })?) 
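Callers accumulate all writes for a slot in one `DBBatch` and commit it atomically after `apply_changes_to_batch` returns. A minimal sketch, with `pos_state`, `db` and `changes` set up as in the unit test at the end of this file:

    let mut batch = DBBatch::new();
    pos_state
        .apply_changes_to_batch(changes, Slot::new(0, 0), false, &mut batch)
        .unwrap();
    db.write()
        .write_batch(batch, Default::default(), Some(Slot::new(0, 0)));

Passing `feed_selector = false` skips the selector notification; the test does this because only a mock selector is attached.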
@@ -323,28 +492,29 @@ impl PoSFinalState { pub fn feed_selector(&self, draw_cycle: u64) -> PosResult<()> { // get roll lookback - //info!("Feed selector with draw cycle: {}", draw_cycle); - let (lookback_rolls, lookback_state_hash) = match draw_cycle.checked_sub(3) { // looking back in history Some(c) => { let index = self .get_cycle_index(c) .ok_or(PosError::CycleUnavailable(c))?; - let cycle_info = &self.cycle_history[index]; - if !cycle_info.complete { + let cycle_info = &self.cycle_history_cache[index]; + if !cycle_info.1 { return Err(PosError::CycleUnfinished(c)); } // take the final_state_hash_snapshot at cycle - 3 // it will later be combined with rng_seed from cycle - 2 to determine the selection seed // do this here to avoid a potential attacker manipulating the selections - let state_hash = cycle_info - .final_state_hash_snapshot - .expect("critical: a complete cycle must contain a final state hash snapshot"); - (cycle_info.roll_counts.clone(), state_hash) + let state_hash = self.get_cycle_history_final_state_hash_snapshot(cycle_info.0); + ( + self.get_all_roll_counts(cycle_info.0), + Some(state_hash.expect( + "critical: a complete cycle must contain a final state hash snapshot", + )), + ) } // looking back to negative cycles - None => (self.initial_rolls.clone(), self.initial_ledger_hash), + None => (self.initial_rolls.clone(), None), }; // get seed lookback @@ -354,15 +524,17 @@ impl PoSFinalState { let index = self .get_cycle_index(c) .ok_or(PosError::CycleUnavailable(c))?; - let cycle_info = &self.cycle_history[index]; - if !cycle_info.complete { + let cycle_info = &self.cycle_history_cache[index]; + if !cycle_info.1 { return Err(PosError::CycleUnfinished(c)); } let u64_ser = U64VarIntSerializer::new(); let mut seed = Vec::new(); u64_ser.serialize(&c, &mut seed).unwrap(); - seed.extend(cycle_info.rng_seed.clone().into_vec()); - seed.extend(lookback_state_hash.to_bytes()); + seed.extend(self.get_cycle_history_rng_seed(cycle_info.0).into_vec()); + if let Some(lookback_state_hash) = lookback_state_hash { + seed.extend(lookback_state_hash.to_bytes()); + } Hash::compute_from(&seed) } // looking back to negative cycles @@ -376,20 +548,51 @@ impl PoSFinalState { } /// Feeds the selector targeting a given draw cycle - pub fn feed_cycle_state_hash(&mut self, cycle: u64, final_state_hash: Hash) { - if let Some(index) = self.get_cycle_index(cycle) { - let cycle = self.cycle_history.get_mut(index).unwrap(); - cycle.final_state_hash_snapshot = Some(final_state_hash); + pub fn feed_cycle_state_hash(&self, cycle: u64, final_state_hash: Hash) { + if self.get_cycle_index(cycle).is_some() { + let mut batch = DBBatch::new(); + self.put_cycle_history_final_state_hash_snapshot( + cycle, + Some(final_state_hash), + &mut batch, + ); + + self.db.write().write_batch(batch, Default::default(), None); } else { panic!("cycle {} should be contained here", cycle); } } +} +// RocksDB getters +impl PoSFinalState { /// Retrieves the amount of rolls a given address has at the latest cycle pub fn get_rolls_for(&self, addr: &Address) -> u64 { - self.cycle_history + self.cycle_history_cache .back() - .and_then(|info| info.roll_counts.get(addr).cloned()) + .and_then(|info| { + let cycle = info.0; + let db = self.db.read(); + let handle = db.db.cf_handle(STATE_CF).expect(CF_ERROR); + + let key = roll_count_key!(self.cycle_history_cycle_prefix(cycle), addr); + + if let Some(serialized_value) = + db.db.get_cf(handle, key).expect(CYCLE_HISTORY_DESER_ERROR) + { + let (_, amount) = self + .cycle_info_deserializer + 
.cycle_info_deserializer + .rolls_deser + .u64_deserializer + .deserialize::(&serialized_value) + .expect(CYCLE_HISTORY_DESER_ERROR); + + Some(amount) + } else { + None + } + }) .unwrap_or_default() } @@ -397,15 +600,26 @@ impl PoSFinalState { pub fn get_address_active_rolls(&self, addr: &Address, cycle: u64) -> Option { match cycle.checked_sub(3) { Some(lookback_cycle) => { - let lookback_index = match self.get_cycle_index(lookback_cycle) { - Some(idx) => idx, - None => return None, - }; - // get rolls - self.cycle_history[lookback_index] - .roll_counts - .get(addr) - .cloned() + let db = self.db.read(); + let handle = db.db.cf_handle(STATE_CF).expect(CF_ERROR); + + let key = roll_count_key!(self.cycle_history_cycle_prefix(lookback_cycle), addr); + + if let Some(serialized_value) = + db.db.get_cf(handle, key).expect(CYCLE_HISTORY_DESER_ERROR) + { + let (_, amount) = self + .cycle_info_deserializer + .cycle_info_deserializer + .rolls_deser + .u64_deserializer + .deserialize::(&serialized_value) + .expect(CYCLE_HISTORY_DESER_ERROR); + + Some(amount) + } else { + None + } } None => self.initial_rolls.get(addr).cloned(), } @@ -416,22 +630,79 @@ impl PoSFinalState { where R: RangeBounds, { - self.deferred_credits.get_slot_range(range, false) - } + let db = self.db.read(); + let handle = db.db.cf_handle(STATE_CF).expect(CF_ERROR); - /// Retrieves the productions statistics for all addresses on a given cycle - pub fn get_all_production_stats( - &self, - cycle: u64, - ) -> Option<&PreHashMap> { - self.get_cycle_index(cycle) - .map(|idx| &self.cycle_history[idx].production_stats) + let mut deferred_credits = DeferredCredits::new_without_hash(); + + let mut start_key_buffer = Vec::new(); + start_key_buffer.extend_from_slice(DEFERRED_CREDITS_PREFIX.as_bytes()); + + match range.start_bound() { + Included(slot) => { + self.deferred_credits_serializer + .slot_ser + .serialize(slot, &mut start_key_buffer) + .expect(DEFERRED_CREDITS_SER_ERROR); + } + Excluded(slot) => { + self.deferred_credits_serializer + .slot_ser + .serialize( + &slot + .get_next_slot(self.config.thread_count) + .expect(DEFERRED_CREDITS_SER_ERROR), + &mut start_key_buffer, + ) + .expect(DEFERRED_CREDITS_SER_ERROR); + } + _ => {} + }; + + for (serialized_key, serialized_value) in db + .db + .iterator_cf( + handle, + IteratorMode::From(&start_key_buffer, Direction::Forward), + ) + .flatten() + { + if !serialized_key.starts_with(DEFERRED_CREDITS_PREFIX.as_bytes()) { + break; + } + let (rest, slot) = self + .deferred_credits_deserializer + .slot_deserializer + .deserialize::(&serialized_key[DEFERRED_CREDITS_PREFIX.len()..]) + .expect(DEFERRED_CREDITS_DESER_ERROR); + if !range.contains(&slot) { + break; + } + + let (_, address) = self + .deferred_credits_deserializer + .credit_deserializer + .address_deserializer + .deserialize::(rest) + .expect(DEFERRED_CREDITS_DESER_ERROR); + + let (_, amount) = self + .deferred_credits_deserializer + .credit_deserializer + .amount_deserializer + .deserialize::(&serialized_value) + .expect(DEFERRED_CREDITS_DESER_ERROR); + + deferred_credits.insert(slot, address, amount); + } + + deferred_credits } /// Gets the index of a cycle in history pub fn get_cycle_index(&self, cycle: u64) -> Option { - let first_cycle = match self.cycle_history.front() { - Some(c) => c.cycle, + let first_cycle = match self.cycle_history_cache.front() { + Some(c) => c.0, None => return None, // history empty }; if cycle < first_cycle { @@ -441,121 +712,935 @@ impl PoSFinalState { Ok(v) => v, Err(_) => return None, // usize overflow 
}; - if index >= self.cycle_history.len() { + if index >= self.cycle_history_cache.len() { return None; // in the future } Some(index) } - /// Gets a cycle of the Proof of Stake `cycle_history`. Used only in the bootstrap process. - /// - /// # Arguments: - /// `cursor`: indicates the bootstrap state after the previous payload - /// - /// # Returns - /// The PoS cycle and the updated cursor - pub fn get_cycle_history_part( + /// Get all the roll counts for a given cycle + pub fn get_all_roll_counts(&self, cycle: u64) -> BTreeMap { + let db = self.db.read(); + let handle = db.db.cf_handle(STATE_CF).expect(CF_ERROR); + + let mut roll_counts: BTreeMap = BTreeMap::new(); + + let prefix = roll_count_prefix!(self.cycle_history_cycle_prefix(cycle)); + for (serialized_key, serialized_value) in + db.db.prefix_iterator_cf(handle, &prefix).flatten() + { + if !serialized_key.starts_with(prefix.as_bytes()) { + break; + } + + let (rest, _cycle) = self + .cycle_info_deserializer + .cycle_info_deserializer + .u64_deser + .deserialize::(&serialized_key[CYCLE_HISTORY_PREFIX.len()..]) + .expect(CYCLE_HISTORY_DESER_ERROR); + + let (_, address) = self + .cycle_info_deserializer + .cycle_info_deserializer + .rolls_deser + .address_deserializer + .deserialize::(&rest[1..]) + .expect(CYCLE_HISTORY_DESER_ERROR); + + let (_, amount) = self + .cycle_info_deserializer + .cycle_info_deserializer + .rolls_deser + .u64_deserializer + .deserialize::(&serialized_value) + .expect(CYCLE_HISTORY_DESER_ERROR); + + roll_counts.insert(address, amount); + } + + roll_counts + } + + /// Retrieves the productions statistics for all addresses on a given cycle + pub fn get_all_production_stats( &self, - cursor: StreamingStep, - ) -> Result<(Option, StreamingStep), ModelsError> { - let cycle_index = match cursor { - StreamingStep::Started => { - usize::from(self.cycle_history.len() >= self.config.cycle_history_length) + cycle: u64, + ) -> Option> { + self.get_cycle_index(cycle) + .map(|idx| self.get_all_production_stats_private(self.cycle_history_cache[idx].0)) + } + + /// Retrieves the productions statistics for all addresses on a given cycle + pub fn get_all_production_stats_private( + &self, + cycle: u64, + ) -> PreHashMap { + let db = self.db.read(); + let handle = db.db.cf_handle(STATE_CF).expect(CF_ERROR); + + let mut production_stats: PreHashMap = PreHashMap::default(); + let mut cur_production_stat = ProductionStats::default(); + let mut cur_address = None; + + let prefix = prod_stats_prefix!(self.cycle_history_cycle_prefix(cycle)); + for (serialized_key, serialized_value) in + db.db.prefix_iterator_cf(handle, &prefix).flatten() + { + if !serialized_key.starts_with(prefix.as_bytes()) { + break; } - StreamingStep::Ongoing(last_cycle) => { - if let Some(index) = self.get_cycle_index(last_cycle) { - if index == self.cycle_history.len() - 1 { - return Ok((None, StreamingStep::Finished(None))); - } - index.saturating_add(1) - } else { - return Err(ModelsError::OutdatedBootstrapCursor); - } + let (rest, _cycle) = self + .cycle_info_deserializer + .cycle_info_deserializer + .u64_deser + .deserialize::(&serialized_key[CYCLE_HISTORY_PREFIX.len()..]) + .expect(CYCLE_HISTORY_DESER_ERROR); + + let (rest, address) = self + .cycle_info_deserializer + .cycle_info_deserializer + .production_stats_deser + .address_deserializer + .deserialize::(&rest[1..]) + .expect(CYCLE_HISTORY_DESER_ERROR); + + if cur_address != Some(address) { + cur_address = Some(address); + cur_production_stat = ProductionStats::default(); } - StreamingStep::Finished(_) 
=> return Ok((None, cursor)), - }; - let cycle_info = self - .cycle_history - .get(cycle_index) - .expect("a cycle should be available here"); - Ok(( - Some(cycle_info.clone()), - StreamingStep::Ongoing(cycle_info.cycle), - )) + + let (_, value) = self + .cycle_info_deserializer + .cycle_info_deserializer + .production_stats_deser + .u64_deserializer + .deserialize::(&serialized_value) + .expect(CYCLE_HISTORY_DESER_ERROR); + + if rest.len() == 1 && rest[0] == PROD_STATS_FAIL_IDENT { + cur_production_stat.block_failure_count = value; + } else if rest.len() == 1 && rest[0] == PROD_STATS_SUCCESS_IDENT { + cur_production_stat.block_success_count = value; + } else { + panic!("{}", CYCLE_HISTORY_DESER_ERROR); + } + + production_stats.insert(address, cur_production_stat); + } + + production_stats } - /// Gets a part of the Proof of Stake `deferred_credits`. Used only in the bootstrap process. + /// Getter for the rng_seed of a given cycle, prioritizing the cache and querying the database as fallback. /// - /// # Arguments: - /// `cursor`: indicates the bootstrap state after the previous payload + /// Panics if the cycle is not in the history. + fn get_cycle_history_rng_seed(&self, cycle: u64) -> BitVec { + let db = self.db.read(); + let handle = db.db.cf_handle(STATE_CF).expect(CF_ERROR); + + if let Some((cached_cycle, rng_seed)) = &self.rng_seed_cache && *cached_cycle == cycle { + return rng_seed.clone(); + } + + let serialized_rng_seed = db + .db + .get_cf( + handle, + rng_seed_key!(self.cycle_history_cycle_prefix(cycle)), + ) + .expect(CYCLE_HISTORY_DESER_ERROR) + .expect(CYCLE_HISTORY_DESER_ERROR); + + let (_, rng_seed) = self + .cycle_info_deserializer + .cycle_info_deserializer + .bitvec_deser + .deserialize::(&serialized_rng_seed) + .expect(CYCLE_HISTORY_DESER_ERROR); + + rng_seed + } + + /// Getter for the final_state_hash_snapshot of a given cycle. /// - /// # Returns - /// The PoS `deferred_credits` part and the updated cursor - pub fn get_deferred_credits_part( - &self, - cursor: StreamingStep, - ) -> (DeferredCredits, StreamingStep) { - let mut credits_part = DeferredCredits::new_with_hash(); - let left_bound = match cursor { - StreamingStep::Started => Unbounded, - StreamingStep::Ongoing(last_slot) => Excluded(last_slot), - StreamingStep::Finished(_) => return (credits_part, cursor), - }; - let mut credit_part_last_slot: Option = None; - for (slot, credits) in self.deferred_credits.credits.range((left_bound, Unbounded)) { - if credits_part.credits.len() < self.config.credits_bootstrap_part_size as usize { - credits_part.credits.insert(*slot, credits.clone()); - credit_part_last_slot = Some(*slot); - } else { + /// Panics if the cycle is not in the history. + fn get_cycle_history_final_state_hash_snapshot(&self, cycle: u64) -> Option { + let db = self.db.read(); + let handle = db.db.cf_handle(STATE_CF).expect(CF_ERROR); + + let serialized_state_hash = db + .db + .get_cf( + handle, + final_state_hash_snapshot_key!(self.cycle_history_cycle_prefix(cycle)), + ) + .expect(CYCLE_HISTORY_DESER_ERROR) + .expect(CYCLE_HISTORY_DESER_ERROR); + let (_, state_hash) = self + .cycle_info_deserializer + .cycle_info_deserializer + .opt_hash_deser + .deserialize::(&serialized_state_hash) + .expect(CYCLE_HISTORY_DESER_ERROR); + state_hash + } + + /// Used to recompute the cycle cache from the disk. 
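+ /// Cycles come back as `(cycle, complete)` pairs, in increasing cycle order.
+ /// A sketch of the on-disk key layout this scan assumes, per the key macros at
+ /// the top of this file (illustrative only):
+ ///
+ /// ```text
+ /// [CYCLE_HISTORY_PREFIX | varint(cycle) | COMPLETE_IDENT]          -> 0u8 / 1u8
+ /// [CYCLE_HISTORY_PREFIX | varint(cycle) | ROLL_COUNT_IDENT | addr] -> varint(roll_count)
+ /// ```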
+ /// + fn get_cycle_history_cycles(&self) -> Vec<(u64, bool)> { + let db = self.db.read(); + let handle = db.db.cf_handle(STATE_CF).expect(CF_ERROR); + + let mut found_cycles: Vec<(u64, bool)> = Vec::new(); + + while let Some(Ok((serialized_key, _))) = match found_cycles.last() { + Some((prev_cycle, _)) => db + .db + .iterator_cf( + handle, + IteratorMode::From( + &self.cycle_history_cycle_prefix(prev_cycle.saturating_add(1)), + Direction::Forward, + ), + ) + .next(), + None => db + .db + .iterator_cf( + handle, + IteratorMode::From(CYCLE_HISTORY_PREFIX.as_bytes(), Direction::Forward), + ) + .next(), + } { + if !serialized_key.starts_with(CYCLE_HISTORY_PREFIX.as_bytes()) { break; } + let (_, cycle) = self + .cycle_info_deserializer + .cycle_info_deserializer + .u64_deser + .deserialize::(&serialized_key[CYCLE_HISTORY_PREFIX.len()..]) + .expect(CYCLE_HISTORY_DESER_ERROR); + + found_cycles.push((cycle, self.is_cycle_complete(cycle))); } - if let Some(last_slot) = credit_part_last_slot { - (credits_part, StreamingStep::Ongoing(last_slot)) - } else { - (credits_part, StreamingStep::Finished(None)) + + found_cycles + } + + /// Queries a given cycle info in the database + /// Panics if the cycle is not on disk + pub fn get_cycle_info(&self, cycle: u64) -> CycleInfo { + let complete = self.is_cycle_complete(cycle); + let rng_seed = self.get_cycle_history_rng_seed(cycle); + let final_state_hash_snapshot = self.get_cycle_history_final_state_hash_snapshot(cycle); + + let roll_counts = self.get_all_roll_counts(cycle); + let production_stats = self + .get_all_production_stats(cycle) + .unwrap_or(PreHashMap::default()); + + let mut cycle_info = + CycleInfo::new_with_hash(cycle, complete, roll_counts, rng_seed, production_stats); + cycle_info.final_state_hash_snapshot = final_state_hash_snapshot; + cycle_info + } + + /// Gets the deferred credits for a given address that will be credited at a given slot + pub fn get_address_credits_for_slot(&self, addr: &Address, slot: &Slot) -> Option { + let db = self.db.read(); + let handle = db.db.cf_handle(STATE_CF).expect(CF_ERROR); + + let mut serialized_key = Vec::new(); + self.deferred_credits_serializer + .slot_ser + .serialize(slot, &mut serialized_key) + .expect(DEFERRED_CREDITS_SER_ERROR); + self.deferred_credits_serializer + .credits_ser + .address_ser + .serialize(addr, &mut serialized_key) + .expect(DEFERRED_CREDITS_SER_ERROR); + + match db.db.get_cf(handle, deferred_credits_key!(serialized_key)) { + Ok(Some(serialized_amount)) => { + let (_, amount) = self + .deferred_credits_deserializer + .credit_deserializer + .amount_deserializer + .deserialize::(&serialized_amount) + .expect(DEFERRED_CREDITS_DESER_ERROR); + Some(amount) + } + _ => None, } } - /// Sets a part of the Proof of Stake `cycle_history`. Used only in the bootstrap process. 
- /// - /// # Arguments - /// `part`: a `CycleInfo` received from `get_pos_state_part` and used to update PoS final state - pub fn set_cycle_history_part(&mut self, part: Option) -> StreamingStep { - if let Some(cycle_info) = part { - let opt_next_cycle = self - .cycle_history - .back() - .map(|info| info.cycle.saturating_add(1)); - let received_cycle = cycle_info.cycle; - if let Some(next_cycle) = opt_next_cycle && received_cycle != next_cycle { - panic!( - "PoS received cycle ({}) should be equal to the next expected cycle ({})", - received_cycle, next_cycle - ); + /// Gets the production stats for a given address + pub fn get_production_stats_for_address( + &self, + cycle: u64, + address: Address, + ) -> Option { + let db = self.db.read(); + let handle = db.db.cf_handle(STATE_CF).expect(CF_ERROR); + + let prefix = self.cycle_history_cycle_prefix(cycle); + + let query = vec![ + (handle, prod_stats_fail_key!(prefix, address)), + (handle, prod_stats_success_key!(prefix, address)), + ]; + + let results = db.db.multi_get_cf(query); + + match (results.get(0), results.get(1)) { + (Some(Ok(Some(serialized_fail))), Some(Ok(Some(serialized_success)))) => { + let (_, fail) = self + .cycle_info_deserializer + .cycle_info_deserializer + .production_stats_deser + .u64_deserializer + .deserialize::(serialized_fail) + .expect(CYCLE_HISTORY_DESER_ERROR); + let (_, success) = self + .cycle_info_deserializer + .cycle_info_deserializer + .production_stats_deser + .u64_deserializer + .deserialize::(serialized_success) + .expect(CYCLE_HISTORY_DESER_ERROR); + + Some(ProductionStats { + block_success_count: success, + block_failure_count: fail, + }) } - self.cycle_history.push_back(cycle_info); - StreamingStep::Ongoing(received_cycle) + _ => None, + } + } + + fn is_cycle_complete(&self, cycle: u64) -> bool { + let db = self.db.read(); + let handle = db.db.cf_handle(STATE_CF).expect(CF_ERROR); + + let prefix = self.cycle_history_cycle_prefix(cycle); + + if let Ok(Some(complete_value)) = db.db.get_cf(handle, complete_key!(prefix)) { + complete_value.len() == 1 && complete_value[0] == 1 } else { - StreamingStep::Finished(None) + false } } +} - /// Sets a part of the Proof of Stake `deferred_credits`. Used only in the bootstrap process. 
- /// - /// # Arguments - /// `part`: `DeferredCredits` from `get_pos_state_part` and used to update PoS final state - pub fn set_deferred_credits_part(&mut self, part: DeferredCredits) -> StreamingStep { - self.deferred_credits.extend(part); - if let Some(slot) = self - .deferred_credits - .credits - .last_key_value() - .map(|(&slot, _)| slot) - { - StreamingStep::Ongoing(slot) +// RocksDB setters +impl PoSFinalState { + /// Helper function to put a new CycleInfo to RocksDB, and update the cycle_history cache + fn put_new_cycle_info(&mut self, cycle_info: &CycleInfo, batch: &mut DBBatch) { + self.put_cycle_history_complete(cycle_info.cycle, cycle_info.complete, batch); + self.put_cycle_history_rng_seed(cycle_info.cycle, cycle_info.rng_seed.clone(), batch); + self.put_cycle_history_final_state_hash_snapshot( + cycle_info.cycle, + cycle_info.final_state_hash_snapshot, + batch, + ); + for (address, roll) in cycle_info.roll_counts.iter() { + self.put_cycle_history_address_entry( + cycle_info.cycle, + address, + Some(roll), + None, + batch, + ); + } + for (address, prod_stats) in cycle_info.production_stats.iter() { + self.put_cycle_history_address_entry( + cycle_info.cycle, + address, + None, + Some(prod_stats), + batch, + ); + } + self.cycle_history_cache + .push_back((cycle_info.cycle, cycle_info.complete)); + } + + /// Helper function to put a the complete flag for a given cycle + fn put_cycle_history_complete(&mut self, cycle: u64, value: bool, batch: &mut DBBatch) { + let db = self.db.read(); + + let prefix = self.cycle_history_cycle_prefix(cycle); + + let serialized_value = if value { &[1] } else { &[0] }; + + db.put_or_update_entry_value(batch, complete_key!(prefix), serialized_value); + + if let Some(index) = self.get_cycle_index(cycle) { + self.cycle_history_cache[index].1 = value; + } + } + + /// Helper function to put a the final_state_hash_snapshot for a given cycle + fn put_cycle_history_final_state_hash_snapshot( + &self, + cycle: u64, + value: Option, + batch: &mut DBBatch, + ) { + let db = self.db.read(); + + let prefix = self.cycle_history_cycle_prefix(cycle); + + let mut serialized_value = Vec::new(); + self.cycle_info_serializer + .cycle_info_serializer + .opt_hash_ser + .serialize(&value, &mut serialized_value) + .expect(CYCLE_HISTORY_SER_ERROR); + + db.put_or_update_entry_value( + batch, + final_state_hash_snapshot_key!(prefix), + &serialized_value, + ); + } + + /// Helper function to put a the rng_seed for a given cycle + fn put_cycle_history_rng_seed(&mut self, cycle: u64, value: BitVec, batch: &mut DBBatch) { + let db = self.db.read(); + + let prefix = self.cycle_history_cycle_prefix(cycle); + + let mut serialized_value = Vec::new(); + self.cycle_info_serializer + .cycle_info_serializer + .bitvec_ser + .serialize(&value, &mut serialized_value) + .expect(CYCLE_HISTORY_SER_ERROR); + + self.rng_seed_cache = Some((cycle, value.clone())); + + db.put_or_update_entry_value(batch, rng_seed_key!(prefix), &serialized_value); + } + + /// Internal function to put an entry for a given address in the cycle history + fn put_cycle_history_address_entry( + &self, + cycle: u64, + address: &Address, + roll_count: Option<&u64>, + production_stats: Option<&ProductionStats>, + batch: &mut DBBatch, + ) { + let db = self.db.read(); + + let prefix = self.cycle_history_cycle_prefix(cycle); + + // Roll count + if let Some(0) = roll_count { + db.delete_key(batch, roll_count_key!(prefix, address)); + } else if let Some(roll_count) = roll_count { + let mut serialized_roll_count = Vec::new(); + 
self.cycle_info_serializer + .cycle_info_serializer + .u64_ser + .serialize(roll_count, &mut serialized_roll_count) + .expect(CYCLE_HISTORY_SER_ERROR); + db.put_or_update_entry_value( + batch, + roll_count_key!(prefix, address), + &serialized_roll_count, + ); + } + + // Production stats + if let Some(production_stats) = production_stats { + let mut serialized_prod_stats_fail = Vec::new(); + self.cycle_info_serializer + .cycle_info_serializer + .u64_ser + .serialize( + &production_stats.block_failure_count, + &mut serialized_prod_stats_fail, + ) + .expect(CYCLE_HISTORY_SER_ERROR); + db.put_or_update_entry_value( + batch, + prod_stats_fail_key!(prefix, address), + &serialized_prod_stats_fail, + ); + + // Production stats success + let mut serialized_prod_stats_success = Vec::new(); + self.cycle_info_serializer + .cycle_info_serializer + .u64_ser + .serialize( + &production_stats.block_success_count, + &mut serialized_prod_stats_success, + ) + .expect(CYCLE_HISTORY_SER_ERROR); + db.put_or_update_entry_value( + batch, + prod_stats_success_key!(prefix, address), + &serialized_prod_stats_success, + ); + } + } + + /// Internal function to put an entry + pub fn put_deferred_credits_entry( + &self, + slot: &Slot, + address: &Address, + amount: &Amount, + batch: &mut DBBatch, + ) { + let db = self.db.read(); + + let mut serialized_key = Vec::new(); + self.deferred_credits_serializer + .slot_ser + .serialize(slot, &mut serialized_key) + .expect(DEFERRED_CREDITS_SER_ERROR); + self.deferred_credits_serializer + .credits_ser + .address_ser + .serialize(address, &mut serialized_key) + .expect(DEFERRED_CREDITS_SER_ERROR); + + if amount.is_zero() { + db.delete_key(batch, deferred_credits_key!(serialized_key)); } else { - StreamingStep::Finished(None) + let mut serialized_amount = Vec::new(); + self.deferred_credits_serializer + .credits_ser + .amount_ser + .serialize(amount, &mut serialized_amount) + .expect(DEFERRED_CREDITS_SER_ERROR); + + db.put_or_update_entry_value( + batch, + deferred_credits_key!(serialized_key), + &serialized_amount, + ); } } } + +/// Helpers for key and value management +impl PoSFinalState { + /// Helper function to construct the key prefix associated with a given cycle + fn cycle_history_cycle_prefix(&self, cycle: u64) -> Vec { + let mut serialized_key = Vec::new(); + serialized_key.extend_from_slice(CYCLE_HISTORY_PREFIX.as_bytes()); + self.cycle_info_serializer + .cycle_info_serializer + .u64_ser + .serialize(&cycle, &mut serialized_key) + .expect(CYCLE_HISTORY_SER_ERROR); + serialized_key + } + + /// Deserializes the key and value, useful after bootstrap + pub fn is_cycle_history_key_value_valid( + &self, + serialized_key: &[u8], + serialized_value: &[u8], + ) -> bool { + if !serialized_key.starts_with(CYCLE_HISTORY_PREFIX.as_bytes()) { + return false; + } + + let Ok((rest, _cycle)) = self + .cycle_info_deserializer + .cycle_info_deserializer + .u64_deser + .deserialize::(&serialized_key[CYCLE_HISTORY_PREFIX.len()..]) else { + return false; + }; + + if rest.is_empty() { + return false; + } + + match rest[0] { + COMPLETE_IDENT => { + if rest.len() != 1 { + return false; + } + if serialized_value.len() != 1 { + return false; + } + if serialized_value[0] > 1 { + return false; + } + } + RNG_SEED_IDENT => { + if rest.len() != 1 { + return false; + } + let Ok((rest, _rng_seed)) = self + .cycle_info_deserializer + .cycle_info_deserializer + .bitvec_deser + .deserialize::(serialized_value) else { + return false; + }; + if !rest.is_empty() { + return false; + } + } + 
FINAL_STATE_HASH_SNAPSHOT_IDENT => { + if rest.len() != 1 { + return false; + } + let Ok((rest, _final_state_hash)) = self + .cycle_info_deserializer + .cycle_info_deserializer + .opt_hash_deser + .deserialize::(serialized_value) else { + return false; + }; + if !rest.is_empty() { + return false; + } + } + ROLL_COUNT_IDENT => { + let Ok((rest, _addr)): std::result::Result<(&[u8], Address), nom::Err>> = self + .cycle_info_deserializer + .cycle_info_deserializer + .rolls_deser + .address_deserializer + .deserialize::(&rest[1..]) else { + return false; + }; + if !rest.is_empty() { + return false; + } + let Ok((rest, _addr)) = self + .cycle_info_deserializer + .cycle_info_deserializer + .rolls_deser + .u64_deserializer + .deserialize::(serialized_value) else { + return false; + }; + if !rest.is_empty() { + return false; + } + } + PROD_STATS_IDENT => { + let Ok((rest, _addr)): std::result::Result<(&[u8], Address), nom::Err>> = self + .cycle_info_deserializer + .cycle_info_deserializer + .rolls_deser + .address_deserializer + .deserialize::(&rest[1..]) else { + return false; + }; + if rest.len() != 1 { + return false; + } + + match rest[0] { + PROD_STATS_FAIL_IDENT => { + let Ok((rest, _fail)) = self + .cycle_info_deserializer + .cycle_info_deserializer + .production_stats_deser + .u64_deserializer + .deserialize::(serialized_value) else { + return false; + }; + if !rest.is_empty() { + return false; + } + } + PROD_STATS_SUCCESS_IDENT => { + let Ok((rest, _success)) = self + .cycle_info_deserializer + .cycle_info_deserializer + .production_stats_deser + .u64_deserializer + .deserialize::(serialized_value) else { + return false; + }; + if !rest.is_empty() { + return false; + } + } + _ => { + return false; + } + } + } + _ => { + return false; + } + } + + true + } + + /// Deserializes the key and value, useful after bootstrap + pub fn is_deferred_credits_key_value_valid( + &self, + serialized_key: &[u8], + serialized_value: &[u8], + ) -> bool { + if !serialized_key.starts_with(DEFERRED_CREDITS_PREFIX.as_bytes()) { + return false; + } + + let Ok((rest, _slot)) = self.deferred_credits_deserializer.slot_deserializer.deserialize::(&serialized_key[DEFERRED_CREDITS_PREFIX.len()..]) else { + return false; + }; + let Ok((rest, _addr)): std::result::Result<(&[u8], Address), nom::Err>> = self.deferred_credits_deserializer.credit_deserializer.address_deserializer.deserialize::(rest) else { + return false; + }; + if !rest.is_empty() { + return false; + } + + let Ok((rest, _mount)) = self.deferred_credits_deserializer.credit_deserializer.amount_deserializer.deserialize::(serialized_value) else { + return false; + }; + if !rest.is_empty() { + return false; + } + + true + } +} + +/// Helpers for testing +#[cfg(feature = "testing")] +impl PoSFinalState { + /// Queries all the deferred credits in the database + pub fn get_deferred_credits(&self) -> DeferredCredits { + let db = self.db.read(); + let handle = db.db.cf_handle(STATE_CF).expect(CF_ERROR); + + let mut deferred_credits = DeferredCredits::new_with_hash(); + + for (serialized_key, serialized_value) in db + .db + .prefix_iterator_cf(handle, DEFERRED_CREDITS_PREFIX) + .flatten() + { + if !serialized_key.starts_with(DEFERRED_CREDITS_PREFIX.as_bytes()) { + break; + } + let (rest, slot) = self + .deferred_credits_deserializer + .slot_deserializer + .deserialize::(&serialized_key[DEFERRED_CREDITS_PREFIX.len()..]) + .expect(DEFERRED_CREDITS_DESER_ERROR); + let (_, address) = self + .deferred_credits_deserializer + .credit_deserializer + .address_deserializer + 
.deserialize::(&rest) + .expect(DEFERRED_CREDITS_DESER_ERROR); + + let (_, amount) = self + .deferred_credits_deserializer + .credit_deserializer + .amount_deserializer + .deserialize::(&serialized_value) + .expect(DEFERRED_CREDITS_DESER_ERROR); + + deferred_credits.insert(slot, address, amount); + } + deferred_credits + } +} + +#[test] +fn test_pos_final_state_hash_computation() { + use crate::test_exports::MockSelectorController; + use crate::DeferredCredits; + use crate::PoSFinalState; + use bitvec::prelude::*; + use massa_db::{MassaDB, MassaDBConfig}; + use massa_models::config::constants::{ + MAX_DEFERRED_CREDITS_LENGTH, MAX_PRODUCTION_STATS_LENGTH, MAX_ROLLS_COUNT_LENGTH, + POS_SAVED_CYCLES, + }; + use massa_signature::KeyPair; + use std::collections::HashMap; + use tempfile::TempDir; + + let pos_config = PoSConfig { + periods_per_cycle: 2, + thread_count: 2, + cycle_history_length: POS_SAVED_CYCLES, + max_rolls_length: MAX_ROLLS_COUNT_LENGTH, + max_production_stats_length: MAX_PRODUCTION_STATS_LENGTH, + max_credit_length: MAX_DEFERRED_CREDITS_LENGTH, + }; + + // initialize the database config + let tempdir = TempDir::new().expect("cannot create temp directory"); + let db_config = MassaDBConfig { + path: tempdir.path().to_path_buf(), + max_history_length: 10, + max_new_elements: 100, + thread_count: 2, + }; + let db = Arc::new(RwLock::new(MassaDB::new(db_config))); + let (selector_controller, _) = MockSelectorController::new_with_receiver(); + let init_seed = Hash::compute_from(b""); + let initial_seeds = vec![Hash::compute_from(init_seed.to_bytes()), init_seed]; + + let deferred_credits_deserializer = DeferredCreditsDeserializer::new( + pos_config.thread_count, + pos_config.max_credit_length, + true, + ); + let cycle_info_deserializer = CycleHistoryDeserializer::new( + pos_config.cycle_history_length as u64, + pos_config.max_rolls_length, + pos_config.max_production_stats_length, + ); + + let mut pos_state = PoSFinalState { + config: pos_config, + db: db.clone(), + cycle_history_cache: Default::default(), + rng_seed_cache: None, + selector: selector_controller, + initial_rolls: Default::default(), + initial_seeds, + deferred_credits_serializer: DeferredCreditsSerializer::new(), + deferred_credits_deserializer, + cycle_info_serializer: CycleHistorySerializer::new(), + cycle_info_deserializer, + }; + + pos_state.recompute_pos_state_caches(); + + let mut batch = DBBatch::new(); + pos_state.create_initial_cycle(&mut batch); + db.write() + .write_batch(batch, Default::default(), Some(Slot::new(0, 0))); + + let addr = Address::from_public_key(&KeyPair::generate(0).unwrap().get_public_key()); + + // add changes + let mut roll_changes = PreHashMap::default(); + roll_changes.insert(addr, 10); + let mut production_stats = PreHashMap::default(); + production_stats.insert( + addr, + ProductionStats { + block_success_count: 4, + block_failure_count: 0, + }, + ); + let changes = PoSChanges { + seed_bits: bitvec![u8, Lsb0; 0, 1], + roll_changes: roll_changes.clone(), + production_stats: production_stats.clone(), + deferred_credits: DeferredCredits::new_with_hash(), + }; + + let mut batch = DBBatch::new(); + pos_state + .apply_changes_to_batch(changes, Slot::new(0, 0), false, &mut batch) + .unwrap(); + db.write() + .write_batch(batch, Default::default(), Some(Slot::new(0, 0))); + + // update changes once + roll_changes.clear(); + roll_changes.insert(addr, 20); + production_stats.clear(); + production_stats.insert( + addr, + ProductionStats { + block_success_count: 4, + block_failure_count: 6, +
}, + ); + let changes = PoSChanges { + seed_bits: bitvec![u8, Lsb0; 1, 0], + roll_changes: roll_changes.clone(), + production_stats: production_stats.clone(), + deferred_credits: DeferredCredits::new_with_hash(), + }; + + let mut batch = DBBatch::new(); + pos_state + .apply_changes_to_batch(changes, Slot::new(0, 1), false, &mut batch) + .unwrap(); + db.write() + .write_batch(batch, Default::default(), Some(Slot::new(0, 1))); + + // update changes twice + roll_changes.clear(); + roll_changes.insert(addr, 0); + production_stats.clear(); + production_stats.insert( + addr, + ProductionStats { + block_success_count: 4, + block_failure_count: 12, + }, + ); + + let changes = PoSChanges { + seed_bits: bitvec![u8, Lsb0; 0, 1], + roll_changes, + production_stats, + deferred_credits: DeferredCredits::new_with_hash(), + }; + + let mut batch = DBBatch::new(); + pos_state + .apply_changes_to_batch(changes, Slot::new(1, 0), false, &mut batch) + .unwrap(); + db.write() + .write_batch(batch, Default::default(), Some(Slot::new(1, 0))); + + let cycles = pos_state.get_cycle_history_cycles(); + assert_eq!(cycles.len(), 1, "wrong number of cycles"); + assert_eq!(cycles[0].0, 0, "cycle should be the 1st one"); + assert_eq!(cycles[0].1, false, "cycle should not be complete yet"); + + let cycle_info_a = pos_state.get_cycle_info(0); + + let mut prod_stats = HashMap::default(); + prod_stats.insert( + addr, + ProductionStats { + block_success_count: 12, + block_failure_count: 18, + }, + ); + + let cycle_info_b = CycleInfo::new_with_hash( + 0, + false, + BTreeMap::default(), + bitvec![u8, Lsb0; 0, 0, 0, 1, 1, 0, 0, 1], + prod_stats, + ); + + assert_eq!( + cycle_info_a.roll_counts_hash, cycle_info_b.roll_counts_hash, + "roll_counts_hash mismatch" + ); + assert_eq!( + cycle_info_a.production_stats_hash, cycle_info_b.production_stats_hash, + "production_stats_hash mismatch" + ); + assert_eq!( + cycle_info_a.cycle_global_hash, cycle_info_b.cycle_global_hash, + "global_hash mismatch" + ); +} diff --git a/massa-pos-exports/src/test_exports/bootstrap.rs b/massa-pos-exports/src/test_exports/bootstrap.rs index ace927d33e1..aacfa891a10 100644 --- a/massa-pos-exports/src/test_exports/bootstrap.rs +++ b/massa-pos-exports/src/test_exports/bootstrap.rs @@ -16,7 +16,9 @@ impl Default for SelectorConfig { endorsement_count: ENDORSEMENT_COUNT, max_draw_cache: 10, periods_per_cycle: PERIODS_PER_CYCLE, - genesis_address: Address::from_public_key(&KeyPair::generate().get_public_key()), + genesis_address: Address::from_public_key( + &KeyPair::generate(0).unwrap().get_public_key(), + ), channel_size: CHANNEL_SIZE, } } @@ -25,21 +27,30 @@ impl Default for SelectorConfig { /// Compare two PoS States pub fn assert_eq_pos_state(s1: &PoSFinalState, s2: &PoSFinalState) { assert_eq!( - s1.cycle_history.len(), - s2.cycle_history.len(), - "PoS cycle_history len mismatching" + s1.cycle_history_cache.len(), + s2.cycle_history_cache.len(), + "PoS cycle_history_cache len mismatching" ); assert_eq!( - s1.cycle_history, s2.cycle_history, - "PoS cycle_history mismatching" + s1.cycle_history_cache, s2.cycle_history_cache, + "PoS cycle_history_cache mismatching" ); + for cycle in s1.cycle_history_cache.clone() { + assert_eq!( + s1.get_cycle_info(cycle.0), + s2.get_cycle_info(cycle.0), + "PoS cycle_history mismatching" + ); + } + let deferred_credits_s1 = s1.get_deferred_credits(); + let deferred_credits_s2 = s2.get_deferred_credits(); assert_eq!( - s1.deferred_credits.credits.len(), - s2.deferred_credits.credits.len(), + deferred_credits_s1.credits.len(), + 
deferred_credits_s2.credits.len(), "PoS deferred_credits len mismatching" ); assert_eq!( - s1.deferred_credits.credits, s2.deferred_credits.credits, + deferred_credits_s1.credits, deferred_credits_s2.credits, "PoS deferred_credits mismatching" ); assert_eq!( diff --git a/massa-pos-worker/Cargo.toml b/massa-pos-worker/Cargo.toml index 575f1dd759b..76f82690d66 100644 --- a/massa-pos-worker/Cargo.toml +++ b/massa-pos-worker/Cargo.toml @@ -1,27 +1,21 @@ [package] name = "massa_pos_worker" -version = "0.1.0" +version = "0.23.0" authors = ["Massa Labs "] edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -bitvec = { version = "1.0", features = ["serde"] } parking_lot = { version = "0.12", features = ["deadlock_detection"] } -rand = "0.8.5" # pin exact version for determinism -rand_distr = "0.4.3" # pin exact version for determinism -rand_xoshiro = "0.6" -serde = { version = "1.0", features = ["derive"] } -serde_json = "1.0" +rand = "=0.8.5" +rand_distr = "=0.4.3" +rand_xoshiro = "=0.6" tracing = "0.1" # custom modules -massa_final_state = { path = "../massa-final-state" } massa_hash = { path = "../massa-hash" } massa_models = { path = "../massa-models" } massa_pos_exports = { path = "../massa-pos-exports" } -massa_signature = { path = "../massa-signature" } -massa_time = { path = "../massa-time" } [dev-dependencies] # custom modules with testing enabled diff --git a/massa-proto/Cargo.toml b/massa-proto/Cargo.toml index 1b176f732e8..5f272b638f7 100644 --- a/massa-proto/Cargo.toml +++ b/massa-proto/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "massa_proto" -version = "0.1.0" +version = "0.23.0" edition = "2021" description = "Protobuf definitions for the Massa blockchain" repository = "https://github.com/massalabs/massa/" diff --git a/massa-proto/build.rs b/massa-proto/build.rs index 367919dfa60..53576b543fd 100644 --- a/massa-proto/build.rs +++ b/massa-proto/build.rs @@ -39,7 +39,7 @@ mod tonic { .map_err(|e| format!("protobuf compilation error: {:?}", e))?; // Generate documentation for the protobuf API - generate_doc(&protos).map_err(|e| format!("protobuf documentation error: {:?}", e))?; + // generate_doc(&protos).map_err(|e| format!("protobuf documentation error: {:?}", e))?; // Return Ok if the build and documentation generation were successful Ok(()) diff --git a/massa-proto/doc/index.html b/massa-proto/doc/index.html index 1e1cab63f44..a0a7a50f900 100644 --- a/massa-proto/doc/index.html +++ b/massa-proto/doc/index.html @@ -652,7 +652,7 @@

[massa-proto/doc/index.html: residue from the generated HTML documentation; the recoverable change is a Table of Contents entry renamed from EScExecutionEventStatus to EExecutionOutputStatus]
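Editor's note on `test_pos_final_state_hash_computation` above: the expected `CycleInfo` (`cycle_info_b`) is obtained by accumulating the three applied `PoSChanges` batches. The sketch below is a standalone illustration of that bookkeeping, not massa code: `ProductionStats` is redefined locally as a stand-in for the `massa_models` type, and the two leading zero seed bits are assumed to come from `create_initial_cycle`.

// Standalone sketch (assumptions flagged above): reproduce the arithmetic
// behind the expected CycleInfo in the test.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
struct ProductionStats {
    block_success_count: u64,
    block_failure_count: u64,
}

fn main() {
    // Stats applied at slots (0,0), (0,1) and (1,0) in the test.
    let applied = [
        ProductionStats { block_success_count: 4, block_failure_count: 0 },
        ProductionStats { block_success_count: 4, block_failure_count: 6 },
        ProductionStats { block_success_count: 4, block_failure_count: 12 },
    ];
    let mut total = ProductionStats::default();
    for stats in applied {
        total.block_success_count += stats.block_success_count;
        total.block_failure_count += stats.block_failure_count;
    }
    // Matches the expected stats in cycle_info_b: 12 successes, 18 failures.
    assert_eq!(
        total,
        ProductionStats { block_success_count: 12, block_failure_count: 18 }
    );

    // Seed bits: two assumed initial bits, then each batch's 2-bit seed,
    // in application order -> the test's expected 0,0,0,1,1,0,0,1.
    let seed_bits: Vec<u8> = vec![vec![0u8, 0], vec![0, 1], vec![1, 0], vec![0, 1]].concat();
    assert_eq!(seed_bits, vec![0, 0, 0, 1, 1, 0, 0, 1]);
}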
diff --git a/massa-proto/proto/massa/api/v1/api.proto b/massa-proto/proto/massa/api/v1/api.proto index 51a20b723ac..270cc230b74 100644 --- a/massa-proto/proto/massa/api/v1/api.proto +++ b/massa-proto/proto/massa/api/v1/api.proto @@ -12,6 +12,7 @@ import "google/api/annotations.proto"; import "google/rpc/status.proto"; import "operation.proto"; import "slot.proto"; +import "versioning.proto"; option csharp_namespace = "Com.Massa.Api.V1"; option go_package = "github.com/massalabs/massa/api/v1;v1"; @@ -96,6 +97,11 @@ service MassaService { option (google.api.http) = {get: "/v1/version"}; } + // Get MIP status + rpc GetMipStatus(GetMipStatusRequest) returns (GetMipStatusResponse) { + option (google.api.http) = {get: "/v1/mip_status"}; + } + // ███████╗████████╗██████╗ ███████╗ █████╗ ███╗ ███╗ // ██╔════╝╚══██╔══╝██╔══██╗██╔════╝██╔══██╗████╗ ████║ // ███████╗ ██║ ██████╔╝█████╗ ███████║██╔████╔██║ @@ -414,6 +420,19 @@ message SelectorDraws { repeated IndexedSlot next_endorsement_draws = 3; } +message GetMipStatusRequest { + // Request id + string id = 1; +} + +// GetMipStatusResponse holds response from GetMipStatus +message GetMipStatusResponse { + // Request id + string id = 1; + // MipInfo - status id entry + repeated MipStatusEntry entry = 2; +} + // GetTransactionsThroughputRequest holds request for GetTransactionsThroughput message GetTransactionsThroughputRequest { // Request id diff --git a/massa-proto/proto/massa/api/v1/execution.proto b/massa-proto/proto/massa/api/v1/execution.proto index c8c3ca5a0da..a95904c728d 100644 --- a/massa-proto/proto/massa/api/v1/execution.proto +++ b/massa-proto/proto/massa/api/v1/execution.proto @@ -131,14 +131,19 @@ message AsyncPoolChangeValue { // The type of the change AsyncPoolChangeType type = 1; // AsyncPool message - AsyncMessage async_message = 2; + oneof message { + // Created asynchronous message + AsyncMessage created_message = 2; + // Updated asynchronous message + AsyncMessageUpdate updated_message = 3; + } } // AsyncPoolChangeType type enum enum AsyncPoolChangeType { ASYNC_POOL_CHANGE_TYPE_UNSPECIFIED = 0; // Default enum value - ASYNC_POOL_CHANGE_TYPE_ADD = 1; // Add type - ASYNC_POOL_CHANGE_TYPE_ACTIVATE = 2; // Activate only type + ASYNC_POOL_CHANGE_TYPE_SET = 1; // Set type + ASYNC_POOL_CHANGE_TYPE_UPDATE = 2; // Update only type ASYNC_POOL_CHANGE_TYPE_DELETE = 3; // Delete only type } @@ -179,6 +184,91 @@ message AsyncMessage { string hash = 14; } +// Asynchronous smart contract message update +message AsyncMessageUpdate { + // Change the slot at which the message was emitted + SetOrKeepSlot emission_slot = 1; + // Change the index of the emitted message within the `emission_slot`. + // This is used to disambiguate the emission of multiple messages at the same slot. + SetOrKeepFixed64 emission_index = 2; + // Change the address that sent the message + SetOrKeepString sender = 3; + // Change the address towards which the message is being sent + SetOrKeepString destination = 4; + // Change the handler function name within the destination address' bytecode + SetOrKeepString handler = 5; + // Change the maximum gas to use when processing the message + SetOrKeepFixed64 max_gas = 6; + // Change the fee paid by the sender when the message is processed. + SetOrKeepFixed64 fee = 7; + // Change the coins sent from the sender to the target address of the message. + // Those coins are spent by the sender address when the message is sent, + // and credited to the destination address when receiving the message.
+ // In case of failure or discard, those coins are reimbursed to the sender. + SetOrKeepFixed64 coins = 8; + // Change the slot at which the message starts being valid (bound included in the validity range) + SetOrKeepSlot validity_start = 9; + // Change the slot at which the message stops being valid (bound not included in the validity range) + SetOrKeepSlot validity_end = 10; + // Change the raw payload data of the message + SetOrKeepBytes data = 11; + // Change the trigger that defines when a message can be executed + SetOrKeepAsyncMessageTrigger trigger = 12; + // Change the boolean that determines whether the message can be executed. For messages without a filter, this boolean is always true. + // For messages with a filter, this boolean is true if the filter has been matched between `validity_start` and the current slot. + SetOrKeepBool can_be_executed = 13; + // Change the hash of the message + SetOrKeepString hash = 14; +} + +// Set or Keep Slot +message SetOrKeepSlot { + // The type of the change + SetOrKeepType type = 1; + // The value of that entry (optional) + optional Slot value = 2; +} + +// Set or Keep Fixed64 +message SetOrKeepFixed64 { + // The type of the change + SetOrKeepType type = 1; + // The value of that entry (optional) + optional fixed64 value = 2; +} + +// Set or Keep String +message SetOrKeepString { + // The type of the change + SetOrKeepType type = 1; + // The value of that entry (optional) + optional string value = 2; +} + +// Set or Keep Bytes +message SetOrKeepBytes { + // The type of the change + SetOrKeepType type = 1; + // The value of that entry (optional) + optional bytes value = 2; +} + +// Set or Keep Bool +message SetOrKeepBool { + // The type of the change + SetOrKeepType type = 1; + // The value of that entry (optional) + optional bool value = 2; +} + +// Set or Keep AsyncMessageTrigger +message SetOrKeepAsyncMessageTrigger { + // The type of the change + SetOrKeepType type = 1; + // The value of that entry (optional) + optional AsyncMessageTrigger value = 2; +} + // Structure defining a trigger for an asynchronous message message AsyncMessageTrigger { // Filter on the address diff --git a/massa-proto/proto/massa/api/v1/versioning.proto b/massa-proto/proto/massa/api/v1/versioning.proto new file mode 100644 index 00000000000..b360ed52ad7 --- /dev/null +++ b/massa-proto/proto/massa/api/v1/versioning.proto @@ -0,0 +1,55 @@ +// Copyright (c) 2023 MASSA LABS + +syntax = "proto3"; + +package massa.api.v1; + +option csharp_namespace = "Com.Massa.Api.V1"; +option go_package = "github.com/massalabs/massa/api/v1;v1"; +option java_multiple_files = true; +option java_package = "com.massa.api.v1"; +option objc_class_prefix = "GRPC"; +option php_namespace = "Com\\Massa\\Api\\V1"; +option ruby_package = "Com::Massa::Api::V1"; +option swift_prefix = "GRPC"; + +// Entry for GetMipStatusResponse +message MipStatusEntry { + // Mip info + MipInfo mip_info = 1; + // state id + ComponentStateId state_id = 2; +} + +// Same as MipInfo struct in versioning package +message MipInfo { + string name = 1; + fixed32 version = 2; + fixed64 start = 3; + fixed64 timeout = 4; + fixed64 activation_delay = 5; + repeated MipComponentEntry components = 6; +} + +message MipComponentEntry { + MipComponent kind = 1; + fixed32 version = 2; +} + +// Same as ComponentStateId enum in versioning package +enum ComponentStateId { + COMPONENT_STATE_ID_UNSPECIFIED = 0; + COMPONENT_STATE_ID_ERROR = 1; + COMPONENT_STATE_ID_DEFINED = 2; + COMPONENT_STATE_ID_STARTED = 3; + COMPONENT_STATE_ID_LOCKEDIN = 4; +
COMPONENT_STATE_ID_ACTIVE = 5; + COMPONENT_STATE_ID_FAILED = 6; +} + +// Versioning component enum +enum MipComponent { + MIP_COMPONENT_UNSPECIFIED = 0; + MIP_COMPONENT_ADDRESS = 1; + MIP_COMPONENT_KEYPAIR = 2; +} diff --git a/massa-proto/src/api.bin b/massa-proto/src/api.bin index 4920e697a46..2f0e5ac10f3 100644 Binary files a/massa-proto/src/api.bin and b/massa-proto/src/api.bin differ diff --git a/massa-proto/src/massa.api.v1.rs b/massa-proto/src/massa.api.v1.rs index 12a324e99b7..3e54594928d 100644 --- a/massa-proto/src/massa.api.v1.rs +++ b/massa-proto/src/massa.api.v1.rs @@ -545,8 +545,22 @@ pub struct AsyncPoolChangeValue { #[prost(enumeration = "AsyncPoolChangeType", tag = "1")] pub r#type: i32, /// AsyncPool message - #[prost(message, optional, tag = "2")] - pub async_message: ::core::option::Option<AsyncMessage>, + #[prost(oneof = "async_pool_change_value::Message", tags = "2, 3")] + pub message: ::core::option::Option<async_pool_change_value::Message>, +} +/// Nested message and enum types in `AsyncPoolChangeValue`. +pub mod async_pool_change_value { + /// AsyncPool message + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Message { + /// Created asynchronous message + #[prost(message, tag = "2")] + CreatedMessage(super::AsyncMessage), + /// Updated asynchronous message + #[prost(message, tag = "3")] + UpdatedMessage(super::AsyncMessageUpdate), + } } /// Asynchronous smart contract message #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -600,6 +614,124 @@ pub struct AsyncMessage { #[prost(string, tag = "14")] pub hash: ::prost::alloc::string::String, } +/// Asynchronous smart contract message update +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AsyncMessageUpdate { + /// Change the slot at which the message was emitted + #[prost(message, optional, tag = "1")] + pub emission_slot: ::core::option::Option<SetOrKeepSlot>, + /// Change the index of the emitted message within the `emission_slot`. + /// This is used to disambiguate the emission of multiple messages at the same slot. + #[prost(message, optional, tag = "2")] + pub emission_index: ::core::option::Option<SetOrKeepFixed64>, + /// Change the address that sent the message + #[prost(message, optional, tag = "3")] + pub sender: ::core::option::Option<SetOrKeepString>, + /// Change the address towards which the message is being sent + #[prost(message, optional, tag = "4")] + pub destination: ::core::option::Option<SetOrKeepString>, + /// Change the handler function name within the destination address' bytecode + #[prost(message, optional, tag = "5")] + pub handler: ::core::option::Option<SetOrKeepString>, + /// Change the maximum gas to use when processing the message + #[prost(message, optional, tag = "6")] + pub max_gas: ::core::option::Option<SetOrKeepFixed64>, + /// Change the fee paid by the sender when the message is processed. + #[prost(message, optional, tag = "7")] + pub fee: ::core::option::Option<SetOrKeepFixed64>, + /// Change the coins sent from the sender to the target address of the message. + /// Those coins are spent by the sender address when the message is sent, + /// and credited to the destination address when receiving the message.
+ #[prost(message, optional, tag = "8")] + pub coins: ::core::option::Option<SetOrKeepFixed64>, + /// Change the slot at which the message starts being valid (bound included in the validity range) + #[prost(message, optional, tag = "9")] + pub validity_start: ::core::option::Option<SetOrKeepSlot>, + /// Change the slot at which the message stops being valid (bound not included in the validity range) + #[prost(message, optional, tag = "10")] + pub validity_end: ::core::option::Option<SetOrKeepSlot>, + /// Change the raw payload data of the message + #[prost(message, optional, tag = "11")] + pub data: ::core::option::Option<SetOrKeepBytes>, + /// Change the trigger that defines when a message can be executed + #[prost(message, optional, tag = "12")] + pub trigger: ::core::option::Option<SetOrKeepAsyncMessageTrigger>, + /// Change the boolean that determines whether the message can be executed. For messages without a filter, this boolean is always true. + /// For messages with a filter, this boolean is true if the filter has been matched between `validity_start` and the current slot. + #[prost(message, optional, tag = "13")] + pub can_be_executed: ::core::option::Option<SetOrKeepBool>, + /// Change the hash of the message + #[prost(message, optional, tag = "14")] + pub hash: ::core::option::Option<SetOrKeepString>, +} +/// Set or Keep Slot +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SetOrKeepSlot { + /// The type of the change + #[prost(enumeration = "SetOrKeepType", tag = "1")] + pub r#type: i32, + /// The value of that entry (optional) + #[prost(message, optional, tag = "2")] + pub value: ::core::option::Option<Slot>, +} +/// Set or Keep Fixed64 +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SetOrKeepFixed64 { + /// The type of the change + #[prost(enumeration = "SetOrKeepType", tag = "1")] + pub r#type: i32, + /// The value of that entry (optional) + #[prost(fixed64, optional, tag = "2")] + pub value: ::core::option::Option<u64>, +} +/// Set or Keep String +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SetOrKeepString { + /// The type of the change + #[prost(enumeration = "SetOrKeepType", tag = "1")] + pub r#type: i32, + /// The value of that entry (optional) + #[prost(string, optional, tag = "2")] + pub value: ::core::option::Option<::prost::alloc::string::String>, +} +/// Set or Keep Bytes +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SetOrKeepBytes { + /// The type of the change + #[prost(enumeration = "SetOrKeepType", tag = "1")] + pub r#type: i32, + /// The value of that entry (optional) + #[prost(bytes = "vec", optional, tag = "2")] + pub value: ::core::option::Option<::prost::alloc::vec::Vec<u8>>, +} +/// Set or Keep Bool +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SetOrKeepBool { + /// The type of the change + #[prost(enumeration = "SetOrKeepType", tag = "1")] + pub r#type: i32, + /// The value of that entry (optional) + #[prost(bool, optional, tag = "2")] + pub value: ::core::option::Option<bool>, +} +/// Set or Keep AsyncMessageTrigger +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SetOrKeepAsyncMessageTrigger { + /// The type of the change + #[prost(enumeration = "SetOrKeepType", tag = "1")] + pub r#type: i32, + /// The value of that entry (optional) + #[prost(message, optional, tag = "2")] + pub value: ::core::option::Option<AsyncMessageTrigger>, +} /// Structure defining a trigger for an
asynchronous message #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -863,9 +995,9 @@ pub enum AsyncPoolChangeType { /// Default enum value Unspecified = 0, /// Add type - Add = 1, + Set = 1, /// Activate only type - Activate = 2, + Update = 2, /// Delete only type Delete = 3, } @@ -877,8 +1009,8 @@ impl AsyncPoolChangeType { pub fn as_str_name(&self) -> &'static str { match self { AsyncPoolChangeType::Unspecified => "ASYNC_POOL_CHANGE_TYPE_UNSPECIFIED", - AsyncPoolChangeType::Add => "ASYNC_POOL_CHANGE_TYPE_ADD", - AsyncPoolChangeType::Activate => "ASYNC_POOL_CHANGE_TYPE_ACTIVATE", + AsyncPoolChangeType::Set => "ASYNC_POOL_CHANGE_TYPE_SET", + AsyncPoolChangeType::Update => "ASYNC_POOL_CHANGE_TYPE_UPDATE", AsyncPoolChangeType::Delete => "ASYNC_POOL_CHANGE_TYPE_DELETE", } } @@ -886,8 +1018,8 @@ impl AsyncPoolChangeType { pub fn from_str_name(value: &str) -> ::core::option::Option { match value { "ASYNC_POOL_CHANGE_TYPE_UNSPECIFIED" => Some(Self::Unspecified), - "ASYNC_POOL_CHANGE_TYPE_ADD" => Some(Self::Add), - "ASYNC_POOL_CHANGE_TYPE_ACTIVATE" => Some(Self::Activate), + "ASYNC_POOL_CHANGE_TYPE_SET" => Some(Self::Set), + "ASYNC_POOL_CHANGE_TYPE_UPDATE" => Some(Self::Update), "ASYNC_POOL_CHANGE_TYPE_DELETE" => Some(Self::Delete), _ => None, } @@ -996,6 +1128,114 @@ impl SetOrDeleteType { } } } +/// Entry for GetMipStatusResponse +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MipStatusEntry { + /// Mip info + #[prost(message, optional, tag = "1")] + pub mip_info: ::core::option::Option, + /// state id + #[prost(enumeration = "ComponentStateId", tag = "2")] + pub state_id: i32, +} +/// Same as MipInfo struct in versioning package +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MipInfo { + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, + #[prost(fixed32, tag = "2")] + pub version: u32, + #[prost(fixed64, tag = "3")] + pub start: u64, + #[prost(fixed64, tag = "4")] + pub timeout: u64, + #[prost(fixed64, tag = "5")] + pub activation_delay: u64, + #[prost(message, repeated, tag = "6")] + pub components: ::prost::alloc::vec::Vec, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MipComponentEntry { + #[prost(enumeration = "MipComponent", tag = "1")] + pub kind: i32, + #[prost(fixed32, tag = "2")] + pub version: u32, +} +/// Same as ComponentStateId enum in versioning package +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum ComponentStateId { + Unspecified = 0, + Error = 1, + Defined = 2, + Started = 3, + Lockedin = 4, + Active = 5, + Failed = 6, +} +impl ComponentStateId { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
+ pub fn as_str_name(&self) -> &'static str { + match self { + ComponentStateId::Unspecified => "COMPONENT_STATE_ID_UNSPECIFIED", + ComponentStateId::Error => "COMPONENT_STATE_ID_ERROR", + ComponentStateId::Defined => "COMPONENT_STATE_ID_DEFINED", + ComponentStateId::Started => "COMPONENT_STATE_ID_STARTED", + ComponentStateId::Lockedin => "COMPONENT_STATE_ID_LOCKEDIN", + ComponentStateId::Active => "COMPONENT_STATE_ID_ACTIVE", + ComponentStateId::Failed => "COMPONENT_STATE_ID_FAILED", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "COMPONENT_STATE_ID_UNSPECIFIED" => Some(Self::Unspecified), + "COMPONENT_STATE_ID_ERROR" => Some(Self::Error), + "COMPONENT_STATE_ID_DEFINED" => Some(Self::Defined), + "COMPONENT_STATE_ID_STARTED" => Some(Self::Started), + "COMPONENT_STATE_ID_LOCKEDIN" => Some(Self::Lockedin), + "COMPONENT_STATE_ID_ACTIVE" => Some(Self::Active), + "COMPONENT_STATE_ID_FAILED" => Some(Self::Failed), + _ => None, + } + } +} +/// Same as MipComponent enum in versioning package +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum MipComponent { + Unspecified = 0, + Address = 1, + Keypair = 2, +} +impl MipComponent { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + MipComponent::Unspecified => "MIP_COMPONENT_UNSPECIFIED", + MipComponent::Address => "MIP_COMPONENT_ADDRESS", + MipComponent::Keypair => "MIP_COMPONENT_KEYPAIR", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "MIP_COMPONENT_UNSPECIFIED" => Some(Self::Unspecified), + "MIP_COMPONENT_ADDRESS" => Some(Self::Address), + "MIP_COMPONENT_KEYPAIR" => Some(Self::Keypair), + _ => None, + } + } +} /// GetBlocksRequest holds request for GetBlocks #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -1382,6 +1622,24 @@ pub struct SelectorDraws { #[prost(message, repeated, tag = "3")] pub next_endorsement_draws: ::prost::alloc::vec::Vec, } +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetMipStatusRequest { + /// Request id + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, +} +/// GetMipStatusResponse holds response from GetMipStatus +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetMipStatusResponse { + /// Request id + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, + /// MipInfo - status id entry + #[prost(message, repeated, tag = "2")] + pub entry: ::prost::alloc::vec::Vec, +} /// GetTransactionsThroughputRequest holds request for GetTransactionsThroughput #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -2137,6 +2395,32 @@ pub mod massa_service_client { .insert(GrpcMethod::new("massa.api.v1.MassaService", "GetVersion")); self.inner.unary(req, path, codec).await } + /// Get + pub async fn get_mip_status( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/massa.api.v1.MassaService/GetMipStatus", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("massa.api.v1.MassaService", "GetMipStatus")); + self.inner.unary(req, path, codec).await + } /// New received and produced blocks pub async fn new_blocks( &mut self, @@ -2520,6 +2804,14 @@ pub mod massa_service_server { tonic::Response, tonic::Status, >; + /// Get + async fn get_mip_status( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; /// Server streaming response type for the NewBlocks method. 
type NewBlocksStream: futures_core::Stream< Item = std::result::Result, @@ -3212,6 +3504,52 @@ pub mod massa_service_server { }; Box::pin(fut) } + "/massa.api.v1.MassaService/GetMipStatus" => { + #[allow(non_camel_case_types)] + struct GetMipStatusSvc(pub Arc); + impl< + T: MassaService, + > tonic::server::UnaryService + for GetMipStatusSvc { + type Response = super::GetMipStatusResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + (*inner).get_mip_status(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetMipStatusSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } "/massa.api.v1.MassaService/NewBlocks" => { #[allow(non_camel_case_types)] struct NewBlocksSvc(pub Arc); diff --git a/massa-protocol-exports/Cargo.toml b/massa-protocol-exports/Cargo.toml index e3576f6b133..13cf19ee0d0 100644 --- a/massa-protocol-exports/Cargo.toml +++ b/massa-protocol-exports/Cargo.toml @@ -1,16 +1,17 @@ [package] name = "massa_protocol_exports" -version = "0.1.0" +version = "0.23.0" authors = ["Massa Labs "] edition = "2021" [dependencies] displaydoc = "0.2" thiserror = "1.0" -nom = "7.1" +nom = "=7.1" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" -peernet = { git = "https://github.com/massalabs/PeerNet", rev = "1bb1f452bf63b78a89eb9542fb019b88d894c664" } +# TODO tag peernet version +peernet = { git = "https://github.com/massalabs/PeerNet", rev = "bf8adf5" } tempfile = { version = "3.3", optional = true } # use with testing feature mockall = "0.11.4" @@ -20,10 +21,11 @@ massa_time = { path = "../massa-time" } massa_storage = { path = "../massa-storage" } massa_serialization = { path = "../massa-serialization" } massa_signature = { path = "../massa-signature"} -massa_hash = { path = "../massa-hash", optional = true } +massa_versioning = { path = "../massa-versioning" } +massa_hash = { path = "../massa-hash"} [dev-dependencies] tempfile = "3.3" [features] -testing = ["tempfile", "massa_hash"] +testing = ["tempfile"] diff --git a/massa-protocol-exports/src/bootstrap_peers.rs b/massa-protocol-exports/src/bootstrap_peers.rs index e91410d07bd..e2b3f754dbf 100644 --- a/massa-protocol-exports/src/bootstrap_peers.rs +++ b/massa-protocol-exports/src/bootstrap_peers.rs @@ -1,23 +1,20 @@ -use std::collections::HashMap; -use std::net::SocketAddr; -use std::ops::Bound::Included; - +use crate::{PeerId, PeerIdDeserializer, PeerIdSerializer}; use massa_models::serialization::{IpAddrDeserializer, IpAddrSerializer}; use massa_serialization::{ Deserializer, SerializeError, Serializer, U16VarIntDeserializer, U16VarIntSerializer, U32VarIntDeserializer, U32VarIntSerializer, }; -use massa_signature::PUBLIC_KEY_SIZE_BYTES; use nom::{ - bytes::complete::take, 
error::{context, ContextError, ParseError}, multi::length_count, sequence::tuple, IResult, Parser, }; -use peernet::peer_id::PeerId; use peernet::transports::TransportType; use serde::{Deserialize, Serialize}; +use std::collections::HashMap; +use std::net::SocketAddr; +use std::ops::Bound::Included; /// Peer info provided in bootstrap #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] @@ -35,6 +32,7 @@ pub struct BootstrapPeersSerializer { u32_serializer: U32VarIntSerializer, ip_addr_serializer: IpAddrSerializer, port_serializer: U16VarIntSerializer, + peer_id_serializer: PeerIdSerializer, } impl BootstrapPeersSerializer { @@ -44,6 +42,7 @@ impl BootstrapPeersSerializer { u32_serializer: U32VarIntSerializer::new(), ip_addr_serializer: IpAddrSerializer::new(), port_serializer: U16VarIntSerializer::new(), + peer_id_serializer: PeerIdSerializer::new(), } } } @@ -58,16 +57,16 @@ impl Serializer for BootstrapPeersSerializer { /// ``` /// use massa_protocol_exports::{BootstrapPeers, PeerId, TransportType, BootstrapPeersSerializer}; /// use massa_serialization::Serializer; - /// use peernet::types::KeyPair; + /// use massa_signature::KeyPair; /// use std::collections::HashMap; /// use std::str::FromStr; /// - /// let keypair1 = KeyPair::generate(); + /// let keypair1 = KeyPair::generate(0).unwrap(); /// let mut peers = vec![]; /// let mut listeners1 = HashMap::default(); /// listeners1.insert("127.0.0.1:8080".parse().unwrap(), TransportType::Tcp); /// peers.push((PeerId::from_public_key(keypair1.get_public_key()), listeners1)); - /// let mut keypair2 = KeyPair::generate(); + /// let mut keypair2 = KeyPair::generate(0).unwrap(); /// let mut listeners2 = HashMap::default(); /// listeners2.insert("[::1]:8080".parse().unwrap(), TransportType::Tcp); /// peers.push((PeerId::from_public_key(keypair1.get_public_key()), listeners2)); @@ -89,7 +88,7 @@ impl Serializer for BootstrapPeersSerializer { })?; self.u32_serializer.serialize(&peers_count, buffer)?; for (peer_id, listeners) in value.0.iter() { - buffer.extend_from_slice(&peer_id.to_bytes()); + self.peer_id_serializer.serialize(peer_id, buffer)?; self.u32_serializer .serialize(&(listeners.len() as u32), buffer)?; for (addr, transport_type) in listeners.iter() { @@ -108,6 +107,7 @@ pub struct BootstrapPeersDeserializer { length_listeners_deserializer: U32VarIntDeserializer, ip_addr_deserializer: IpAddrDeserializer, port_deserializer: U16VarIntDeserializer, + peer_id_deserializer: PeerIdDeserializer, } impl BootstrapPeersDeserializer { @@ -125,6 +125,7 @@ impl BootstrapPeersDeserializer { ), ip_addr_deserializer: IpAddrDeserializer::new(), port_deserializer: U16VarIntDeserializer::new(Included(0), Included(u16::MAX)), + peer_id_deserializer: PeerIdDeserializer::new(), } } } @@ -133,16 +134,16 @@ impl Deserializer for BootstrapPeersDeserializer { /// ``` /// use massa_protocol_exports::{BootstrapPeers, PeerId, TransportType, BootstrapPeersSerializer, BootstrapPeersDeserializer}; /// use massa_serialization::{Serializer, Deserializer, DeserializeError}; - /// use peernet::types::KeyPair; + /// use massa_signature::KeyPair; /// use std::collections::HashMap; /// use std::str::FromStr; /// - /// let keypair1 = KeyPair::generate(); + /// let keypair1 = KeyPair::generate(0).unwrap(); /// let mut peers = vec![]; /// let mut listeners1 = HashMap::default(); /// listeners1.insert("127.0.0.1:8080".parse().unwrap(), TransportType::Tcp); /// peers.push((PeerId::from_public_key(keypair1.get_public_key()), listeners1)); - /// let mut keypair2 = 
KeyPair::generate(); + /// let mut keypair2 = KeyPair::generate(0).unwrap(); /// let mut listeners2 = HashMap::default(); /// listeners2.insert("[::1]:8080".parse().unwrap(), TransportType::Tcp); /// peers.push((PeerId::from_public_key(keypair1.get_public_key()), listeners2)); @@ -166,24 +167,7 @@ impl Deserializer for BootstrapPeersDeserializer { context("Failed Peer deserialization", |input| { tuple(( context("Failed PeerId deserialization", |input: &'a [u8]| { - let (rest, peer_id) = take(32usize)(input)?; - Ok(( - rest, - PeerId::from_bytes( - peer_id[..PUBLIC_KEY_SIZE_BYTES].try_into().map_err(|_| { - nom::Err::Error(ParseError::from_error_kind( - input, - nom::error::ErrorKind::Count, - )) - })?, - ) - .map_err(|_| { - nom::Err::Error(ParseError::from_error_kind( - input, - nom::error::ErrorKind::Count, - )) - })?, - )) + self.peer_id_deserializer.deserialize(input) }), length_count( context("Failed length deserialization", |input| { diff --git a/massa-protocol-exports/src/controller_trait.rs b/massa-protocol-exports/src/controller_trait.rs index e671a9e0f80..3a0b36ecf73 100644 --- a/massa-protocol-exports/src/controller_trait.rs +++ b/massa-protocol-exports/src/controller_trait.rs @@ -6,12 +6,12 @@ use std::net::SocketAddr; use crate::error::ProtocolError; use crate::BootstrapPeers; +use crate::PeerId; use massa_models::prehash::{PreHashMap, PreHashSet}; use massa_models::stats::NetworkStats; use massa_models::{block_header::SecuredHeader, block_id::BlockId}; use massa_storage::Storage; use peernet::peer::PeerConnectionType; -use peernet::peer_id::PeerId; #[cfg_attr(any(test, feature = "testing"), mockall::automock)] pub trait ProtocolController: Send + Sync { diff --git a/massa-protocol-exports/src/error.rs b/massa-protocol-exports/src/error.rs index e3af7ef63be..74c1a5f33a9 100644 --- a/massa-protocol-exports/src/error.rs +++ b/massa-protocol-exports/src/error.rs @@ -2,6 +2,7 @@ use displaydoc::Display; use massa_models::error::ModelsError; +use massa_versioning::versioning_factory::FactoryError; use std::net::IpAddr; use thiserror::Error; @@ -41,6 +42,15 @@ pub enum ProtocolError { InvalidOperationError(String), /// Listener error: {0} ListenerError(String), + /// Incompatible network version: local current is {local}, received is {received} + IncompatibleNetworkVersion { + /// local current version + local: u32, + /// received version from incoming header + received: u32, + }, + /// Versioned factory error: {0} + FactoryError(#[from] FactoryError), } #[derive(Debug)] diff --git a/massa-protocol-exports/src/lib.rs b/massa-protocol-exports/src/lib.rs index 961ef5dcea3..1d635cbe21f 100644 --- a/massa-protocol-exports/src/lib.rs +++ b/massa-protocol-exports/src/lib.rs @@ -1,6 +1,7 @@ mod bootstrap_peers; mod controller_trait; mod error; +mod peer_id; mod settings; pub use bootstrap_peers::{ @@ -8,8 +9,8 @@ }; pub use controller_trait::{ProtocolController, ProtocolManager}; pub use error::ProtocolError; +pub use peer_id::{PeerId, PeerIdDeserializer, PeerIdSerializer}; pub use peernet::peer::PeerConnectionType; -pub use peernet::peer_id::PeerId; pub use peernet::transports::TransportType; pub use settings::{PeerCategoryInfo, ProtocolConfig}; diff --git a/massa-protocol-exports/src/peer_id.rs b/massa-protocol-exports/src/peer_id.rs new file mode 100644 index 00000000000..a1fed1809cd --- /dev/null +++ b/massa-protocol-exports/src/peer_id.rs @@ -0,0 +1,143 @@ +use std::{fmt::Display, hash::Hash, str::FromStr}; + +use massa_hash::Hash as MassaHash; +use
massa_serialization::{Deserializer, Serializer}; +use massa_signature::{KeyPair, PublicKey, PublicKeyDeserializer, Signature}; +use peernet::peer_id::PeerId as PeernetPeerId; + +use crate::ProtocolError; + +#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] +pub struct PeerId { + public_key: PublicKey, +} + +impl PeerId { + pub fn from_public_key(public_key: PublicKey) -> Self { + Self { public_key } + } + + pub fn get_public_key(&self) -> PublicKey { + self.public_key + } + + pub fn verify_signature( + &self, + hash: &MassaHash, + signature: &Signature, + ) -> Result<(), ProtocolError> { + self.public_key + .verify_signature(hash, signature) + .map_err(|err| ProtocolError::GeneralProtocolError(err.to_string())) + } +} + +impl FromStr for PeerId { + type Err = ProtocolError; + + fn from_str(s: &str) -> Result { + let public_key = PublicKey::from_str(s) + .map_err(|err| ProtocolError::GeneralProtocolError(err.to_string()))?; + Ok(Self { public_key }) + } +} + +impl Display for PeerId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.public_key.fmt(f) + } +} + +impl PeernetPeerId for PeerId { + fn generate() -> Self { + Self { + public_key: KeyPair::generate(0).unwrap().get_public_key(), + } + } +} + +#[derive(Default, Clone)] +pub struct PeerIdSerializer {} + +impl PeerIdSerializer { + pub fn new() -> Self { + Self {} + } +} + +impl Serializer for PeerIdSerializer { + fn serialize( + &self, + value: &PeerId, + buffer: &mut Vec, + ) -> Result<(), massa_serialization::SerializeError> { + buffer.extend_from_slice(&value.public_key.to_bytes()); + Ok(()) + } +} + +#[derive(Default, Clone)] +pub struct PeerIdDeserializer { + public_key_deserializer: PublicKeyDeserializer, +} + +impl PeerIdDeserializer { + pub fn new() -> Self { + PeerIdDeserializer { + public_key_deserializer: PublicKeyDeserializer::new(), + } + } +} + +impl Deserializer for PeerIdDeserializer { + fn deserialize<'a, E: nom::error::ParseError<&'a [u8]> + nom::error::ContextError<&'a [u8]>>( + &self, + buffer: &'a [u8], + ) -> nom::IResult<&'a [u8], PeerId, E> { + self.public_key_deserializer + .deserialize(buffer) + .map(|(buffer, public_key)| (buffer, PeerId { public_key })) + } +} + +impl ::serde::Serialize for PeerId { + /// `::serde::Serialize` trait for `PeerId` + /// + fn serialize(&self, s: S) -> Result { + s.collect_str(&self.to_string()) + } +} + +impl<'de> ::serde::Deserialize<'de> for PeerId { + /// `::serde::Deserialize` trait for `PeerId` + fn deserialize>(d: D) -> Result { + struct Base58CheckVisitor; + + impl<'de> ::serde::de::Visitor<'de> for Base58CheckVisitor { + type Value = PeerId; + + fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { + formatter.write_str("an ASCII base58check string") + } + + fn visit_bytes(self, v: &[u8]) -> Result + where + E: ::serde::de::Error, + { + if let Ok(v_str) = std::str::from_utf8(v) { + PeerId::from_str(v_str).map_err(E::custom) + } else { + Err(E::invalid_value(::serde::de::Unexpected::Bytes(v), &self)) + } + } + + fn visit_str(self, v: &str) -> Result + where + E: ::serde::de::Error, + { + PeerId::from_str(v).map_err(E::custom) + } + } + d.deserialize_str(Base58CheckVisitor) + } +} diff --git a/massa-protocol-exports/src/settings.rs b/massa-protocol-exports/src/settings.rs index e6fb8aac04c..e18e2217bc7 100644 --- a/massa-protocol-exports/src/settings.rs +++ b/massa-protocol-exports/src/settings.rs @@ -84,6 +84,8 @@ pub struct ProtocolConfig { pub max_operations_propagation_time: MassaTime, /// max time we 
propagate endorsements pub max_endorsements_propagation_time: MassaTime, + /// Max message size + pub max_message_size: usize, /// number of thread tester pub thread_tester_count: u8, /// Max size of the channel for command to the connectivity thread diff --git a/massa-protocol-exports/src/test_exports/config.rs b/massa-protocol-exports/src/test_exports/config.rs index 83ccabb5a5b..4dca993087a 100644 --- a/massa-protocol-exports/src/test_exports/config.rs +++ b/massa-protocol-exports/src/test_exports/config.rs @@ -1,7 +1,7 @@ use std::collections::HashMap; use crate::{settings::PeerCategoryInfo, ProtocolConfig}; -use massa_models::config::ENDORSEMENT_COUNT; +use massa_models::config::{ENDORSEMENT_COUNT, MAX_MESSAGE_SIZE}; use massa_time::MassaTime; use tempfile::NamedTempFile; @@ -12,7 +12,7 @@ impl Default for ProtocolConfig { .expect("cannot create temp file") .path() .to_path_buf(), - ask_block_timeout: 500.into(), + ask_block_timeout: MassaTime::from_millis(500), max_known_blocks_saved_size: 300, max_known_blocks_size: 100, max_node_known_blocks_size: 100, @@ -26,10 +26,10 @@ impl Default for ProtocolConfig { operation_batch_buffer_capacity: 1000, operation_announcement_buffer_capacity: 1000, max_operation_storage_time: MassaTime::from_millis(60000), - operation_batch_proc_period: 200.into(), + operation_batch_proc_period: MassaTime::from_millis(200), asked_operations_buffer_capacity: 10000, - asked_operations_pruning_period: 500.into(), - operation_announcement_interval: 150.into(), + asked_operations_pruning_period: MassaTime::from_millis(500), + operation_announcement_interval: MassaTime::from_millis(150), max_operations_per_message: 1024, max_operations_per_block: 5000, thread_count: 32, @@ -59,6 +59,7 @@ impl Default for ProtocolConfig { max_size_channel_network_to_peer_handler: 1000, max_size_channel_commands_peer_testers: 10000, max_size_channel_commands_peers: 300, + max_message_size: MAX_MESSAGE_SIZE as usize, endorsement_count: ENDORSEMENT_COUNT, max_size_block_infos: 200, max_size_value_datastore: 1_000_000, diff --git a/massa-protocol-exports/src/test_exports/tools.rs b/massa-protocol-exports/src/test_exports/tools.rs index 1bc9a81d2f7..eba78ff387f 100644 --- a/massa-protocol-exports/src/test_exports/tools.rs +++ b/massa-protocol-exports/src/test_exports/tools.rs @@ -22,6 +22,8 @@ use massa_signature::KeyPair; pub fn create_block(keypair: &KeyPair) -> SecureShareBlock { let header = BlockHeader::new_verifiable( BlockHeader { + current_version: 0, + announced_version: 0, slot: Slot::new(1, 0), parents: vec![ BlockId(Hash::compute_from("Genesis 0".as_bytes())), @@ -64,6 +66,8 @@ pub fn create_block_with_operations( ); let header = BlockHeader::new_verifiable( BlockHeader { + current_version: 0, + announced_version: 0, slot, parents: vec![ BlockId(Hash::compute_from("Genesis 0".as_bytes())), @@ -102,6 +106,8 @@ pub fn create_block_with_endorsements( ) -> SecureShareBlock { let header = BlockHeader::new_verifiable( BlockHeader { + current_version: 0, + announced_version: 0, slot, parents: vec![ BlockId(Hash::compute_from("Genesis 0".as_bytes())), @@ -130,7 +136,7 @@ pub fn create_block_with_endorsements( /// Creates an endorsement for use in protocol tests, /// without paying attention to consensus related things. 
pub fn create_endorsement() -> SecureShareEndorsement { - let keypair = KeyPair::generate(); + let keypair = KeyPair::generate(0).unwrap(); let content = Endorsement { slot: Slot::new(10, 1), @@ -145,7 +151,7 @@ pub fn create_operation_with_expire_period( keypair: &KeyPair, expire_period: u64, ) -> SecureShareOperation { - let recv_keypair = KeyPair::generate(); + let recv_keypair = KeyPair::generate(0).unwrap(); let op = OperationType::Transaction { recipient_address: Address::from_public_key(&recv_keypair.get_public_key()), diff --git a/massa-protocol-worker/Cargo.toml b/massa-protocol-worker/Cargo.toml index c8515de082b..02b0c943c5e 100644 --- a/massa-protocol-worker/Cargo.toml +++ b/massa-protocol-worker/Cargo.toml @@ -1,21 +1,22 @@ [package] name = "massa_protocol_worker" -version = "0.1.0" +version = "0.23.0" authors = ["Massa Labs "] edition = "2021" [dependencies] -tracing = "0.1" +tracing = {version = "0.1", features = ["log"]} rand = "0.8" parking_lot = "0.12" crossbeam = "0.8" serde_json = "1.0" -nom = "7.1" +nom = "=7.1" num_enum = "0.5" -peernet = { git = "https://github.com/massalabs/PeerNet", rev = "1bb1f452bf63b78a89eb9542fb019b88d894c664" } +# TODO tag peernet version +peernet = { git = "https://github.com/massalabs/PeerNet", rev = "bf8adf5" } tempfile = { version = "3.3", optional = true } # use with testing feature rayon = "1.7.0" -lru = "0.10.0" +schnellru = "0.2.1" # modules Custom massa_hash = { path = "../massa-hash" } @@ -28,6 +29,7 @@ massa_storage = { path = "../massa-storage" } massa_serialization = { path = "../massa-serialization" } massa_signature = { path = "../massa-signature" } massa_time = { path = "../massa-time" } +massa_versioning = { path = "../massa-versioning" } [dev-dependencies] tempfile = "3.3" diff --git a/massa-protocol-worker/src/connectivity.rs b/massa-protocol-worker/src/connectivity.rs index 930b348756a..fcd33b8d0d3 100644 --- a/massa-protocol-worker/src/connectivity.rs +++ b/massa-protocol-worker/src/connectivity.rs @@ -5,14 +5,14 @@ use crossbeam::{ use massa_consensus_exports::ConsensusController; use massa_models::stats::NetworkStats; use massa_pool_exports::PoolController; -use massa_protocol_exports::{PeerCategoryInfo, ProtocolConfig, ProtocolError}; +use massa_protocol_exports::{PeerCategoryInfo, PeerId, ProtocolConfig, ProtocolError}; use massa_storage::Storage; +use massa_versioning::versioning::MipStore; use parking_lot::RwLock; -use peernet::{peer::PeerConnectionType, transports::OutConnectionConfig}; -use peernet::{peer_id::PeerId, transports::TcpOutConnectionConfig}; +use peernet::peer::PeerConnectionType; use std::net::SocketAddr; +use std::sync::Arc; use std::{collections::HashMap, net::IpAddr}; -use std::{num::NonZeroUsize, sync::Arc}; use std::{thread::JoinHandle, time::Duration}; use tracing::{info, warn}; @@ -60,6 +60,7 @@ pub(crate) fn start_connectivity_thread( peer_categories: HashMap, PeerCategoryInfo)>, _default_category: PeerCategoryInfo, config: ProtocolConfig, + mip_store: MipStore, ) -> Result<(Sender, JoinHandle<()>), ProtocolError> { let handle = std::thread::Builder::new() .name("protocol-connectivity".to_string()) @@ -85,17 +86,17 @@ pub(crate) fn start_connectivity_thread( let total_in_slots = config.peers_categories.values().map(|v| v.max_in_connections_post_handshake).sum::() + config.default_category_info.max_in_connections_post_handshake; let total_out_slots = config.peers_categories.values().map(| v| v.target_out_connections).sum::() + config.default_category_info.target_out_connections; let 
operation_cache = Arc::new(RwLock::new(OperationCache::new( - NonZeroUsize::new(config.max_known_ops_size).unwrap(), - NonZeroUsize::new(total_in_slots + total_out_slots).unwrap(), + config.max_known_blocks_size.try_into().unwrap(), + (total_in_slots + total_out_slots).try_into().unwrap(), ))); let endorsement_cache = Arc::new(RwLock::new(EndorsementCache::new( - NonZeroUsize::new(config.max_known_endorsements_size).unwrap(), - NonZeroUsize::new(total_in_slots + total_out_slots).unwrap(), + config.max_known_endorsements_size.try_into().unwrap(), + (total_in_slots + total_out_slots).try_into().unwrap() ))); let block_cache = Arc::new(RwLock::new(BlockCache::new( - NonZeroUsize::new(config.max_known_blocks_size).unwrap(), - NonZeroUsize::new(total_in_slots + total_out_slots).unwrap(), + config.max_known_blocks_size.try_into().unwrap(), + (total_in_slots + total_out_slots).try_into().unwrap(), ))); // Start handlers @@ -154,6 +155,7 @@ pub(crate) fn start_connectivity_thread( operation_cache, block_cache, storage.clone_without_refs(), + mip_store, ); //Try to connect to peers @@ -193,7 +195,7 @@ pub(crate) fn start_connectivity_thread( let peers: HashMap = network_controller.get_active_connections().get_peers_connected().into_iter().map(|(peer_id, peer)| { (peer_id, (peer.0, peer.1)) }).collect(); - responder.send((stats, peers)).unwrap_or_else(|_| warn!("Failed to send stats to responder")); + responder.try_send((stats, peers)).unwrap_or_else(|_| warn!("Failed to send stats to responder")); } Err(_) => { warn!("Channel to connectivity thread is closed. Stopping the protocol"); @@ -269,7 +271,7 @@ pub(crate) fn start_connectivity_thread( for addr in addresses_to_connect { info!("Trying to connect to addr {}", addr); // We only manage TCP for now - if let Err(err) = network_controller.try_connect(addr, config.timeout_connection.to_duration(), &OutConnectionConfig::Tcp(Box::new(TcpOutConnectionConfig::new(config.read_write_limit_bytes_per_second / 10, Duration::from_millis(100))))) { + if let Err(err) = network_controller.try_connect(addr, config.timeout_connection.to_duration()) { warn!("Failed to connect to peer {:?}: {:?}", addr, err); } } diff --git a/massa-protocol-worker/src/context.rs b/massa-protocol-worker/src/context.rs new file mode 100644 index 00000000000..1f969743588 --- /dev/null +++ b/massa-protocol-worker/src/context.rs @@ -0,0 +1,14 @@ +use massa_protocol_exports::PeerId; +use massa_signature::KeyPair; +use peernet::context::Context as PeernetContext; + +#[derive(Clone)] +pub struct Context { + pub our_keypair: KeyPair, +} + +impl PeernetContext for Context { + fn get_peer_id(&self) -> PeerId { + PeerId::from_public_key(self.our_keypair.get_public_key()) + } +} diff --git a/massa-protocol-worker/src/controller.rs b/massa-protocol-worker/src/controller.rs index 43c7949d291..a4c0df110b7 100644 --- a/massa-protocol-worker/src/controller.rs +++ b/massa-protocol-worker/src/controller.rs @@ -7,9 +7,9 @@ use massa_models::{ prehash::{PreHashMap, PreHashSet}, stats::NetworkStats, }; -use massa_protocol_exports::{BootstrapPeers, ProtocolController, ProtocolError}; +use massa_protocol_exports::{BootstrapPeers, PeerId, ProtocolController, ProtocolError}; use massa_storage::Storage; -use peernet::{peer::PeerConnectionType, peer_id::PeerId}; +use peernet::peer::PeerConnectionType; use crate::{ connectivity::ConnectivityCommand, @@ -76,7 +76,7 @@ impl ProtocolController for ProtocolControllerImpl { self.sender_block_handler .as_ref() .unwrap() - 
.send(BlockHandlerPropagationCommand::IntegratedBlock { block_id, storage }) + .try_send(BlockHandlerPropagationCommand::IntegratedBlock { block_id, storage }) .map_err(|_| ProtocolError::ChannelError("integrated_block command send error".into())) } @@ -85,7 +85,7 @@ impl ProtocolController for ProtocolControllerImpl { self.sender_block_handler .as_ref() .unwrap() - .send(BlockHandlerPropagationCommand::AttackBlockDetected( + .try_send(BlockHandlerPropagationCommand::AttackBlockDetected( block_id, )) .map_err(|_| { @@ -117,7 +117,7 @@ impl ProtocolController for ProtocolControllerImpl { self.sender_operation_handler .as_ref() .unwrap() - .send(OperationHandlerPropagationCommand::AnnounceOperations( + .try_send(OperationHandlerPropagationCommand::AnnounceOperations( operations, )) .map_err(|_| { @@ -130,7 +130,7 @@ impl ProtocolController for ProtocolControllerImpl { self.sender_endorsement_handler .as_ref() .unwrap() - .send(EndorsementHandlerPropagationCommand::PropagateEndorsements( + .try_send(EndorsementHandlerPropagationCommand::PropagateEndorsements( endorsements, )) .map_err(|_| { @@ -151,7 +151,7 @@ impl ProtocolController for ProtocolControllerImpl { self.sender_connectivity_thread .as_ref() .unwrap() - .send(ConnectivityCommand::GetStats { responder: sender }) + .try_send(ConnectivityCommand::GetStats { responder: sender }) .map_err(|_| ProtocolError::ChannelError("get_stats command send error".into()))?; receiver .recv_timeout(Duration::from_secs(10)) @@ -162,7 +162,7 @@ impl ProtocolController for ProtocolControllerImpl { self.sender_peer_management_thread .as_ref() .unwrap() - .send(PeerManagementCmd::Ban(peer_ids)) + .try_send(PeerManagementCmd::Ban(peer_ids)) .map_err(|_| ProtocolError::ChannelError("ban_peers command send error".into())) } @@ -170,7 +170,7 @@ impl ProtocolController for ProtocolControllerImpl { self.sender_peer_management_thread .as_ref() .unwrap() - .send(PeerManagementCmd::Unban(peer_ids)) + .try_send(PeerManagementCmd::Unban(peer_ids)) .map_err(|_| ProtocolError::ChannelError("unban_peers command send error".into())) } @@ -179,7 +179,7 @@ impl ProtocolController for ProtocolControllerImpl { self.sender_peer_management_thread .as_ref() .unwrap() - .send(PeerManagementCmd::GetBootstrapPeers { responder: sender }) + .try_send(PeerManagementCmd::GetBootstrapPeers { responder: sender }) .map_err(|_| { ProtocolError::ChannelError("get_bootstrap_peers command send error".into()) })?; diff --git a/massa-protocol-worker/src/handlers/block_handler/cache.rs b/massa-protocol-worker/src/handlers/block_handler/cache.rs index e38cfa47120..cb9cbca087e 100644 --- a/massa-protocol-worker/src/handlers/block_handler/cache.rs +++ b/massa-protocol-worker/src/handlers/block_handler/cache.rs @@ -1,15 +1,16 @@ -use std::{num::NonZeroUsize, sync::Arc, time::Instant}; +use std::{collections::HashSet, sync::Arc, time::Instant}; -use lru::LruCache; use massa_models::{block_header::SecuredHeader, block_id::BlockId}; +use massa_protocol_exports::PeerId; use parking_lot::RwLock; -use peernet::peer_id::PeerId; +use schnellru::{ByLength, LruMap}; +use tracing::log::warn; pub struct BlockCache { - pub checked_headers: LruCache, + pub checked_headers: LruMap, #[allow(clippy::type_complexity)] - pub blocks_known_by_peer: LruCache, Instant)>, - pub max_known_blocks_by_peer: NonZeroUsize, + pub blocks_known_by_peer: LruMap, Instant)>, + pub max_known_blocks_by_peer: u32, } impl BlockCache { @@ -20,25 +21,64 @@ impl BlockCache { val: bool, timeout: Instant, ) { - let (blocks, _) = self + let 
Ok((blocks, _)) = self .blocks_known_by_peer - .get_or_insert_mut(from_peer_id.clone(), || { - (LruCache::new(self.max_known_blocks_by_peer), Instant::now()) - }); + .get_or_insert(from_peer_id.clone(), || { + ( + LruMap::new(ByLength::new(self.max_known_blocks_by_peer)), + Instant::now(), + ) + }) + .ok_or(()) else { + warn!("blocks_known_by_peer limit reached"); + return; + }; for block_id in block_ids { - blocks.put(*block_id, (val, timeout)); + blocks.insert(*block_id, (val, timeout)); } } } impl BlockCache { - pub fn new(max_known_blocks: NonZeroUsize, max_known_blocks_by_peer: NonZeroUsize) -> Self { + pub fn new(max_known_blocks: u32, max_known_blocks_by_peer: u32) -> Self { Self { - checked_headers: LruCache::new(max_known_blocks), - blocks_known_by_peer: LruCache::new(max_known_blocks_by_peer), + checked_headers: LruMap::new(ByLength::new(max_known_blocks)), + blocks_known_by_peer: LruMap::new(ByLength::new(max_known_blocks_by_peer)), max_known_blocks_by_peer, } } + + pub fn update_cache( + &mut self, + peers_connected: HashSet, + max_known_blocks_by_peer: u32, + ) { + let peers: Vec = self + .blocks_known_by_peer + .iter() + .map(|(id, _)| id.clone()) + .collect(); + + // Clean shared cache if peers do not exist anymore + for peer_id in peers { + if !peers_connected.contains(&peer_id) { + self.blocks_known_by_peer.remove(&peer_id); + } + } + + // Add new potential peers + for peer_id in peers_connected { + if self.blocks_known_by_peer.peek(&peer_id).is_none() { + self.blocks_known_by_peer.insert( + peer_id.clone(), + ( + LruMap::new(ByLength::new(max_known_blocks_by_peer)), + Instant::now(), + ), + ); + } + } + } } pub type SharedBlockCache = Arc>; diff --git a/massa-protocol-worker/src/handlers/block_handler/messages.rs b/massa-protocol-worker/src/handlers/block_handler/messages.rs index 07d8d50f4e4..c315be802be 100644 --- a/massa-protocol-worker/src/handlers/block_handler/messages.rs +++ b/massa-protocol-worker/src/handlers/block_handler/messages.rs @@ -8,7 +8,9 @@ use massa_models::{ }, secure_share::{SecureShareDeserializer, SecureShareSerializer}, }; -use massa_serialization::{Deserializer, Serializer, U64VarIntDeserializer, U64VarIntSerializer}; +use massa_serialization::{ + Deserializer, SerializeError, Serializer, U64VarIntDeserializer, U64VarIntSerializer, +}; use nom::{ error::{context, ContextError, ParseError}, multi::length_count, @@ -55,21 +57,6 @@ pub enum BlockMessage { ReplyForBlocks(Vec<(BlockId, BlockInfoReply)>), } -impl BlockMessage { - pub fn get_id(&self) -> MessageTypeId { - match self { - BlockMessage::BlockHeader(_) => MessageTypeId::BlockHeader, - BlockMessage::AskForBlocks(_) => MessageTypeId::AskForBlocks, - BlockMessage::ReplyForBlocks(_) => MessageTypeId::ReplyForBlocks, - } - } - - pub fn max_id() -> u64 { - >::into(MessageTypeId::ReplyForBlocks) + 1 - } -} - -// DO NOT FORGET TO UPDATE MAX ID IF YOU UPDATE THERE #[derive(IntoPrimitive, Debug, Eq, PartialEq, TryFromPrimitive)] #[repr(u64)] pub enum MessageTypeId { @@ -78,6 +65,16 @@ pub enum MessageTypeId { ReplyForBlocks, } +impl From<&BlockMessage> for MessageTypeId { + fn from(value: &BlockMessage) -> Self { + match value { + BlockMessage::BlockHeader(_) => MessageTypeId::BlockHeader, + BlockMessage::AskForBlocks(_) => MessageTypeId::AskForBlocks, + BlockMessage::ReplyForBlocks(_) => MessageTypeId::ReplyForBlocks, + } + } +} + #[derive(IntoPrimitive, Debug, Eq, PartialEq, TryFromPrimitive)] #[repr(u64)] pub enum BlockInfoType { @@ -114,6 +111,12 @@ impl Serializer for BlockMessageSerializer { 
value: &BlockMessage, buffer: &mut Vec, ) -> Result<(), massa_serialization::SerializeError> { + self.id_serializer.serialize( + &MessageTypeId::from(value).try_into().map_err(|_| { + SerializeError::GeneralError(String::from("Failed to serialize id")) + })?, + buffer, + )?; match value { BlockMessage::BlockHeader(endorsements) => { self.secure_share_serializer @@ -189,7 +192,6 @@ impl Serializer for BlockMessageSerializer { } pub struct BlockMessageDeserializer { - message_id: u64, id_deserializer: U64VarIntDeserializer, block_header_deserializer: SecureShareDeserializer, block_infos_length_deserializer: U64VarIntDeserializer, @@ -216,7 +218,6 @@ pub struct BlockMessageDeserializerArgs { impl BlockMessageDeserializer { pub fn new(args: BlockMessageDeserializerArgs) -> Self { Self { - message_id: 0, id_deserializer: U64VarIntDeserializer::new(Included(0), Included(u64::MAX)), block_header_deserializer: SecureShareDeserializer::new(BlockHeaderDeserializer::new( args.thread_count, @@ -243,10 +244,6 @@ impl BlockMessageDeserializer { ), } } - - pub fn set_message_id(&mut self, message_id: u64) { - self.message_id = message_id; - } } impl Deserializer for BlockMessageDeserializer { @@ -255,7 +252,8 @@ impl Deserializer for BlockMessageDeserializer { buffer: &'a [u8], ) -> IResult<&'a [u8], BlockMessage, E> { context("Failed BlockMessage deserialization", |buffer| { - let id = MessageTypeId::try_from(self.message_id).map_err(|_| { + let (buffer, raw_id) = self.id_deserializer.deserialize(buffer)?; + let id = MessageTypeId::try_from(raw_id).map_err(|_| { nom::Err::Error(ParseError::from_error_kind( buffer, nom::error::ErrorKind::Eof, diff --git a/massa-protocol-worker/src/handlers/block_handler/mod.rs b/massa-protocol-worker/src/handlers/block_handler/mod.rs index f95592be23d..29a39749bb4 100644 --- a/massa-protocol-worker/src/handlers/block_handler/mod.rs +++ b/massa-protocol-worker/src/handlers/block_handler/mod.rs @@ -5,6 +5,7 @@ use massa_consensus_exports::ConsensusController; use massa_pool_exports::PoolController; use massa_protocol_exports::ProtocolConfig; use massa_storage::Storage; +use massa_versioning::versioning::MipStore; use crate::wrap_network::ActiveConnectionsTrait; @@ -59,6 +60,7 @@ impl BlockHandler { operation_cache: SharedOperationCache, cache: SharedBlockCache, storage: Storage, + mip_store: MipStore, ) -> Self { let block_retrieval_thread = start_retrieval_thread( active_connections.clone(), @@ -74,6 +76,7 @@ impl BlockHandler { operation_cache, cache.clone(), storage.clone_without_refs(), + mip_store, ); let block_propagation_thread = start_propagation_thread( active_connections, diff --git a/massa-protocol-worker/src/handlers/block_handler/propagation.rs b/massa-protocol-worker/src/handlers/block_handler/propagation.rs index 814e618c3a8..5e09a03eabe 100644 --- a/massa-protocol-worker/src/handlers/block_handler/propagation.rs +++ b/massa-protocol-worker/src/handlers/block_handler/propagation.rs @@ -1,12 +1,11 @@ -use std::{collections::VecDeque, num::NonZeroUsize, thread::JoinHandle, time::Instant}; +use std::{collections::VecDeque, thread::JoinHandle}; use crossbeam::channel::{Receiver, Sender}; -use lru::LruCache; use massa_logging::massa_trace; use massa_models::{block_id::BlockId, prehash::PreHashSet}; +use massa_protocol_exports::PeerId; use massa_protocol_exports::{ProtocolConfig, ProtocolError}; use massa_storage::Storage; -use peernet::peer_id::PeerId; use tracing::{debug, info, warn}; use crate::{ @@ -64,41 +63,18 @@ impl PropagationThread { continue; } }; - - 
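A note on the serialization change above: the message type id now travels in-band. Each handler's serializer writes the id as a u64 varint at the front of the payload, and the deserializer reads it back before dispatching on it, which is what made the old `set_message_id` side channel (and the `get_id`/`max_id` helpers) removable. A minimal, self-contained sketch of that framing, with a hand-rolled LEB128 varint standing in for `U64VarIntSerializer`/`U64VarIntDeserializer`:

```rust
// Sketch only: simplified id framing, no overflow checks on malformed input.
#[derive(Debug, Clone, Copy, PartialEq)]
#[repr(u64)]
enum MessageTypeId {
    BlockHeader = 0,
    AskForBlocks = 1,
    ReplyForBlocks = 2,
}

// Write the id as a LEB128 varint at the front of the payload.
fn serialize_id(id: MessageTypeId, buffer: &mut Vec<u8>) {
    let mut value = id as u64;
    loop {
        let byte = (value & 0x7f) as u8;
        value >>= 7;
        if value == 0 {
            buffer.push(byte);
            break;
        }
        buffer.push(byte | 0x80);
    }
}

// Read the id back and return the remaining payload, as the new deserializers do.
fn deserialize_id(buffer: &[u8]) -> Option<(&[u8], u64)> {
    let mut value = 0u64;
    for (i, byte) in buffer.iter().enumerate() {
        value |= u64::from(byte & 0x7f) << (7 * i);
        if byte & 0x80 == 0 {
            return Some((&buffer[i + 1..], value));
        }
    }
    None
}

fn main() {
    let mut buf = Vec::new();
    serialize_id(MessageTypeId::ReplyForBlocks, &mut buf);
    let (rest, raw_id) = deserialize_id(&buf).expect("valid varint");
    assert_eq!(raw_id, MessageTypeId::ReplyForBlocks as u64);
    assert!(rest.is_empty());
}
```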
// Clean shared cache if peers do not exist anymore - { - let mut cache_write = self.cache.write(); - let peers: Vec = cache_write - .blocks_known_by_peer - .iter() - .map(|(id, _)| id.clone()) - .collect(); - let peers_connected = - self.active_connections.get_peer_ids_connected(); - for peer_id in peers { - if !peers_connected.contains(&peer_id) { - cache_write.blocks_known_by_peer.pop(&peer_id); - } - } - for peer_id in peers_connected { - if !cache_write.blocks_known_by_peer.contains(&peer_id) { - //TODO: Change to detect the connection before - cache_write.blocks_known_by_peer.put( - peer_id, - ( - LruCache::new( - NonZeroUsize::new(self.config.max_node_known_blocks_size) - .expect("max_node_known_blocks_size in config must be > 0"), - ), - Instant::now(), - ), - ); - } - } - } + let peers_connected = self.active_connections.get_peer_ids_connected(); + self.cache.write().update_cache( + peers_connected, + self.config + .max_node_known_blocks_size + .try_into() + .expect("max_node_known_blocks_size is too big"), + ); { let cache_read = self.cache.read(); - for (peer_id, (blocks_known, _)) in &cache_read.blocks_known_by_peer + for (peer_id, (blocks_known, _)) in + cache_read.blocks_known_by_peer.iter() { // peer that isn't asking for that block let cond = blocks_known.peek(&block_id); @@ -161,7 +137,7 @@ impl PropagationThread { fn ban_node(&mut self, peer_id: &PeerId) -> Result<(), ProtocolError> { massa_trace!("ban node from retrieval thread", { "peer_id": peer_id.to_string() }); self.peer_cmd_sender - .send(PeerManagementCmd::Ban(vec![peer_id.clone()])) + .try_send(PeerManagementCmd::Ban(vec![peer_id.clone()])) .map_err(|err| ProtocolError::SendError(err.to_string())) } } diff --git a/massa-protocol-worker/src/handlers/block_handler/retrieval.rs b/massa-protocol-worker/src/handlers/block_handler/retrieval.rs index e3cf983aeee..c3420134506 100644 --- a/massa-protocol-worker/src/handlers/block_handler/retrieval.rs +++ b/massa-protocol-worker/src/handlers/block_handler/retrieval.rs @@ -1,6 +1,5 @@ use std::{ collections::{hash_map::Entry, HashMap, HashSet}, - num::NonZeroUsize, thread::JoinHandle, time::Instant, }; @@ -21,7 +20,6 @@ use crossbeam::{ channel::{at, Receiver, Sender}, select, }; -use lru::LruCache; use massa_consensus_exports::ConsensusController; use massa_hash::{Hash, HASH_SIZE_BYTES}; use massa_logging::massa_trace; @@ -33,13 +31,16 @@ use massa_models::{ operation::{OperationId, SecureShareOperation}, prehash::{CapacityAllocator, PreHashMap, PreHashSet}, secure_share::{Id, SecureShare}, + timeslots::get_block_slot_timestamp, }; use massa_pool_exports::PoolController; +use massa_protocol_exports::PeerId; use massa_protocol_exports::{ProtocolConfig, ProtocolError}; use massa_serialization::{DeserializeError, Deserializer, Serializer}; use massa_storage::Storage; use massa_time::TimeError; -use peernet::peer_id::PeerId; +use massa_versioning::versioning::MipStore; +use schnellru::{ByLength, LruMap}; use tracing::{debug, info, warn}; use super::{ @@ -98,11 +99,12 @@ pub struct RetrievalThread { cache: SharedBlockCache, config: ProtocolConfig, storage: Storage, + mip_store: MipStore, } impl RetrievalThread { fn run(&mut self) { - let mut block_message_deserializer = + let block_message_deserializer = BlockMessageDeserializer::new(BlockMessageDeserializerArgs { thread_count: self.config.thread_count, endorsement_count: self.config.endorsement_count, @@ -121,8 +123,7 @@ impl RetrievalThread { select! 
{ recv(self.receiver_network) -> msg => { match msg { - Ok((peer_id, message_id, message)) => { - block_message_deserializer.set_message_id(message_id); + Ok((peer_id, message)) => { let (rest, message) = match block_message_deserializer .deserialize::(&message) { Ok((rest, message)) => (rest, message), @@ -295,22 +296,6 @@ impl RetrievalThread { }; all_blocks_info.push((*hash, block_info)); } - // Clean shared cache if peers do not exist anymore - { - let mut cache_write = self.cache.write(); - let peers: Vec = cache_write - .blocks_known_by_peer - .iter() - .map(|(id, _)| id.clone()) - .collect(); - let connected_peers = self.active_connections.get_peer_ids_connected(); - for peer_id in peers { - if !connected_peers.contains(&peer_id) { - cache_write.blocks_known_by_peer.pop(&peer_id); - self.asked_blocks.remove(&peer_id); - } - } - } debug!( "Send reply for blocks of len {} to {}", all_blocks_info.len(), @@ -429,6 +414,29 @@ impl RetrievalThread { Ok(()) } + /// Check if the incoming header network version is compatible with the current node + fn check_network_version_compatibility( + &self, + header: &SecuredHeader, + ) -> Result<(), ProtocolError> { + let slot = header.content.slot; + let ts = get_block_slot_timestamp( + self.config.thread_count, + self.config.t0, + self.config.genesis_timestamp, + slot, + )?; + let version = self.mip_store.get_network_version_active_at(ts); + if header.content.current_version != version { + Err(ProtocolError::IncompatibleNetworkVersion { + local: version, + received: header.content.current_version, + }) + } else { + Ok(()) + } + } + /// Perform checks on a header, /// and if valid update the node's view of the world. /// @@ -451,12 +459,14 @@ impl RetrievalThread { header: &SecuredHeader, from_peer_id: &PeerId, ) -> Result, ProtocolError> { - //TODO: Check if the error is used here ? + // TODO: Check if the error is used here ? 
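The `check_network_version_compatibility` helper added above is the protocol-level hook for MIP-based versioning: it maps the header's slot to a wall-clock timestamp and asks the `MipStore` which network version is active at that time. A hedged sketch, under the assumption that `get_block_slot_timestamp` follows Massa's usual slot schedule (genesis + period * t0, plus an even per-thread offset of t0 / thread_count); `active_version_at` is a stand-in for `MipStore::get_network_version_active_at`:

```rust
// Millisecond timestamp of slot (period, thread); assumes genesis/t0 in ms.
fn slot_timestamp_ms(genesis_ms: u64, t0_ms: u64, thread_count: u64, period: u64, thread: u64) -> u64 {
    genesis_ms + period * t0_ms + thread * (t0_ms / thread_count)
}

// Reject a header whose announced version differs from the one active at its slot.
fn check_network_version(
    active_version_at: impl Fn(u64) -> u32,
    header_version: u32,
    slot_ts_ms: u64,
) -> Result<(), String> {
    let local = active_version_at(slot_ts_ms);
    if header_version == local {
        Ok(())
    } else {
        Err(format!(
            "incompatible network version: local {local}, received {header_version}"
        ))
    }
}

fn main() {
    // Toy MIP schedule: version 1 becomes active at t = 1_000_000 ms.
    let active = |ts: u64| u32::from(ts >= 1_000_000);
    let ts = slot_timestamp_ms(0, 16_000, 32, 100, 5); // slot (period 100, thread 5)
    assert!(check_network_version(active, 1, ts).is_ok());
    assert!(check_network_version(active, 0, ts).is_err());
}
```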
// refuse genesis blocks if header.content.slot.period == 0 || header.content.parents.is_empty() { return Ok(None); } + self.check_network_version_compatibility(header)?; + // compute ID let block_id = header.id; @@ -471,18 +481,24 @@ impl RetrievalThread { true, Instant::now(), ); - { + 'write_cache: { let mut endorsement_cache_write = self.endorsement_cache.write(); - let endorsement_ids = endorsement_cache_write + let Ok(endorsement_ids) = endorsement_cache_write .endorsements_known_by_peer - .get_or_insert_mut(from_peer_id.clone(), || { - LruCache::new( - NonZeroUsize::new(self.config.max_node_known_blocks_size) + .get_or_insert(from_peer_id.clone(), || { + LruMap::new(ByLength::new( + self.config + .max_node_known_blocks_size + .try_into() .expect("max_node_known_blocks_size in config must be > 0"), - ) - }); + )) + }) + .ok_or(()) else { + warn!("endorsements known by peer limit reached"); + break 'write_cache; + }; for endorsement_id in block_header.content.endorsements.iter().map(|e| e.id) { - endorsement_ids.put(endorsement_id, ()); + endorsement_ids.insert(endorsement_id, ()); } } return Ok(Some((block_id, false))); @@ -529,7 +545,7 @@ impl RetrievalThread { } { let mut cache_write = self.cache.write(); - cache_write.checked_headers.put(block_id, header.clone()); + cache_write.checked_headers.insert(block_id, header.clone()); cache_write.insert_blocks_known(from_peer_id, &[block_id], true, Instant::now()); cache_write.insert_blocks_known( from_peer_id, @@ -537,18 +553,24 @@ impl RetrievalThread { true, Instant::now(), ); - { + 'write_cache: { let mut endorsement_cache_write = self.endorsement_cache.write(); - let endorsement_ids = endorsement_cache_write + let Ok(endorsement_ids) = endorsement_cache_write .endorsements_known_by_peer - .get_or_insert_mut(from_peer_id.clone(), || { - LruCache::new( - NonZeroUsize::new(self.config.max_node_known_blocks_size) + .get_or_insert(from_peer_id.clone(), || { + LruMap::new(ByLength::new( + self.config + .max_node_known_blocks_size + .try_into() .expect("max_node_known_blocks_size in config must be > 0"), - ) - }); + )) + }) + .ok_or(()) else { + warn!("endorsements_known_by_peer limit reached"); + break 'write_cache; + }; for endorsement_id in header.content.endorsements.iter().map(|e| e.id) { - endorsement_ids.put(endorsement_id, ()); + endorsement_ids.insert(endorsement_id, ()); } } } @@ -560,7 +582,7 @@ impl RetrievalThread { fn ban_node(&mut self, peer_id: &PeerId) -> Result<(), ProtocolError> { massa_trace!("ban node from retrieval thread", { "peer_id": peer_id.to_string() }); self.peer_cmd_sender - .send(PeerManagementCmd::Ban(vec![peer_id.clone()])) + .try_send(PeerManagementCmd::Ban(vec![peer_id.clone()])) .map_err(|err| ProtocolError::SendError(err.to_string())) } @@ -598,7 +620,11 @@ impl RetrievalThread { // check endorsement signature if not already checked { let read_cache = self.endorsement_cache.read(); - if !read_cache.checked_endorsements.contains(&endorsement_id) { + if read_cache + .checked_endorsements + .peek(&endorsement_id) + .is_none() + { new_endorsements.insert(endorsement_id, endorsement); } } @@ -619,24 +645,29 @@ impl RetrievalThread { .collect::>(), )?; - { + 'write_cache: { let mut cache_write = self.endorsement_cache.write(); // add to verified signature cache for endorsement_id in endorsement_ids.iter() { - cache_write.checked_endorsements.put(*endorsement_id, ()); + cache_write.checked_endorsements.insert(*endorsement_id, ()); } // add to known endorsements for source node. 
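Both `'write_cache:` blocks in this function (the second one continues just below) lean on a schnellru behavior worth spelling out: `LruMap::get_or_insert` returns an `Option`, where `None` means the limiter refused to admit the entry, and the `.ok_or(()) ... let Ok(...) else` combination turns that into an early exit from the labeled block instead of a panic. A stripped-down sketch of the pattern, using `let Some(...) else` directly:

```rust
use schnellru::{ByLength, LruMap};

fn main() {
    // get_or_insert yields None only when the limiter rejects the new entry
    // (with ByLength that effectively means a zero-capacity map).
    let mut known: LruMap<u32, Vec<u8>> = LruMap::new(ByLength::new(2));
    'write_cache: {
        let Some(entry) = known.get_or_insert(7, Vec::new) else {
            eprintln!("cache limit reached");
            break 'write_cache; // bail out of the block, not the whole function
        };
        entry.push(42);
    }
    assert_eq!(known.peek(&7), Some(&vec![42]));
}
```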
- let endorsements = cache_write.endorsements_known_by_peer.get_or_insert_mut( - from_peer_id.clone(), - || { - LruCache::new( - NonZeroUsize::new(self.config.max_node_known_endorsements_size) + let Ok(endorsements) = cache_write + .endorsements_known_by_peer + .get_or_insert(from_peer_id.clone(), || { + LruMap::new(ByLength::new( + self.config + .max_node_known_endorsements_size + .try_into() .expect("max_node_known_endorsements_size in config should be > 0"), - ) - }, - ); + )) + }) + .ok_or(()) else { + warn!("endorsements_known_by_peer limit reached"); + break 'write_cache; + }; for endorsement_id in endorsement_ids.iter() { - endorsements.put(*endorsement_id, ()); + endorsements.insert(*endorsement_id, ()); } } @@ -676,19 +707,24 @@ impl RetrievalThread { let operation_ids_set: PreHashSet = operation_ids.iter().cloned().collect(); // add to known ops - { + 'write_cache: { let mut cache_write = self.operation_cache.write(); - let known_ops = - cache_write - .ops_known_by_peer - .get_or_insert_mut(from_peer_id.clone(), || { - LruCache::new( - NonZeroUsize::new(self.config.max_node_known_ops_size) - .expect("max_node_known_ops_size in config should be > 0"), - ) - }); + let Ok(known_ops) = cache_write + .ops_known_by_peer + .get_or_insert(from_peer_id.clone(), || { + LruMap::new(ByLength::new( + self.config + .max_node_known_ops_size + .try_into() + .expect("max_node_known_ops_size in config should be > 0"), + )) + }) + .ok_or(()) else { + warn!("ops_known_by_peer limitation reached"); + break 'write_cache; + }; for op_id in operation_ids_set.iter() { - known_ops.put(op_id.prefix(), ()); + known_ops.insert(op_id.prefix(), ()); } } let info = if let Some(info) = self.block_wishlist.get_mut(&block_id) { @@ -986,11 +1022,12 @@ impl RetrievalThread { }; // Check operation signature only if not already checked. 
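The signature check this comment introduces (continued below) swaps the lru crate's `contains(...)` for schnellru's `peek(...).is_none()`, and the distinction matters under the `RwLock`: `get` promotes the entry in LRU order and therefore needs `&mut self`, i.e. a write lock, while `peek` is non-promoting and works through a shared `read()` guard. A minimal illustration:

```rust
use parking_lot::RwLock;
use schnellru::{ByLength, LruMap};

fn main() {
    let cache = RwLock::new(LruMap::<u64, ()>::new(ByLength::new(8)));
    cache.write().insert(1, ());

    // Membership test under a read lock: peek() does not reorder the LRU,
    // so it does not need the exclusive access that get() would require.
    let already_checked = cache.read().peek(&1).is_some();
    assert!(already_checked);
}
```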
- if !self + if self .operation_cache .read() .checked_operations - .contains(&operation_id) + .peek(&operation_id) + .is_none() { // check signature if the operation wasn't in `checked_operation` new_operations.insert(operation_id, operation); @@ -1011,7 +1048,7 @@ impl RetrievalThread { } } self.sender_propagation_ops - .send(OperationHandlerPropagationCommand::AnnounceOperations( + .try_send(OperationHandlerPropagationCommand::AnnounceOperations( new_operations.keys().copied().collect(), )) .map_err(|err| ProtocolError::ChannelError(err.to_string()))?; @@ -1055,19 +1092,15 @@ impl RetrievalThread { ) }; let mut needs_ask = true; - // Clean old peers that aren't active anymore - let peers_connected: HashSet = - self.active_connections.get_peer_ids_connected(); - let peers_in_cache: Vec = cache_write - .blocks_known_by_peer - .iter() - .map(|(peer_id, _)| peer_id.clone()) - .collect(); - for peer_id in peers_in_cache { - if !peers_connected.contains(&peer_id) { - cache_write.blocks_known_by_peer.pop(&peer_id); - } - } + + let peers_connected = self.active_connections.get_peer_ids_connected(); + cache_write.update_cache( + peers_connected.clone(), + self.config + .max_node_known_blocks_size + .try_into() + .expect("max_node_known_blocks_size is too big"), + ); let peers_in_asked_blocks: Vec = self.asked_blocks.keys().cloned().collect(); for peer_id in peers_in_asked_blocks { @@ -1075,30 +1108,22 @@ impl RetrievalThread { self.asked_blocks.remove(&peer_id); } } - // Add new peers for peer_id in peers_connected { - if !cache_write.blocks_known_by_peer.contains(&peer_id) { - //TODO: Change to detect the connection before - cache_write.blocks_known_by_peer.put( - peer_id.clone(), - ( - LruCache::new( - NonZeroUsize::new(self.config.max_node_known_blocks_size) - .expect("max_node_known_blocks_size in config must be > 0"), - ), - Instant::now(), - ), - ); - } else { - cache_write.blocks_known_by_peer.promote(&peer_id); - } - if !self.asked_blocks.contains_key(&peer_id) { self.asked_blocks .insert(peer_id.clone(), PreHashMap::default()); } } - for (peer_id, (blocks_known, _)) in cache_write.blocks_known_by_peer.iter_mut() { + let all_keys: Vec = cache_write + .blocks_known_by_peer + .iter() + .map(|(k, _)| k) + .cloned() + .collect(); + for peer_id in all_keys.iter() { + // for (peer_id, (blocks_known, _)) in cache_write.blocks_known_by_peer.iter() { + let (blocks_known, _) = + cache_write.blocks_known_by_peer.peek_mut(peer_id).unwrap(); // map to remove the borrow on asked_blocks. 
Otherwise can't call insert_known_blocks let ask_time_opt = self .asked_blocks @@ -1139,10 +1164,10 @@ impl RetrievalThread { continue; // not a candidate } // timed out, supposed to have it - (true, Some(timeout_at), Some((true, info_time))) => { - if info_time < &timeout_at { + (true, Some(mut timeout_at), Some((true, info_time))) => { + if info_time < &mut timeout_at { // info less recent than timeout: mark as not having it - blocks_known.put(*hash, (false, timeout_at)); + blocks_known.insert(*hash, (false, timeout_at)); (2u8, ask_time_opt) } else { // told us it has it after a timeout: good candidate again @@ -1150,16 +1175,16 @@ impl RetrievalThread { } } // timed out, supposed to not have it - (true, Some(timeout_at), Some((false, info_time))) => { - if info_time < &timeout_at { + (true, Some(mut timeout_at), Some((false, info_time))) => { + if info_time < &mut timeout_at { // info less recent than timeout: update info time - blocks_known.put(*hash, (false, timeout_at)); + blocks_known.insert(*hash, (false, timeout_at)); } (2u8, ask_time_opt) } // timed out but don't know if has it: mark as not having it (true, Some(timeout_at), None) => { - blocks_known.put(*hash, (false, timeout_at)); + blocks_known.insert(*hash, (false, timeout_at)); (2u8, ask_time_opt) } }; @@ -1270,6 +1295,7 @@ impl RetrievalThread { } #[allow(clippy::too_many_arguments)] +// bookmark pub fn start_retrieval_thread( active_connections: Box, consensus_controller: Box, @@ -1284,6 +1310,7 @@ pub fn start_retrieval_thread( operation_cache: SharedOperationCache, cache: SharedBlockCache, storage: Storage, + mip_store: MipStore, ) -> JoinHandle<()> { let block_message_serializer = MessagesSerializer::new().with_block_message_serializer(BlockMessageSerializer::new()); @@ -1308,6 +1335,7 @@ pub fn start_retrieval_thread( operation_cache, config, storage, + mip_store, }; retrieval_thread.run(); }) diff --git a/massa-protocol-worker/src/handlers/endorsement_handler/cache.rs b/massa-protocol-worker/src/handlers/endorsement_handler/cache.rs index 7ad20e4df6c..4f18bbbdc17 100644 --- a/massa-protocol-worker/src/handlers/endorsement_handler/cache.rs +++ b/massa-protocol-worker/src/handlers/endorsement_handler/cache.rs @@ -1,23 +1,49 @@ -use std::{num::NonZeroUsize, sync::Arc}; +use std::{collections::HashSet, sync::Arc}; -use lru::LruCache; use massa_models::endorsement::EndorsementId; +use massa_protocol_exports::PeerId; use parking_lot::RwLock; -use peernet::peer_id::PeerId; +use schnellru::{ByLength, LruMap}; pub struct EndorsementCache { - pub checked_endorsements: LruCache, - pub endorsements_known_by_peer: LruCache>, + pub checked_endorsements: LruMap, + pub endorsements_known_by_peer: LruMap>, } impl EndorsementCache { - pub fn new( - max_known_endorsements: NonZeroUsize, - max_known_endorsements_by_peer: NonZeroUsize, - ) -> Self { + pub fn new(max_known_endorsements: u32, max_known_endorsements_by_peer: u32) -> Self { Self { - checked_endorsements: LruCache::new(max_known_endorsements), - endorsements_known_by_peer: LruCache::new(max_known_endorsements_by_peer), + checked_endorsements: LruMap::new(ByLength::new(max_known_endorsements)), + endorsements_known_by_peer: LruMap::new(ByLength::new(max_known_endorsements_by_peer)), + } + } + + pub fn update_cache( + &mut self, + peers_connected: HashSet, + max_known_endorsements_by_peer: u32, + ) { + let peers: Vec = self + .endorsements_known_by_peer + .iter() + .map(|(id, _)| id.clone()) + .collect(); + + // Clean shared cache if peers do not exist anymore + for peer_id in 
peers { + if !peers_connected.contains(&peer_id) { + self.endorsements_known_by_peer.remove(&peer_id); + } + } + + // Add new potential peers + for peer_id in peers_connected { + if self.endorsements_known_by_peer.peek(&peer_id).is_none() { + self.endorsements_known_by_peer.insert( + peer_id.clone(), + LruMap::new(ByLength::new(max_known_endorsements_by_peer)), + ); + } } } } diff --git a/massa-protocol-worker/src/handlers/endorsement_handler/messages.rs b/massa-protocol-worker/src/handlers/endorsement_handler/messages.rs index 0911a3ff51f..156fbe67859 100644 --- a/massa-protocol-worker/src/handlers/endorsement_handler/messages.rs +++ b/massa-protocol-worker/src/handlers/endorsement_handler/messages.rs @@ -2,7 +2,9 @@ use massa_models::{ endorsement::{Endorsement, EndorsementDeserializer, SecureShareEndorsement}, secure_share::{SecureShareDeserializer, SecureShareSerializer}, }; -use massa_serialization::{Deserializer, Serializer, U64VarIntDeserializer, U64VarIntSerializer}; +use massa_serialization::{ + Deserializer, SerializeError, Serializer, U64VarIntDeserializer, U64VarIntSerializer, +}; use nom::{ error::{context, ContextError, ParseError}, multi::length_count, @@ -17,27 +19,23 @@ pub enum EndorsementMessage { Endorsements(Vec), } -impl EndorsementMessage { - pub fn get_id(&self) -> MessageTypeId { - match self { - EndorsementMessage::Endorsements(_) => MessageTypeId::Endorsements, - } - } - - pub fn max_id() -> u64 { - >::into(MessageTypeId::Endorsements) + 1 - } -} - -// DO NOT FORGET TO UPDATE MAX ID IF YOU UPDATE THERE #[derive(IntoPrimitive, Debug, Eq, PartialEq, TryFromPrimitive)] #[repr(u64)] pub enum MessageTypeId { Endorsements, } +impl From<&EndorsementMessage> for MessageTypeId { + fn from(message: &EndorsementMessage) -> Self { + match message { + EndorsementMessage::Endorsements(_) => MessageTypeId::Endorsements, + } + } +} + #[derive(Default, Clone)] pub struct EndorsementMessageSerializer { + id_serializer: U64VarIntSerializer, length_endorsements_serializer: U64VarIntSerializer, secure_share_serializer: SecureShareSerializer, } @@ -45,6 +43,7 @@ pub struct EndorsementMessageSerializer { impl EndorsementMessageSerializer { pub fn new() -> Self { Self { + id_serializer: U64VarIntSerializer::new(), length_endorsements_serializer: U64VarIntSerializer::new(), secure_share_serializer: SecureShareSerializer::new(), } @@ -57,6 +56,12 @@ impl Serializer for EndorsementMessageSerializer { value: &EndorsementMessage, buffer: &mut Vec, ) -> Result<(), massa_serialization::SerializeError> { + self.id_serializer.serialize( + &MessageTypeId::from(value).try_into().map_err(|_| { + SerializeError::GeneralError(String::from("Failed to serialize id")) + })?, + buffer, + )?; match value { EndorsementMessage::Endorsements(endorsements) => { self.length_endorsements_serializer @@ -78,7 +83,7 @@ pub struct EndorsementMessageDeserializerArgs { } pub struct EndorsementMessageDeserializer { - message_id: u64, + id_deserializer: U64VarIntDeserializer, length_endorsements_deserializer: U64VarIntDeserializer, secure_share_deserializer: SecureShareDeserializer, } @@ -86,7 +91,7 @@ pub struct EndorsementMessageDeserializer { impl EndorsementMessageDeserializer { pub fn new(args: EndorsementMessageDeserializerArgs) -> Self { Self { - message_id: 0, + id_deserializer: U64VarIntDeserializer::new(Included(0), Included(u64::MAX)), length_endorsements_deserializer: U64VarIntDeserializer::new( Included(0), Included(args.max_length_endorsements), @@ -97,10 +102,6 @@ impl EndorsementMessageDeserializer { 
)), } } - - pub fn set_message_id(&mut self, message_id: u64) { - self.message_id = message_id; - } } impl Deserializer for EndorsementMessageDeserializer { @@ -109,7 +110,8 @@ impl Deserializer for EndorsementMessageDeserializer { buffer: &'a [u8], ) -> IResult<&'a [u8], EndorsementMessage, E> { context("Failed EndorsementMessage deserialization", |buffer| { - let id = MessageTypeId::try_from(self.message_id).map_err(|_| { + let (buffer, raw_id) = self.id_deserializer.deserialize(buffer)?; + let id = MessageTypeId::try_from(raw_id).map_err(|_| { nom::Err::Error(ParseError::from_error_kind( buffer, nom::error::ErrorKind::Eof, diff --git a/massa-protocol-worker/src/handlers/endorsement_handler/propagation.rs b/massa-protocol-worker/src/handlers/endorsement_handler/propagation.rs index ef430df1c07..fb9740aa4ef 100644 --- a/massa-protocol-worker/src/handlers/endorsement_handler/propagation.rs +++ b/massa-protocol-worker/src/handlers/endorsement_handler/propagation.rs @@ -1,13 +1,12 @@ -use std::{num::NonZeroUsize, thread::JoinHandle}; +use std::thread::JoinHandle; use crossbeam::channel::Receiver; -use lru::LruCache; use massa_models::{ endorsement::{EndorsementId, SecureShareEndorsement}, prehash::{PreHashMap, PreHashSet}, }; +use massa_protocol_exports::PeerId; use massa_protocol_exports::ProtocolConfig; -use peernet::peer_id::PeerId; use tracing::{debug, info, log::warn}; use crate::{messages::MessagesSerializer, wrap_network::ActiveConnectionsTrait}; @@ -57,40 +56,29 @@ impl PropagationThread { { let mut cache_write = self.cache.write(); for endorsement_id in endorsements_ids.iter().copied() { - cache_write.checked_endorsements.put(endorsement_id, ()); + cache_write.checked_endorsements.insert(endorsement_id, ()); } // Add peers that potentially don't exist in cache - let peer_connected = + let peers_connected = self.active_connections.get_peer_ids_connected(); - for peer_id in &peer_connected { - if !cache_write.endorsements_known_by_peer.contains(peer_id) { - cache_write.endorsements_known_by_peer.put( - peer_id.clone(), - LruCache::new( - NonZeroUsize::new( - self.config.max_node_known_endorsements_size, - ) - .expect( - "max_node_known_endorsements_size in config is > 0", - ), - ), - ); - } - } - let peers: Vec = cache_write + cache_write.update_cache( + peers_connected, + self.config + .max_node_known_endorsements_size + .try_into() + .expect("max_node_known_endorsements_size is too big"), + ); + let all_keys: Vec = cache_write .endorsements_known_by_peer .iter() - .map(|(id, _)| id.clone()) + .map(|(k, _)| k) + .cloned() .collect(); - // Clean shared cache if peers do not exist anymore - for peer_id in peers { - if !peer_connected.contains(&peer_id) { - cache_write.endorsements_known_by_peer.pop(&peer_id); - } - } - for (peer_id, endorsement_ids) in - cache_write.endorsements_known_by_peer.iter_mut() - { + for peer_id in all_keys.iter() { + let endorsement_ids = cache_write + .endorsements_known_by_peer + .peek_mut(peer_id) + .unwrap(); let new_endorsements: PreHashMap< EndorsementId, SecureShareEndorsement, @@ -100,7 +88,7 @@ impl PropagationThread { .get_endorsement_refs() .iter() .filter_map(|id| { - if endorsement_ids.contains(id) { + if endorsement_ids.peek(id).is_some() { return None; } Some(( @@ -111,7 +99,7 @@ impl PropagationThread { .collect() }; for endorsement_id in new_endorsements.keys().copied() { - endorsement_ids.put(endorsement_id, ()); + endorsement_ids.insert(endorsement_id, ()); } let to_send = new_endorsements.into_values().collect::>(); diff --git 
a/massa-protocol-worker/src/handlers/endorsement_handler/retrieval.rs b/massa-protocol-worker/src/handlers/endorsement_handler/retrieval.rs index e223a861663..367fb04efac 100644 --- a/massa-protocol-worker/src/handlers/endorsement_handler/retrieval.rs +++ b/massa-protocol-worker/src/handlers/endorsement_handler/retrieval.rs @@ -1,10 +1,9 @@ -use std::{num::NonZeroUsize, thread::JoinHandle}; +use std::thread::JoinHandle; use crossbeam::{ channel::{Receiver, Sender}, select, }; -use lru::LruCache; use massa_logging::massa_trace; use massa_models::{ endorsement::SecureShareEndorsement, @@ -12,11 +11,12 @@ use massa_models::{ timeslots::get_block_slot_timestamp, }; use massa_pool_exports::PoolController; +use massa_protocol_exports::PeerId; use massa_protocol_exports::{ProtocolConfig, ProtocolError}; use massa_serialization::{DeserializeError, Deserializer}; use massa_storage::Storage; use massa_time::MassaTime; -use peernet::peer_id::PeerId; +use schnellru::{ByLength, LruMap}; use tracing::{debug, info, warn}; use crate::{ @@ -47,7 +47,7 @@ pub struct RetrievalThread { impl RetrievalThread { fn run(&mut self) { - let mut endorsement_message_deserializer = + let endorsement_message_deserializer = EndorsementMessageDeserializer::new(EndorsementMessageDeserializerArgs { thread_count: self.config.thread_count, max_length_endorsements: self.config.max_endorsements_per_message, @@ -57,8 +57,7 @@ impl RetrievalThread { select! { recv(self.receiver) -> msg => { match msg { - Ok((peer_id, message_id, message)) => { - endorsement_message_deserializer.set_message_id(message_id); + Ok((peer_id, message)) => { let (rest, message) = match endorsement_message_deserializer .deserialize::(&message) { Ok((rest, message)) => (rest, message), @@ -140,7 +139,11 @@ impl RetrievalThread { // check endorsement signature if not already checked { let read_cache = self.cache.read(); - if !read_cache.checked_endorsements.contains(&endorsement_id) { + if read_cache + .checked_endorsements + .peek(&endorsement_id) + .is_none() + { new_endorsements.insert(endorsement_id, endorsement); } } @@ -160,24 +163,29 @@ impl RetrievalThread { }) .collect::>(), )?; - { + 'write_cache: { let mut cache_write = self.cache.write(); // add to verified signature cache for endorsement_id in endorsement_ids.iter() { - cache_write.checked_endorsements.put(*endorsement_id, ()); + cache_write.checked_endorsements.insert(*endorsement_id, ()); } // add to known endorsements for source node. 
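Before the known-endorsements insertion continues below, one recurring change deserves a note: every handler in this diff (see `internal_sender.try_send(...)` and `ban_node` just after this) moves from blocking `send` to non-blocking `try_send` on crossbeam channels. On a bounded channel, `send` parks the caller while the queue is full, so a stalled consumer could wedge a protocol thread; `try_send` fails fast and lets the caller surface a `ChannelError` instead. A minimal illustration:

```rust
use crossbeam::channel::{bounded, TrySendError};

fn main() {
    let (tx, _rx) = bounded::<u32>(1);
    tx.try_send(1).expect("first message fits"); // fills the only slot

    // send() would block here until the receiver drains the queue;
    // try_send() reports the problem immediately instead.
    match tx.try_send(2) {
        Err(TrySendError::Full(msg)) => eprintln!("channel full, dropping {msg}"),
        Err(TrySendError::Disconnected(_)) => eprintln!("receiver gone"),
        Ok(()) => {}
    }
}
```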
- let endorsements = cache_write.endorsements_known_by_peer.get_or_insert_mut( - from_peer_id.clone(), - || { - LruCache::new( - NonZeroUsize::new(self.config.max_node_known_endorsements_size) + let Ok(endorsements) = cache_write + .endorsements_known_by_peer + .get_or_insert(from_peer_id.clone(), || { + LruMap::new(ByLength::new( + self.config + .max_node_known_endorsements_size + .try_into() .expect("max_node_known_endorsements_size in config should be > 0"), - ) - }, - ); + )) + }) + .ok_or(()) else { + warn!("endorsements_known_by_peer limit reached"); + break 'write_cache; + }; for endorsement_id in endorsement_ids.iter() { - endorsements.put(*endorsement_id, ()); + endorsements.insert(*endorsement_id, ()); } } @@ -220,7 +228,7 @@ impl RetrievalThread { .collect() }; endorsements_to_propagate.drop_endorsement_refs(&endorsements_to_not_propagate); - if let Err(err) = self.internal_sender.send( + if let Err(err) = self.internal_sender.try_send( EndorsementHandlerPropagationCommand::PropagateEndorsements( endorsements_to_propagate, ), @@ -238,7 +246,7 @@ impl RetrievalThread { fn ban_node(&mut self, peer_id: &PeerId) -> Result<(), ProtocolError> { massa_trace!("ban node from retrieval thread", { "peer_id": peer_id.to_string() }); self.peer_cmd_sender - .send(PeerManagementCmd::Ban(vec![peer_id.clone()])) + .try_send(PeerManagementCmd::Ban(vec![peer_id.clone()])) .map_err(|err| ProtocolError::SendError(err.to_string())) } } diff --git a/massa-protocol-worker/src/handlers/operation_handler/cache.rs b/massa-protocol-worker/src/handlers/operation_handler/cache.rs index 27f0998b3ba..bd194b00b49 100644 --- a/massa-protocol-worker/src/handlers/operation_handler/cache.rs +++ b/massa-protocol-worker/src/handlers/operation_handler/cache.rs @@ -1,29 +1,54 @@ -use std::{num::NonZeroUsize, sync::Arc}; +use std::{collections::HashSet, sync::Arc}; -use lru::LruCache; use massa_models::operation::{OperationId, OperationPrefixId}; +use massa_protocol_exports::PeerId; use parking_lot::RwLock; -use peernet::peer_id::PeerId; +use schnellru::{ByLength, LruMap}; pub struct OperationCache { - pub checked_operations: LruCache, - pub checked_operations_prefix: LruCache, - pub ops_known_by_peer: LruCache>, + pub checked_operations: LruMap, + pub checked_operations_prefix: LruMap, + pub ops_known_by_peer: LruMap>, } impl OperationCache { - pub fn new(max_known_ops: NonZeroUsize, max_known_ops_by_peer: NonZeroUsize) -> Self { + pub fn new(max_known_ops: u32, max_known_ops_by_peer: u32) -> Self { Self { - checked_operations: LruCache::new(max_known_ops), - checked_operations_prefix: LruCache::new(max_known_ops), - ops_known_by_peer: LruCache::new(max_known_ops_by_peer), + checked_operations: LruMap::new(ByLength::new(max_known_ops)), + checked_operations_prefix: LruMap::new(ByLength::new(max_known_ops)), + ops_known_by_peer: LruMap::new(ByLength::new(max_known_ops_by_peer)), } } pub fn insert_checked_operation(&mut self, operation_id: OperationId) { - self.checked_operations.put(operation_id, ()); + self.checked_operations.insert(operation_id, ()); self.checked_operations_prefix - .put(operation_id.prefix(), ()); + .insert(operation_id.prefix(), ()); + } + + pub fn update_cache(&mut self, peers_connected: HashSet, max_known_ops_by_peer: u32) { + let peers: Vec = self + .ops_known_by_peer + .iter() + .map(|(id, _)| id.clone()) + .collect(); + + // Clean shared cache if peers do not exist anymore + for peer_id in peers { + if !peers_connected.contains(&peer_id) { + self.ops_known_by_peer.remove(&peer_id); + } + } + + 
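This is the same two-phase reconciliation that `update_cache` got in the block and endorsement caches; its second phase, seeding entries for newly connected peers, continues right below. Condensed into one hedged, generic sketch, with `u64` standing in for `PeerId`:

```rust
use schnellru::{ByLength, LruMap};
use std::collections::HashSet;

// Reconcile a per-peer cache with the set of currently connected peers.
fn update_cache<V>(
    known_by_peer: &mut LruMap<u64, V>,
    peers_connected: &HashSet<u64>,
    mut new_value: impl FnMut() -> V,
) {
    // Phase 1: forget peers that are no longer connected.
    let cached: Vec<u64> = known_by_peer.iter().map(|(id, _)| *id).collect();
    for peer in cached {
        if !peers_connected.contains(&peer) {
            known_by_peer.remove(&peer);
        }
    }
    // Phase 2: seed an empty entry for every newly connected peer.
    for peer in peers_connected {
        if known_by_peer.peek(peer).is_none() {
            known_by_peer.insert(*peer, new_value());
        }
    }
}

fn main() {
    let mut cache: LruMap<u64, ()> = LruMap::new(ByLength::new(16));
    cache.insert(1, ());
    let connected: HashSet<u64> = [2, 3].into_iter().collect();
    update_cache(&mut cache, &connected, || ());
    assert!(cache.peek(&1).is_none());
    assert!(cache.peek(&2).is_some() && cache.peek(&3).is_some());
}
```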
// Add new potential peers + for peer_id in peers_connected { + if self.ops_known_by_peer.peek(&peer_id).is_none() { + self.ops_known_by_peer.insert( + peer_id.clone(), + LruMap::new(ByLength::new(max_known_ops_by_peer)), + ); + } + } } } diff --git a/massa-protocol-worker/src/handlers/operation_handler/messages.rs b/massa-protocol-worker/src/handlers/operation_handler/messages.rs index 08afa651fa6..fe9c75f3407 100644 --- a/massa-protocol-worker/src/handlers/operation_handler/messages.rs +++ b/massa-protocol-worker/src/handlers/operation_handler/messages.rs @@ -2,12 +2,15 @@ use massa_models::operation::{ OperationPrefixIds, OperationPrefixIdsDeserializer, OperationPrefixIdsSerializer, OperationsDeserializer, OperationsSerializer, SecureShareOperation, }; -use massa_serialization::{Deserializer, SerializeError, Serializer}; +use massa_serialization::{ + Deserializer, SerializeError, Serializer, U64VarIntDeserializer, U64VarIntSerializer, +}; use nom::{ error::{context, ContextError, ParseError}, IResult, Parser, }; use num_enum::{IntoPrimitive, TryFromPrimitive}; +use std::ops::Bound::Included; #[derive(Debug)] pub enum OperationMessage { @@ -19,21 +22,6 @@ pub enum OperationMessage { Operations(Vec), } -impl OperationMessage { - pub fn get_id(&self) -> MessageTypeId { - match self { - OperationMessage::OperationsAnnouncement(_) => MessageTypeId::OperationsAnnouncement, - OperationMessage::AskForOperations(_) => MessageTypeId::AskForOperations, - OperationMessage::Operations(_) => MessageTypeId::Operations, - } - } - - pub fn max_id() -> u64 { - >::into(MessageTypeId::Operations) + 1 - } -} - -// DO NOT FORGET TO UPDATE MAX ID IF YOU UPDATE THERE #[derive(IntoPrimitive, Debug, Eq, PartialEq, TryFromPrimitive)] #[repr(u64)] pub enum MessageTypeId { @@ -42,8 +30,19 @@ pub enum MessageTypeId { Operations = 2, } +impl From<&OperationMessage> for MessageTypeId { + fn from(message: &OperationMessage) -> Self { + match message { + OperationMessage::OperationsAnnouncement(_) => MessageTypeId::OperationsAnnouncement, + OperationMessage::AskForOperations(_) => MessageTypeId::AskForOperations, + OperationMessage::Operations(_) => MessageTypeId::Operations, + } + } +} + #[derive(Default, Clone)] pub struct OperationMessageSerializer { + id_serializer: U64VarIntSerializer, operation_prefix_ids_serializer: OperationPrefixIdsSerializer, operations_serializer: OperationsSerializer, } @@ -51,6 +50,7 @@ pub struct OperationMessageSerializer { impl OperationMessageSerializer { pub fn new() -> Self { Self { + id_serializer: U64VarIntSerializer::new(), operation_prefix_ids_serializer: OperationPrefixIdsSerializer::new(), operations_serializer: OperationsSerializer::new(), } @@ -63,6 +63,12 @@ impl Serializer for OperationMessageSerializer { value: &OperationMessage, buffer: &mut Vec, ) -> Result<(), SerializeError> { + self.id_serializer.serialize( + &MessageTypeId::from(value).try_into().map_err(|_| { + SerializeError::GeneralError(String::from("Failed to serialize id")) + })?, + buffer, + )?; match value { OperationMessage::OperationsAnnouncement(operations) => { self.operation_prefix_ids_serializer @@ -81,9 +87,9 @@ impl Serializer for OperationMessageSerializer { } pub struct OperationMessageDeserializer { + id_deserializer: U64VarIntDeserializer, operation_prefix_ids_deserializer: OperationPrefixIdsDeserializer, operations_deserializer: OperationsDeserializer, - message_id: u64, } /// Limits used in the deserialization of `OperationMessage` @@ -110,6 +116,7 @@ pub struct OperationMessageDeserializerArgs { 
impl OperationMessageDeserializer { pub fn new(args: OperationMessageDeserializerArgs) -> Self { Self { + id_deserializer: U64VarIntDeserializer::new(Included(0), Included(u64::MAX)), operation_prefix_ids_deserializer: OperationPrefixIdsDeserializer::new( args.max_operations_prefix_ids, ), @@ -122,13 +129,8 @@ impl OperationMessageDeserializer { args.max_op_datastore_key_length, args.max_op_datastore_value_length, ), - message_id: 0, } } - - pub fn set_message_id(&mut self, id: u64) { - self.message_id = id; - } } impl Deserializer for OperationMessageDeserializer { @@ -137,7 +139,8 @@ impl Deserializer for OperationMessageDeserializer { buffer: &'a [u8], ) -> IResult<&'a [u8], OperationMessage, E> { context("Failed OperationMessage deserialization", |buffer| { - let id = MessageTypeId::try_from(self.message_id).map_err(|_| { + let (buffer, raw_id) = self.id_deserializer.deserialize(buffer)?; + let id = MessageTypeId::try_from(raw_id).map_err(|_| { nom::Err::Error(ParseError::from_error_kind( buffer, nom::error::ErrorKind::Eof, diff --git a/massa-protocol-worker/src/handlers/operation_handler/propagation.rs b/massa-protocol-worker/src/handlers/operation_handler/propagation.rs index be38c1abab5..f1d352f653e 100644 --- a/massa-protocol-worker/src/handlers/operation_handler/propagation.rs +++ b/massa-protocol-worker/src/handlers/operation_handler/propagation.rs @@ -1,11 +1,10 @@ -use std::{mem, num::NonZeroUsize, thread::JoinHandle}; +use std::{mem, thread::JoinHandle}; use crossbeam::channel::{Receiver, RecvTimeoutError}; -use lru::LruCache; use massa_logging::massa_trace; use massa_models::operation::OperationId; +use massa_protocol_exports::PeerId; use massa_protocol_exports::ProtocolConfig; -use peernet::peer_id::PeerId; use tracing::{debug, info, log::warn}; use crate::{ @@ -86,43 +85,32 @@ impl PropagationThread { }); { let mut cache_write = self.cache.write(); - let peers: Vec = cache_write - .ops_known_by_peer - .iter() - .map(|(id, _)| id.clone()) - .collect(); let peers_connected = self.active_connections.get_peer_ids_connected(); - // Clean shared cache if peers do not exist anymore - - for peer_id in peers { - if !peers_connected.contains(&peer_id) { - cache_write.ops_known_by_peer.pop(&peer_id); - } - } - - // Add new potential peers - for peer_id in peers_connected { - if !cache_write.ops_known_by_peer.contains(&peer_id) { - cache_write.ops_known_by_peer.put( - peer_id.clone(), - LruCache::new( - NonZeroUsize::new(self.config.max_node_known_ops_size) - .expect("max_node_known_endorsements_size in config is > 0"), - ), - ); - } - } + cache_write.update_cache( + peers_connected, + self.config + .max_node_known_ops_size + .try_into() + .expect("max_node_known_ops_size is too big"), + ); // Propagate to peers - for (peer_id, ops) in cache_write.ops_known_by_peer.iter_mut() { + let all_keys: Vec = cache_write + .ops_known_by_peer + .iter() + .map(|(k, _)| k) + .cloned() + .collect(); + for peer_id in all_keys { + let ops = cache_write.ops_known_by_peer.peek_mut(&peer_id).unwrap(); let new_ops: Vec = operation_ids .iter() - .filter(|id| !ops.contains(&id.prefix())) + .filter(|id| ops.peek(&id.prefix()).is_none()) .copied() .collect(); if !new_ops.is_empty() { for id in &new_ops { - ops.put(id.prefix(), ()); + ops.insert(id.prefix(), ()); } debug!( "Send operations announcement of len {} to {}", @@ -132,7 +120,7 @@ impl PropagationThread { for sub_list in new_ops.chunks(self.config.max_operations_per_message as usize) { if let Err(err) = self.active_connections.send_to_peer( - peer_id, 
+ &peer_id, &self.operation_message_serializer, OperationMessage::OperationsAnnouncement( sub_list.iter().map(|id| id.into_prefix()).collect(), diff --git a/massa-protocol-worker/src/handlers/operation_handler/retrieval.rs b/massa-protocol-worker/src/handlers/operation_handler/retrieval.rs index 23e71063c7a..a1cb1551486 100644 --- a/massa-protocol-worker/src/handlers/operation_handler/retrieval.rs +++ b/massa-protocol-worker/src/handlers/operation_handler/retrieval.rs @@ -1,6 +1,5 @@ use std::{ collections::{HashMap, VecDeque}, - num::NonZeroUsize, thread::JoinHandle, time::Instant, }; @@ -9,7 +8,6 @@ use crossbeam::{ channel::{tick, Receiver, Sender}, select, }; -use lru::LruCache; use massa_logging::massa_trace; use massa_models::{ operation::{OperationId, OperationPrefixId, OperationPrefixIds, SecureShareOperation}, @@ -19,11 +17,12 @@ use massa_models::{ timeslots::get_block_slot_timestamp, }; use massa_pool_exports::PoolController; +use massa_protocol_exports::PeerId; use massa_protocol_exports::{ProtocolConfig, ProtocolError}; use massa_serialization::{DeserializeError, Deserializer}; use massa_storage::Storage; use massa_time::{MassaTime, TimeError}; -use peernet::peer_id::PeerId; +use schnellru::{ByLength, LruMap}; use crate::{ handlers::peer_handler::models::{PeerManagementCmd, PeerMessageTuple}, @@ -57,7 +56,7 @@ pub struct RetrievalThread { receiver: Receiver, pool_controller: Box, cache: SharedOperationCache, - asked_operations: LruCache)>, + asked_operations: LruMap)>, active_connections: Box, op_batch_buffer: VecDeque, stored_operations: HashMap>, @@ -71,7 +70,7 @@ pub struct RetrievalThread { impl RetrievalThread { fn run(&mut self) { - let mut operation_message_deserializer = + let operation_message_deserializer = OperationMessageDeserializer::new(OperationMessageDeserializerArgs { max_operations_prefix_ids: self.config.max_operations_per_message as u32, max_operations: self.config.max_operations_per_message as u32, @@ -88,8 +87,7 @@ impl RetrievalThread { select! { recv(self.receiver) -> msg => { match msg { - Ok((peer_id, message_id, message)) => { - operation_message_deserializer.set_message_id(message_id); + Ok((peer_id, message)) => { let (rest, message) = match operation_message_deserializer .deserialize::(&message) { Ok((rest, message)) => (rest, message), @@ -194,7 +192,13 @@ impl RetrievalThread { received_ids.insert(operation_id); // Check operation signature only if not already checked. 
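Before that check continues below, a note on state introduced earlier in this file: `asked_operations` is now a `LruMap<OperationId, (Instant, Vec<PeerId>)>`, remembering for each requested operation when the last ask went out and which peers were tried; the consuming logic appears further down in the announcement handling. A hedged, simplified sketch of that bookkeeping (`u64`/`u32` standing in for `OperationId`/`PeerId`, and the retry rule condensed):

```rust
use schnellru::{ByLength, LruMap};
use std::time::{Duration, Instant};

// Decide whether to ask `peer` for `op_id`, updating the re-ask buffer.
fn should_ask(
    asked: &mut LruMap<u64, (Instant, Vec<u32>)>,
    op_id: u64,
    peer: u32,
    retry_after: Duration,
) -> bool {
    let now = Instant::now();
    match asked.get(&op_id) {
        Some((_, peers)) if peers.contains(&peer) => false, // this peer was already asked
        Some((last, _)) if now.duration_since(*last) < retry_after => false, // too soon to re-ask
        Some(entry) => {
            entry.0 = now; // re-ask: refresh the timestamp and record the peer
            entry.1.push(peer);
            true
        }
        None => {
            asked.insert(op_id, (now, vec![peer]));
            true
        }
    }
}

fn main() {
    let mut asked = LruMap::new(ByLength::new(1024));
    assert!(should_ask(&mut asked, 42, 1, Duration::from_millis(500)));
    assert!(!should_ask(&mut asked, 42, 1, Duration::from_millis(500))); // same peer, skip
}
```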
- if !self.cache.read().checked_operations.contains(&operation_id) { + if self + .cache + .read() + .checked_operations + .peek(&operation_id) + .is_none() + { // check signature if the operation wasn't in `checked_operation` new_operations.insert(operation_id, operation); }; @@ -208,7 +212,7 @@ impl RetrievalThread { .collect::>(), )?; - { + 'write_cache: { // add to checked operations let mut cache_write = self.cache.write(); for op_id in new_operations.keys().copied() { @@ -216,17 +220,22 @@ impl RetrievalThread { } // add to known ops - let known_ops = - cache_write - .ops_known_by_peer - .get_or_insert_mut(source_peer_id.clone(), || { - LruCache::new( - NonZeroUsize::new(self.config.max_node_known_ops_size) - .expect("max_node_known_ops_size in config must be > 0"), - ) - }); + let Ok(known_ops) = cache_write + .ops_known_by_peer + .get_or_insert(source_peer_id.clone(), || { + LruMap::new(ByLength::new( + self.config + .max_node_known_ops_size + .try_into() + .expect("max_node_known_ops_size in config must be > 0"), + )) + }) + .ok_or(()) else { + warn!("ops_known_by_peer limitation reached"); + break 'write_cache; + }; for id in received_ids { - known_ops.put(id.prefix(), ()); + known_ops.insert(id.prefix(), ()); } } @@ -271,7 +280,7 @@ impl RetrievalThread { .insert(Instant::now(), to_announce.clone()); self.storage.extend(ops_to_propagate); self.internal_sender - .send(OperationHandlerPropagationCommand::AnnounceOperations( + .try_send(OperationHandlerPropagationCommand::AnnounceOperations( to_announce, )) .map_err(|err| ProtocolError::SendError(err.to_string()))?; @@ -308,26 +317,31 @@ impl RetrievalThread { peer_id: &PeerId, ) -> Result<(), ProtocolError> { // mark sender as knowing the ops - { + 'write_cache: { let mut cache_write = self.cache.write(); - let known_ops = - cache_write - .ops_known_by_peer - .get_or_insert_mut(peer_id.clone(), || { - LruCache::new( - NonZeroUsize::new(self.config.max_node_known_ops_size) - .expect("max_node_known_ops_size in config must be > 0"), - ) - }); + let Ok(known_ops) = cache_write + .ops_known_by_peer + .get_or_insert(peer_id.clone(), || { + LruMap::new(ByLength::new( + self.config + .max_node_known_ops_size + .try_into() + .expect("max_node_known_ops_size in config must be > 0"), + )) + }) + .ok_or(()) else { + warn!("ops_known_by_peer limitation reached"); + break 'write_cache; + }; for prefix in &op_batch { - known_ops.put(*prefix, ()); + known_ops.insert(*prefix, ()); } } // filter out the operations that we already know about { let cache_read = self.cache.read(); - op_batch.retain(|prefix| !cache_read.checked_operations_prefix.contains(prefix)); + op_batch.retain(|prefix| cache_read.checked_operations_prefix.peek(prefix).is_none()); } let mut ask_set = OperationPrefixIds::with_capacity(op_batch.len()); @@ -336,7 +350,7 @@ impl RetrievalThread { let now = Instant::now(); let mut count_reask = 0; for op_id in op_batch { - let wish = match self.asked_operations.get_mut(&op_id) { + let wish = match self.asked_operations.get(&op_id) { Some(wish) => { if wish.1.contains(peer_id) { continue; // already asked to the `peer_id` @@ -364,7 +378,7 @@ impl RetrievalThread { } else { ask_set.insert(op_id); self.asked_operations - .put(op_id, (now, vec![peer_id.clone()])); + .insert(op_id, (now, vec![peer_id.clone()])); } } // EndOf for op_id in op_batch: @@ -405,7 +419,7 @@ impl RetrievalThread { warn!("Failed to send AskForOperations message to peer: {}", err); { let mut cache_write = self.cache.write(); - 
cache_write.ops_known_by_peer.pop(peer_id); + cache_write.ops_known_by_peer.remove(peer_id); } } } @@ -469,7 +483,7 @@ impl RetrievalThread { warn!("Failed to send Operations message to peer: {}", err); { let mut cache_write = self.cache.write(); - cache_write.ops_known_by_peer.pop(peer_id); + cache_write.ops_known_by_peer.remove(peer_id); } } } @@ -480,14 +494,14 @@ impl RetrievalThread { fn ban_node(&mut self, peer_id: &PeerId) -> Result<(), ProtocolError> { massa_trace!("ban node from retrieval thread", { "peer_id": peer_id.to_string() }); self.peer_cmd_sender - .send(PeerManagementCmd::Ban(vec![peer_id.clone()])) + .try_send(PeerManagementCmd::Ban(vec![peer_id.clone()])) .map_err(|err| ProtocolError::SendError(err.to_string())) } } #[allow(clippy::too_many_arguments)] pub fn start_retrieval_thread( - receiver: Receiver<(PeerId, u64, Vec)>, + receiver: Receiver, pool_controller: Box, storage: Storage, config: ProtocolConfig, @@ -509,10 +523,12 @@ pub fn start_retrieval_thread( receiver_ext, cache, active_connections, - asked_operations: LruCache::new( - NonZeroUsize::new(config.asked_operations_buffer_capacity) + asked_operations: LruMap::new(ByLength::new( + config + .asked_operations_buffer_capacity + .try_into() .expect("asked_operations_buffer_capacity in config must be > 0"), - ), + )), config, operation_message_serializer: MessagesSerializer::new() .with_operation_message_serializer(OperationMessageSerializer::new()), diff --git a/massa-protocol-worker/src/handlers/peer_handler/announcement.rs b/massa-protocol-worker/src/handlers/peer_handler/announcement.rs index 09ebf44a34e..07bf60694bd 100644 --- a/massa-protocol-worker/src/handlers/peer_handler/announcement.rs +++ b/massa-protocol-worker/src/handlers/peer_handler/announcement.rs @@ -2,10 +2,12 @@ use std::{ collections::HashMap, net::{IpAddr, SocketAddr}, ops::Bound::Included, - time::{SystemTime, UNIX_EPOCH}, }; +use massa_hash::Hash; use massa_models::serialization::IpAddrDeserializer; +use massa_signature::{KeyPair, Signature, SignatureDeserializer}; +use massa_time::MassaTime; use nom::{ error::{context, ContextError, ParseError}, multi::length_count, @@ -15,11 +17,11 @@ use nom::{ use peernet::{ error::{PeerNetError, PeerNetResult}, transports::TransportType, - types::{Hash, KeyPair, Signature}, }; use massa_serialization::{ - Deserializer, SerializeError, Serializer, U64VarIntDeserializer, U64VarIntSerializer, + DeserializeError, Deserializer, SerializeError, Serializer, U64VarIntDeserializer, + U64VarIntSerializer, }; #[derive(Clone, Debug, PartialEq, Eq)] @@ -27,7 +29,7 @@ pub struct Announcement { /// Listeners pub listeners: HashMap, /// Timestamp - pub timestamp: u128, + pub timestamp: u64, /// Hash pub hash: Hash, /// serialized version @@ -113,13 +115,31 @@ impl Deserializer for AnnouncementDeserializer { }), ), context("Failed timestamp deserialization", |buffer: &'a [u8]| { - let timestamp = u128::from_be_bytes(buffer[..16].try_into().map_err(|_| { - nom::Err::Error(ParseError::from_error_kind( - buffer, - nom::error::ErrorKind::LengthValue, - )) - })?); - Ok((&buffer[16..], timestamp)) + let timestamp = u64::from_be_bytes( + buffer + .get(..8) + .ok_or(nom::Err::Error(ParseError::from_error_kind( + buffer, + nom::error::ErrorKind::LengthValue, + )))? + .try_into() + .map_err(|_| { + nom::Err::Error(ParseError::from_error_kind( + buffer, + nom::error::ErrorKind::LengthValue, + )) + })?, + ); + + Ok(( + buffer + .get(8..) 
+ .ok_or(nom::Err::Error(ParseError::from_error_kind( + buffer, + nom::error::ErrorKind::LengthValue, + )))?, + timestamp, + )) }), )), ) @@ -127,18 +147,15 @@ impl Deserializer for AnnouncementDeserializer { .parse(buffer)?; let serialized = buffer[..buffer.len() - rest.len()].to_vec(); let hash = Hash::compute_from(&serialized); - let signature = Signature::from_bytes(&rest[..64].try_into().map_err(|_| { - nom::Err::Error(ParseError::from_error_kind( - rest, - nom::error::ErrorKind::LengthValue, - )) - })?) - .map_err(|_| { - nom::Err::Error(ParseError::from_error_kind( - rest, - nom::error::ErrorKind::Verify, - )) - })?; + let signature_deserializer = SignatureDeserializer::new(); + let (rest, signature) = signature_deserializer + .deserialize::(rest) + .map_err(|_| { + nom::Err::Error(ParseError::from_error_kind( + rest, + nom::error::ErrorKind::Verify, + )) + })?; Ok(( rest, Announcement { @@ -187,10 +204,9 @@ impl Announcement { buf.extend_from_slice(&port_bytes); buf.push(*listener.1 as u8); } - let timestamp = SystemTime::now() - .duration_since(UNIX_EPOCH) - .expect("Time went backward") - .as_millis(); + let timestamp = MassaTime::now() + .expect("Unable to get MassaTime::now") + .to_millis(); buf.extend(timestamp.to_be_bytes()); let hash = Hash::compute_from(&buf); Ok(Self { @@ -211,7 +227,8 @@ mod tests { Announcement, AnnouncementDeserializer, AnnouncementDeserializerArgs, }; use massa_serialization::{DeserializeError, Deserializer, Serializer}; - use peernet::{transports::TransportType, types::KeyPair}; + use massa_signature::KeyPair; + use peernet::transports::TransportType; use std::collections::HashMap; use super::AnnouncementSerializer; @@ -221,7 +238,8 @@ mod tests { let mut listeners = HashMap::new(); listeners.insert("127.0.0.1:8081".parse().unwrap(), TransportType::Tcp); listeners.insert("127.0.0.1:8082".parse().unwrap(), TransportType::Quic); - let announcement = Announcement::new(listeners, None, &KeyPair::generate()).unwrap(); + let announcement = + Announcement::new(listeners, None, &KeyPair::generate(0).unwrap()).unwrap(); let announcement_serializer = AnnouncementSerializer::new(); let announcement_deserializer = AnnouncementDeserializer::new(AnnouncementDeserializerArgs { max_listeners: 100 }); diff --git a/massa-protocol-worker/src/handlers/peer_handler/messages.rs b/massa-protocol-worker/src/handlers/peer_handler/messages.rs index aa8684ce2d2..66f9e2afbd3 100644 --- a/massa-protocol-worker/src/handlers/peer_handler/messages.rs +++ b/massa-protocol-worker/src/handlers/peer_handler/messages.rs @@ -1,7 +1,10 @@ use std::{collections::HashMap, net::SocketAddr, ops::Bound::Included}; use massa_models::serialization::{IpAddrDeserializer, IpAddrSerializer}; -use massa_serialization::{Deserializer, Serializer, U64VarIntDeserializer, U64VarIntSerializer}; +use massa_protocol_exports::{PeerId, PeerIdDeserializer, PeerIdSerializer}; +use massa_serialization::{ + Deserializer, SerializeError, Serializer, U64VarIntDeserializer, U64VarIntSerializer, +}; use nom::{ error::{context, ContextError, ParseError}, multi::length_count, @@ -9,7 +12,7 @@ use nom::{ IResult, Parser, }; use num_enum::{IntoPrimitive, TryFromPrimitive}; -use peernet::{peer_id::PeerId, transports::TransportType, types::PUBLIC_KEY_SIZE_BYTES}; +use peernet::transports::TransportType; #[derive(Debug, Clone)] //TODO: Fix this clippy warning @@ -21,20 +24,6 @@ pub enum PeerManagementMessage { ListPeers(Vec<(PeerId, HashMap)>), } -impl PeerManagementMessage { - pub fn get_id(&self) -> MessageTypeId { - match 
self { - PeerManagementMessage::NewPeerConnected(_) => MessageTypeId::NewPeerConnected, - PeerManagementMessage::ListPeers(_) => MessageTypeId::ListPeers, - } - } - - pub fn max_id() -> u64 { - >::into(MessageTypeId::ListPeers) + 1 - } -} - -// DO NOT FORGET TO UPDATE MAX ID IF YOU UPDATE THERE #[derive(IntoPrimitive, Debug, Eq, PartialEq, TryFromPrimitive)] #[repr(u64)] pub enum MessageTypeId { @@ -42,17 +31,30 @@ pub enum MessageTypeId { ListPeers = 1, } +impl From<&PeerManagementMessage> for MessageTypeId { + fn from(message: &PeerManagementMessage) -> Self { + match message { + PeerManagementMessage::NewPeerConnected(_) => MessageTypeId::NewPeerConnected, + PeerManagementMessage::ListPeers(_) => MessageTypeId::ListPeers, + } + } +} + #[derive(Default, Clone)] pub struct PeerManagementMessageSerializer { + id_serializer: U64VarIntSerializer, length_serializer: U64VarIntSerializer, ip_addr_serializer: IpAddrSerializer, + peer_id_serializer: PeerIdSerializer, } impl PeerManagementMessageSerializer { pub fn new() -> Self { Self { + id_serializer: U64VarIntSerializer::new(), length_serializer: U64VarIntSerializer::new(), ip_addr_serializer: IpAddrSerializer::new(), + peer_id_serializer: PeerIdSerializer::new(), } } } @@ -63,9 +65,15 @@ impl Serializer for PeerManagementMessageSerializer { value: &PeerManagementMessage, buffer: &mut Vec, ) -> Result<(), massa_serialization::SerializeError> { + self.id_serializer.serialize( + &MessageTypeId::from(value).try_into().map_err(|_| { + SerializeError::GeneralError(String::from("Failed to serialize id")) + })?, + buffer, + )?; match value { PeerManagementMessage::NewPeerConnected((peer_id, listeners)) => { - buffer.extend_from_slice(&peer_id.to_bytes()); + self.peer_id_serializer.serialize(peer_id, buffer)?; self.length_serializer .serialize(&(listeners.len() as u64), buffer)?; for (socket_addr, transport_type) in listeners { @@ -79,7 +87,7 @@ impl Serializer for PeerManagementMessageSerializer { self.length_serializer .serialize(&(peers.len() as u64), buffer)?; for (peer_id, listeners) in peers { - buffer.extend_from_slice(&peer_id.to_bytes()); + self.peer_id_serializer.serialize(peer_id, buffer)?; self.length_serializer .serialize(&(listeners.len() as u64), buffer)?; for (socket_addr, transport_type) in listeners { @@ -96,10 +104,11 @@ impl Serializer for PeerManagementMessageSerializer { } pub struct PeerManagementMessageDeserializer { - message_id: u64, + id_deserializer: U64VarIntDeserializer, listeners_length_deserializer: U64VarIntDeserializer, peers_length_deserializer: U64VarIntDeserializer, ip_addr_deserializer: IpAddrDeserializer, + peer_id_deserializer: PeerIdDeserializer, } /// Limits used in the deserialization of `OperationMessage` @@ -113,7 +122,7 @@ pub struct PeerManagementMessageDeserializerArgs { impl PeerManagementMessageDeserializer { pub fn new(limits: PeerManagementMessageDeserializerArgs) -> Self { Self { - message_id: 0, + id_deserializer: U64VarIntDeserializer::new(Included(0), Included(u64::MAX)), listeners_length_deserializer: U64VarIntDeserializer::new( Included(0), Included(limits.max_listeners_per_peer), @@ -123,12 +132,9 @@ impl PeerManagementMessageDeserializer { Included(limits.max_peers_per_announcement), ), ip_addr_deserializer: IpAddrDeserializer::new(), + peer_id_deserializer: PeerIdDeserializer::new(), } } - - pub fn set_message(&mut self, message_id: u64) { - self.message_id = message_id; - } } impl Deserializer for PeerManagementMessageDeserializer { @@ -137,7 +143,8 @@ impl Deserializer for 
PeerManagementMessageDeserializer { buffer: &'a [u8], ) -> IResult<&'a [u8], PeerManagementMessage, E> { context("Failed PeerManagementMessage deserialization", |buffer| { - let id = MessageTypeId::try_from(self.message_id).map_err(|_| { + let (buffer, raw_id) = self.id_deserializer.deserialize(buffer)?; + let id = MessageTypeId::try_from(raw_id).map_err(|_| { nom::Err::Error(ParseError::from_error_kind( buffer, nom::error::ErrorKind::Eof, @@ -148,21 +155,7 @@ impl Deserializer for PeerManagementMessageDeserializer { "Failed NewPeerConnected deserialization", tuple(( context("Failed PeerId deserialization", |buffer: &'a [u8]| { - let peer_id = PeerId::from_bytes( - buffer[..PUBLIC_KEY_SIZE_BYTES].try_into().map_err(|_| { - nom::Err::Error(ParseError::from_error_kind( - buffer, - nom::error::ErrorKind::LengthValue, - )) - })?, - ) - .map_err(|_| { - nom::Err::Error(ParseError::from_error_kind( - buffer, - nom::error::ErrorKind::Eof, - )) - })?; - Ok((&buffer[PUBLIC_KEY_SIZE_BYTES..], peer_id)) + self.peer_id_deserializer.deserialize(buffer) }), length_count( context("Failed length listeners deserialization", |buffer| { @@ -192,23 +185,7 @@ impl Deserializer for PeerManagementMessageDeserializer { "Failed peer deserialization", tuple(( context("Failed PeerId deserialization", |buffer: &'a [u8]| { - let peer_id = PeerId::from_bytes( - buffer[..PUBLIC_KEY_SIZE_BYTES].try_into().map_err( - |_| { - nom::Err::Error(ParseError::from_error_kind( - buffer, - nom::error::ErrorKind::Eof, - )) - }, - )?, - ) - .map_err(|_| { - nom::Err::Error(ParseError::from_error_kind( - buffer, - nom::error::ErrorKind::Eof, - )) - })?; - Ok((&buffer[PUBLIC_KEY_SIZE_BYTES..], peer_id)) + self.peer_id_deserializer.deserialize(buffer) }), length_count( context("Failed length listeners deserialization", |buffer| { @@ -271,7 +248,16 @@ fn listener_deserializer<'a, E: ParseError<&'a [u8]> + ContextError<&'a [u8]>>( ))) } }; - Ok((&buffer[1..], transport_type)) + + Ok(( + buffer + .get(1..) 
+ .ok_or(nom::Err::Error(ParseError::from_error_kind( + buffer, + nom::error::ErrorKind::LengthValue, + )))?, + transport_type, + )) }, ) .parse(buffer) @@ -287,17 +273,18 @@ mod tests { use std::collections::HashMap; - use massa_serialization::{DeserializeError, Deserializer, Serializer}; - use peernet::{peer_id::PeerId, transports::TransportType, types::KeyPair}; - use super::{ PeerManagementMessage, PeerManagementMessageDeserializer, PeerManagementMessageDeserializerArgs, PeerManagementMessageSerializer, }; + use massa_protocol_exports::PeerId; + use massa_serialization::{DeserializeError, Deserializer, Serializer}; + use massa_signature::KeyPair; + use peernet::transports::TransportType; #[test] fn test_peer_connected() { - let keypair = KeyPair::generate(); + let keypair = KeyPair::generate(0).unwrap(); let mut listeners = HashMap::new(); listeners.insert("127.0.0.1:33036".parse().unwrap(), TransportType::Tcp); listeners.insert("127.0.0.1:33035".parse().unwrap(), TransportType::Quic); @@ -307,18 +294,15 @@ mod tests { let msg = PeerManagementMessage::NewPeerConnected(( PeerId::from_public_key(keypair.get_public_key()), listeners.clone(), - )) - .into(); + )); serializer.serialize(&msg, &mut buffer).unwrap(); - let mut deserializer = + let deserializer = PeerManagementMessageDeserializer::new(PeerManagementMessageDeserializerArgs { max_listeners_per_peer: 1000, max_peers_per_announcement: 1000, }); - deserializer.set_message(0); - let (rest, message) = deserializer .deserialize::<DeserializeError>(&buffer) .unwrap(); @@ -339,10 +323,10 @@ mod tests { #[test] fn test_list_peers() { - let keypair1 = KeyPair::generate(); + let keypair1 = KeyPair::generate(0).unwrap(); let mut listeners = HashMap::new(); listeners.insert("127.0.0.1:33036".parse().unwrap(), TransportType::Tcp); - let keypair2 = KeyPair::generate(); + let keypair2 = KeyPair::generate(0).unwrap(); let message = PeerManagementMessage::ListPeers(vec![ ( PeerId::from_public_key(keypair1.get_public_key()), @@ -357,12 +341,11 @@ mod tests { let serializer = PeerManagementMessageSerializer::new(); let mut buffer = vec![]; serializer.serialize(&message, &mut buffer).unwrap(); - let mut deserializer = + let deserializer = PeerManagementMessageDeserializer::new(PeerManagementMessageDeserializerArgs { max_listeners_per_peer: 1000, max_peers_per_announcement: 1000, }); - deserializer.set_message(1); let (rest, message) = deserializer .deserialize::<DeserializeError>(&buffer) .unwrap(); diff --git a/massa-protocol-worker/src/handlers/peer_handler/mod.rs b/massa-protocol-worker/src/handlers/peer_handler/mod.rs index 77db9cb31d3..380502c4074 100644 --- a/massa-protocol-worker/src/handlers/peer_handler/mod.rs +++ b/massa-protocol-worker/src/handlers/peer_handler/mod.rs @@ -7,26 +7,29 @@ use crossbeam::{ channel::{Receiver, Sender}, select, }; +use massa_hash::Hash; +use massa_models::config::SIGNATURE_DESER_SIZE; use massa_models::version::{VersionDeserializer, VersionSerializer}; -use massa_protocol_exports::{BootstrapPeers, ProtocolConfig}; +use massa_protocol_exports::{ + BootstrapPeers, PeerId, PeerIdDeserializer, PeerIdSerializer, ProtocolConfig, +}; use massa_serialization::{DeserializeError, Deserializer, Serializer}; -use peernet::types::PUBLIC_KEY_SIZE_BYTES; +use massa_signature::Signature; +use peernet::context::Context as _; +use peernet::messages::MessagesSerializer as _; use rand::{rngs::StdRng, RngCore, SeedableRng}; -use peernet::messages::MessagesSerializer; use peernet::{ 
error::{PeerNetError, PeerNetResult}, messages::MessagesHandler as PeerNetMessagesHandler, peer::InitConnectionHandler, - peer_id::PeerId, transports::{endpoint::Endpoint, TransportType}, - types::Hash, - types::{KeyPair, Signature}, }; use tracing::log::{debug, error, info, warn}; +use crate::context::Context; use crate::handlers::peer_handler::models::PeerState; -use crate::messages::MessagesHandler; +use crate::messages::{Message, MessagesHandler, MessagesSerializer}; use crate::wrap_network::ActiveConnectionsTrait; use self::models::PeerInfo; @@ -93,9 +96,9 @@ impl PeerManagementHandler { let peer_db = peer_db.clone(); let ticker = tick(Duration::from_secs(10)); let config = config.clone(); - let message_serializer = crate::messages::MessagesSerializer::new() + let message_serializer = MessagesSerializer::new() .with_peer_management_message_serializer(PeerManagementMessageSerializer::new()); - let mut message_deserializer = + let message_deserializer = PeerManagementMessageDeserializer::new(PeerManagementMessageDeserializerArgs { max_peers_per_announcement: config.max_size_peers_announcement, max_listeners_per_peer: config.max_size_listeners_per_peer, @@ -144,7 +147,7 @@ impl PeerManagementHandler { }).collect(); peers.push((peer_id.clone(), listeners)); } - if let Err(err) = responder.send(BootstrapPeers(peers)) { + if let Err(err) = responder.try_send(BootstrapPeers(peers)) { warn!("error sending bootstrap peers: {:?}", err); } }, @@ -160,8 +163,8 @@ impl PeerManagementHandler { } }, recv(receiver_msg) -> msg => { - let (peer_id, message_id, message) = match msg { - Ok((peer_id, message_id, message)) => (peer_id, message_id, message), + let (peer_id, message) = match msg { + Ok((peer_id, message)) => (peer_id, message), Err(_) => { return; } @@ -173,7 +176,6 @@ impl PeerManagementHandler { continue; } } - message_deserializer.set_message(message_id); let (rest, message) = match message_deserializer .deserialize::(&message) { Ok((rest, message)) => (rest, message), @@ -216,7 +218,7 @@ impl PeerManagementHandler { &mut message, ) .unwrap(); - sender_msg.send((peer_id.clone(), 0, message)).unwrap(); + sender_msg.try_send((peer_id.clone(), message)).unwrap(); } Self { @@ -253,7 +255,9 @@ pub struct MassaHandshake { pub version_deserializer: VersionDeserializer, pub config: ProtocolConfig, pub peer_db: SharedPeerDB, - peer_mngt_msg_serializer: crate::messages::MessagesSerializer, + peer_mngt_msg_serializer: MessagesSerializer, + peer_id_serializer: PeerIdSerializer, + peer_id_deserializer: PeerIdDeserializer, message_handlers: MessagesHandler, } @@ -274,22 +278,32 @@ impl MassaHandshake { version_serializer: VersionSerializer::new(), version_deserializer: VersionDeserializer::new(), config, - peer_mngt_msg_serializer: crate::messages::MessagesSerializer::new() + peer_id_serializer: PeerIdSerializer::new(), + peer_id_deserializer: PeerIdDeserializer::new(), + peer_mngt_msg_serializer: MessagesSerializer::new() .with_peer_management_message_serializer(PeerManagementMessageSerializer::new()), message_handlers, } } } -impl InitConnectionHandler for MassaHandshake { - fn perform_handshake( +impl InitConnectionHandler for MassaHandshake { + fn perform_handshake( &mut self, - keypair: &KeyPair, + context: &Context, endpoint: &mut Endpoint, listeners: &HashMap, - messages_handler: MassaMessagesHandler, + messages_handler: MessagesHandler, ) -> PeerNetResult { - let mut bytes = PeerId::from_public_key(keypair.get_public_key()).to_bytes(); + let mut bytes = vec![]; + self.peer_id_serializer + 
.serialize(&context.get_peer_id(), &mut bytes) + .map_err(|err| { + PeerNetError::HandshakeError.error( + "Massa Handshake", + Some(format!("Failed to serialize peer_id: {}", err)), + ) + })?; self.version_serializer .serialize(&self.config.version, &mut bytes) .map_err(|err| { @@ -299,8 +313,12 @@ impl InitConnectionHandler for MassaHandshake { ) })?; bytes.push(0); - let listeners_announcement = - Announcement::new(listeners.clone(), self.config.routable_ip, keypair).unwrap(); + let listeners_announcement = Announcement::new( + listeners.clone(), + self.config.routable_ip, + &context.our_keypair, + ) + .unwrap(); self.announcement_serializer .serialize(&listeners_announcement, &mut bytes) .map_err(|err| { @@ -309,23 +327,23 @@ impl InitConnectionHandler for MassaHandshake { Some(format!("Failed to serialize announcement: {}", err)), ) })?; - endpoint.send(&bytes)?; - let received = endpoint.receive()?; + endpoint.send::(&bytes)?; + let received = endpoint.receive::()?; if received.len() < 32 { return Err(PeerNetError::HandshakeError.error( "Massa Handshake", Some(format!("Received too short message len:{}", received.len())), )); } - let mut offset = 0; - let peer_id = - PeerId::from_bytes(&received[offset..offset + 32].try_into().map_err(|_| { + let (received, peer_id) = self + .peer_id_deserializer + .deserialize::(&received) + .map_err(|err| { PeerNetError::HandshakeError.error( "Massa Handshake", - Some("Failed to deserialize PeerId".to_string()), + Some(format!("Failed to deserialize peer id: {}", err)), ) - })?)?; - offset += PUBLIC_KEY_SIZE_BYTES; + })?; { let peer_db_read = self.peer_db.read(); if let Some(info) = peer_db_read.peers.get(&peer_id) { @@ -348,7 +366,7 @@ impl InitConnectionHandler for MassaHandshake { let (received, version) = self .version_deserializer - .deserialize::(&received[offset..]) + .deserialize::(received) .map_err(|err| { PeerNetError::HandshakeError.error( "Massa Handshake", @@ -361,17 +379,20 @@ impl InitConnectionHandler for MassaHandshake { Some(format!("Received version incompatible: {}", version)), )); } - offset = 0; - let id = received.get(offset).ok_or( + let id = received.first().ok_or( PeerNetError::HandshakeError .error("Massa Handshake", Some("Failed to get id".to_string())), )?; - offset += 1; match id { 0 => { let (_, announcement) = self .announcement_deserializer - .deserialize::(&received[offset..]) + .deserialize::( + received.get(1..).ok_or(PeerNetError::HandshakeError.error( + "Massa Handshake", + Some("Failed to get data".to_string()), + ))?, + ) .map_err(|err| { PeerNetError::HandshakeError.error( "Massa Handshake", @@ -390,24 +411,27 @@ impl InitConnectionHandler for MassaHandshake { announcement.clone().listeners, )); let mut bytes = Vec::new(); - let peer_management_message_serializer = PeerManagementMessageSerializer::new(); + let peer_management_message_serializer = MessagesSerializer::new() + .with_peer_management_message_serializer( + PeerManagementMessageSerializer::new(), + ); peer_management_message_serializer - .serialize(&message, &mut bytes) + .serialize(&Message::PeerManagement(Box::new(message)), &mut bytes) .map_err(|err| { PeerNetError::HandshakeError.error( "Massa Handshake", Some(format!("Failed to serialize announcement: {}", err)), ) })?; - messages_handler.handle(7, &bytes, &peer_id)?; + messages_handler.handle(&bytes, &peer_id)?; let mut self_random_bytes = [0u8; 32]; StdRng::from_entropy().fill_bytes(&mut self_random_bytes); let self_random_hash = Hash::compute_from(&self_random_bytes); let mut bytes = [0u8; 
32]; bytes[..32].copy_from_slice(&self_random_bytes); - endpoint.send(&bytes)?; - let received = endpoint.receive()?; + endpoint.send::(&bytes)?; + let received = endpoint.receive::()?; let other_random_bytes: &[u8; 32] = received.as_slice().try_into().map_err(|_| { PeerNetError::HandshakeError.error( @@ -418,27 +442,22 @@ impl InitConnectionHandler for MassaHandshake { // sign their random bytes let other_random_hash = Hash::compute_from(other_random_bytes); - let self_signature = keypair.sign(&other_random_hash).map_err(|_| { - PeerNetError::HandshakeError.error( - "Massa Handshake", - Some("Failed to sign random bytes".to_string()), - ) - })?; + let self_signature = + context.our_keypair.sign(&other_random_hash).map_err(|_| { + PeerNetError::HandshakeError.error( + "Massa Handshake", + Some("Failed to sign random bytes".to_string()), + ) + })?; - let mut bytes = [0u8; 64]; + let mut bytes = [0u8; SIGNATURE_DESER_SIZE]; bytes.copy_from_slice(&self_signature.to_bytes()); - endpoint.send(&bytes)?; - let received = endpoint.receive()?; + endpoint.send::(&bytes)?; + let received = endpoint.receive::()?; let other_signature = - Signature::from_bytes(received.as_slice().try_into().map_err(|_| { - PeerNetError::HandshakeError.error( - "Massa Handshake", - Some("Failed to get random bytes".to_string()), - ) - })?) - .map_err(|_| { + Signature::from_bytes(received.as_slice()).map_err(|_| { PeerNetError::HandshakeError.error( "Massa Handshake", Some("Failed to sign 2 random bytes".to_string()), @@ -446,14 +465,22 @@ impl InitConnectionHandler for MassaHandshake { })?; // check their signature - peer_id.verify_signature(&self_random_hash, &other_signature)?; + peer_id + .verify_signature(&self_random_hash, &other_signature) + .map_err(|err| { + PeerNetError::HandshakeError + .error("Massa Handshake", Some(format!("Signature error {}", err))) + })?; Ok((peer_id.clone(), Some(announcement))) } 1 => { - let (received, id) = self - .message_handlers - .deserialize_id(&received[offset..], &peer_id)?; - self.message_handlers.handle(id, received, &peer_id)?; + self.message_handlers.handle( + received.get(1..).ok_or( + PeerNetError::HandshakeError + .error("Massa Handshake", Some("Failed to get data".to_string())), + )?, + &peer_id, + )?; Ok((peer_id.clone(), None)) } _ => Err(PeerNetError::HandshakeError @@ -514,29 +541,33 @@ impl InitConnectionHandler for MassaHandshake { let mut buf = Vec::new(); let msg = PeerManagementMessage::ListPeers(peers_to_send).into(); - self.peer_mngt_msg_serializer.serialize_id(&msg, &mut buf)?; self.peer_mngt_msg_serializer.serialize(&msg, &mut buf)?; - endpoint.send(buf.as_slice())?; + endpoint.send::(buf.as_slice())?; res.map(|(id, _)| id) } fn fallback_function( &mut self, - keypair: &KeyPair, + context: &Context, endpoint: &mut Endpoint, _listeners: &HashMap, ) -> PeerNetResult<()> { //TODO: Fix this clone - let keypair = keypair.clone(); + let context = context.clone(); let mut endpoint = endpoint.try_clone()?; let db = self.peer_db.clone(); let serializer = self.peer_mngt_msg_serializer.clone(); let version_serializer = self.version_serializer.clone(); + let peer_id_serializer = self.peer_id_serializer.clone(); let version = self.config.version; std::thread::spawn(move || { let peers_to_send = db.read().get_rand_peers_to_send(100); - let mut buf = PeerId::from_public_key(keypair.get_public_key()).to_bytes(); + let mut buf = vec![]; + if let Err(err) = peer_id_serializer.serialize(&context.get_peer_id(), &mut buf) { + warn!("{}", err.to_string()); + return; + } if let 
Err(err) = version_serializer .serialize(&version, &mut buf) .map_err(|err| { @@ -554,16 +585,13 @@ impl InitConnectionHandler for MassaHandshake { } buf.push(1); let msg = PeerManagementMessage::ListPeers(peers_to_send).into(); - if let Err(err) = serializer.serialize_id(&msg, &mut buf) { - warn!("Failed to serialize id message: {}", err); - return; - } if let Err(err) = serializer.serialize(&msg, &mut buf) { warn!("Failed to serialize message: {}", err); return; } - //TODO: Make it non blockable - if let Err(err) = endpoint.send(buf.as_slice()) { + if let Err(err) = + endpoint.send_timeout::<PeerId>(buf.as_slice(), Duration::from_millis(200)) + { warn!("Failed to send message: {}", err); return; } diff --git a/massa-protocol-worker/src/handlers/peer_handler/models.rs b/massa-protocol-worker/src/handlers/peer_handler/models.rs index 4c26fb0d6b4..1d5ade920a1 100644 --- a/massa-protocol-worker/src/handlers/peer_handler/models.rs +++ b/massa-protocol-worker/src/handlers/peer_handler/models.rs @@ -1,18 +1,18 @@ use crossbeam::channel::Sender; -use massa_protocol_exports::{BootstrapPeers, ProtocolError}; +use massa_protocol_exports::{BootstrapPeers, PeerId, ProtocolError}; use massa_time::MassaTime; use parking_lot::RwLock; -use peernet::{peer_id::PeerId, transports::TransportType}; +use peernet::transports::TransportType; use rand::seq::SliceRandom; use std::cmp::Reverse; use std::collections::BTreeSet; -use std::time::{Duration, SystemTime, UNIX_EPOCH}; +use std::time::Duration; use std::{collections::HashMap, net::SocketAddr, sync::Arc}; use tracing::log::info; use super::announcement::Announcement; -const THREE_DAYS_MS: u128 = 3 * 24 * 60 * 60 * 1_000_000; +const THREE_DAYS_MS: u64 = 3 * 24 * 60 * 60 * 1_000_000; pub type InitialPeers = HashMap<PeerId, HashMap<SocketAddr, TransportType>>; @@ -20,14 +20,14 @@ pub type InitialPeers = HashMap<PeerId, HashMap<SocketAddr, TransportType>>; pub struct PeerDB { pub peers: HashMap<PeerId, PeerInfo>, /// peers tested successfully last is the oldest value (only routable peers) //TODO: need to be pruned - pub index_by_newest: BTreeSet<(Reverse<u128>, PeerId)>, + pub index_by_newest: BTreeSet<(Reverse<u64>, PeerId)>, /// Tested addresses used to avoid testing the same address too often. 
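// A minimal, std-only sketch of the `Reverse`-keyed ordering that `index_by_newest`
// above relies on: storing `(Reverse(timestamp), id)` tuples in a `BTreeSet` makes
// in-order iteration yield the most recently announced peers first. The string ids
// here are hypothetical stand-ins for `PeerId`.
use std::cmp::Reverse;
use std::collections::BTreeSet;

fn newest_first_demo() {
    let mut index_by_newest: BTreeSet<(Reverse<u64>, &str)> = BTreeSet::new();
    index_by_newest.insert((Reverse(1_000), "peer_a")); // oldest announcement timestamp
    index_by_newest.insert((Reverse(3_000), "peer_c")); // newest announcement timestamp
    index_by_newest.insert((Reverse(2_000), "peer_b"));
    // `Reverse` flips the comparison on the first tuple field, so ascending
    // iteration over the set starts at the newest entry.
    let order: Vec<&str> = index_by_newest.iter().map(|(_, id)| *id).collect();
    assert_eq!(order, vec!["peer_c", "peer_b", "peer_a"]);
}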
//TODO: Need to be pruned pub tested_addresses: HashMap, } pub type SharedPeerDB = Arc>; -pub type PeerMessageTuple = (PeerId, u64, Vec); +pub type PeerMessageTuple = (PeerId, Vec); #[derive(Clone, Debug)] pub struct PeerInfo { @@ -101,10 +101,9 @@ impl PeerDB { nb_peers: usize, ) -> Vec<(PeerId, HashMap)> { //TODO: Add ourself - let now = SystemTime::now() - .duration_since(UNIX_EPOCH) - .expect("Time went backward") - .as_millis(); + let now = MassaTime::now() + .expect("Unable to get MassaTime::now") + .to_millis(); let min_time = now - THREE_DAYS_MS; diff --git a/massa-protocol-worker/src/handlers/peer_handler/tester.rs b/massa-protocol-worker/src/handlers/peer_handler/tester.rs index d751f683c4a..9a9d3f90a90 100644 --- a/massa-protocol-worker/src/handlers/peer_handler/tester.rs +++ b/massa-protocol-worker/src/handlers/peer_handler/tester.rs @@ -9,16 +9,13 @@ use std::{ use crate::messages::MessagesHandler; use crossbeam::channel::{Receiver, Sender}; use massa_models::version::{Version, VersionDeserializer}; -use massa_protocol_exports::{PeerConnectionType, ProtocolConfig}; +use massa_protocol_exports::{PeerConnectionType, PeerId, PeerIdDeserializer, ProtocolConfig}; use massa_serialization::{DeserializeError, Deserializer}; use massa_time::MassaTime; use peernet::{ error::{PeerNetError, PeerNetResult}, messages::MessagesHandler as PeerNetMessagesHandler, - peer::InitConnectionHandler, - peer_id::PeerId, - transports::{endpoint::Endpoint, TransportType}, - types::KeyPair, + transports::TransportType, }; use std::cmp::Reverse; use tracing::info; @@ -29,166 +26,6 @@ use super::{ SharedPeerDB, }; use crate::wrap_network::ActiveConnectionsTrait; - -#[derive(Clone)] -pub struct TesterHandshake { - peer_db: SharedPeerDB, - our_version: Version, - announcement_deserializer: AnnouncementDeserializer, - version_deserializer: VersionDeserializer, -} - -impl TesterHandshake { - #[allow(dead_code)] - pub fn new(peer_db: SharedPeerDB, config: ProtocolConfig) -> Self { - Self { - peer_db, - announcement_deserializer: AnnouncementDeserializer::new( - AnnouncementDeserializerArgs { - max_listeners: config.max_size_listeners_per_peer, - }, - ), - our_version: config.version, - version_deserializer: VersionDeserializer::new(), - } - } -} - -impl InitConnectionHandler for TesterHandshake { - fn perform_handshake( - &mut self, - _: &KeyPair, - endpoint: &mut Endpoint, - _: &HashMap, - messages_handler: MassaMessagesHandler, - ) -> PeerNetResult { - let data = endpoint.receive()?; - if data.is_empty() { - return Err(PeerNetError::HandshakeError.error( - "Tester Handshake", - Some(String::from("Peer didn't accepted us")), - )); - } - let peer_id = PeerId::from_bytes(&data[..32].try_into().map_err(|_| { - PeerNetError::HandshakeError.error( - "Massa Handshake", - Some("Failed to deserialize PeerId".to_string()), - ) - })?)?; - let res = { - { - // check if peer is banned - let mut peer_db_write = self.peer_db.write(); - if let Some(info) = peer_db_write.peers.get_mut(&peer_id) { - if info.state == super::PeerState::Banned { - return Err(PeerNetError::HandshakeError - .error("Tester Handshake", Some(String::from("Peer is banned")))); - } - } - } - - let (data, version) = self - .version_deserializer - .deserialize::(&data[32..]) - .map_err(|err| { - PeerNetError::HandshakeError.error( - "Tester Handshake", - Some(format!("Failed to deserialize version: {}", err)), - ) - })?; - if !self.our_version.is_compatible(&version) { - return Err(PeerNetError::HandshakeError.error( - "Massa Handshake", - 
Some(format!("Received version incompatible: {}", version)), - )); - } - let id = data.first().ok_or( - PeerNetError::HandshakeError - .error("Massa Handshake", Some("Failed to get id".to_string())), - )?; - match id { - 0 => { - let (_, announcement) = self - .announcement_deserializer - .deserialize::(&data[1..]) - .map_err(|err| { - PeerNetError::HandshakeError.error( - "Tester Handshake", - Some(format!("Failed to deserialize announcement: {}", err)), - ) - })?; - - if peer_id - .verify_signature(&announcement.hash, &announcement.signature) - .is_err() - { - return Err(PeerNetError::HandshakeError - .error("Tester Handshake", Some(String::from("Invalid signature")))); - } - //TODO: Check ip we are connected match one of the announced ips - { - let mut peer_db_write = self.peer_db.write(); - //TODO: Hacky change it when better management ip/listeners - if !announcement.listeners.is_empty() { - peer_db_write - .index_by_newest - .retain(|(_, peer_id_stored)| peer_id_stored != &peer_id); - peer_db_write - .index_by_newest - .insert((Reverse(announcement.timestamp), peer_id.clone())); - } - peer_db_write - .peers - .entry(peer_id.clone()) - .and_modify(|info| { - if info.last_announce.timestamp < announcement.timestamp { - info.last_announce = announcement.clone(); - } - info.state = super::PeerState::Trusted; - }) - .or_insert(PeerInfo { - last_announce: announcement, - state: super::PeerState::Trusted, - }); - } - Ok(peer_id.clone()) - } - 1 => { - let (received, id) = messages_handler.deserialize_id(&data[1..], &peer_id)?; - messages_handler.handle(id, received, &peer_id)?; - Err(PeerNetError::HandshakeError.error( - "Massa Handshake", - Some("Tester Handshake failed received a message that our connection has been refused".to_string()), - )) - //TODO: Add the peerdb but for now impossible as we don't have announcement and we need one to place in peerdb - } - _ => Err(PeerNetError::HandshakeError - .error("Massa handshake", Some("Invalid id".to_string()))), - } - }; - - // if handshake failed, we set the peer state to HandshakeFailed - if res.is_err() { - let mut peer_db_write = self.peer_db.write(); - peer_db_write.peers.entry(peer_id).and_modify(|info| { - info.state = super::PeerState::HandshakeFailed; - }); - } - endpoint.shutdown(); - res - } - - fn fallback_function( - &mut self, - _keypair: &KeyPair, - _endpoint: &mut Endpoint, - _listeners: &HashMap, - ) -> PeerNetResult<()> { - std::thread::sleep(Duration::from_millis(10000)); - Ok(()) - } -} - pub struct Tester { pub handler: Option>, } @@ -235,6 +72,7 @@ impl Tester { peer_db: SharedPeerDB, announcement_deserializer: AnnouncementDeserializer, version_deserializer: VersionDeserializer, + peer_id_deserializer: PeerIdDeserializer, addr: SocketAddr, our_version: Version, ) -> PeerNetResult { @@ -268,12 +106,14 @@ impl Tester { Some(String::from("Peer didn't accepted us")), )); } - let peer_id = PeerId::from_bytes(&data[..32].try_into().map_err(|_| { - PeerNetError::HandshakeError.error( - "Massa Handshake", - Some("Failed to deserialize PeerId".to_string()), - ) - })?)?; + let (data, peer_id) = peer_id_deserializer + .deserialize::(&data) + .map_err(|_| { + PeerNetError::HandshakeError.error( + "Massa Handshake", + Some("Failed to deserialize PeerId".to_string()), + ) + })?; let res = { { // check if peer is banned @@ -287,7 +127,7 @@ impl Tester { } let (data, version) = version_deserializer - .deserialize::(&data[32..]) + .deserialize::(data) .map_err(|err| { PeerNetError::HandshakeError.error( "Tester Handshake", @@ -307,7 
+147,12 @@ impl Tester { match id { 0 => { let (_, announcement) = announcement_deserializer - .deserialize::(&data[1..]) + .deserialize::(data.get(1..).ok_or( + PeerNetError::HandshakeError.error( + "Massa Handshake", + Some("Failed to get buffer".to_string()), + ), + )?) .map_err(|err| { PeerNetError::HandshakeError.error( "Tester Handshake", @@ -353,9 +198,13 @@ impl Tester { Ok(peer_id.clone()) } 1 => { - let (received, id) = - messages_handler.deserialize_id(&data[1..], &peer_id)?; - messages_handler.handle(id, received, &peer_id)?; + messages_handler.handle( + data.get(1..).ok_or(PeerNetError::HandshakeError.error( + "Massa Handshake", + Some("Failed to get buffer".to_string()), + ))?, + &peer_id, + )?; Err(PeerNetError::HandshakeError.error( "Massa Handshake", Some("Tester Handshake failed received a message that our connection has been refused".to_string()), @@ -503,6 +352,7 @@ impl Tester { db.clone(), announcement_deser.clone(), VersionDeserializer::new(), + PeerIdDeserializer::new(), *addr, protocol_config.version, ); @@ -557,6 +407,7 @@ impl Tester { db.clone(), announcement_deser.clone(), VersionDeserializer::new(), + PeerIdDeserializer::new(), listener, protocol_config.version, ); diff --git a/massa-protocol-worker/src/lib.rs b/massa-protocol-worker/src/lib.rs index 59d734f97cd..76d2d8ca773 100644 --- a/massa-protocol-worker/src/lib.rs +++ b/massa-protocol-worker/src/lib.rs @@ -3,6 +3,7 @@ #![feature(ip)] mod connectivity; +mod context; mod controller; mod handlers; mod manager; diff --git a/massa-protocol-worker/src/messages.rs b/massa-protocol-worker/src/messages.rs index a8eee31c320..f2ce030c6fb 100644 --- a/massa-protocol-worker/src/messages.rs +++ b/massa-protocol-worker/src/messages.rs @@ -1,13 +1,14 @@ use crossbeam::channel::Sender; +use massa_protocol_exports::PeerId; use massa_serialization::{ DeserializeError, Deserializer, Serializer, U64VarIntDeserializer, U64VarIntSerializer, }; +use num_enum::{IntoPrimitive, TryFromPrimitive}; use peernet::{ error::{PeerNetError, PeerNetResult}, messages::{ MessagesHandler as PeerNetMessagesHandler, MessagesSerializer as PeerNetMessagesSerializer, }, - peer_id::PeerId, }; use crate::handlers::{ @@ -27,6 +28,26 @@ pub enum Message { PeerManagement(Box), } +#[derive(IntoPrimitive, Debug, Eq, PartialEq, TryFromPrimitive)] +#[repr(u64)] +pub enum MessageTypeId { + Block = 0, + Endorsement = 1, + Operation = 2, + PeerManagement = 3, +} + +impl From<&Message> for MessageTypeId { + fn from(value: &Message) -> Self { + match value { + Message::Block(_) => MessageTypeId::Block, + Message::Endorsement(_) => MessageTypeId::Endorsement, + Message::Operation(_) => MessageTypeId::Operation, + Message::PeerManagement(_) => MessageTypeId::PeerManagement, + } + } +} + //TODO: Macroize this impl From for Message { fn from(message: BlockMessage) -> Self { @@ -52,25 +73,6 @@ impl From for Message { } } -impl Message { - //TODO: Macroize get_id and max_id - fn get_id(&self) -> u64 { - match self { - Message::Block(message) => message.get_id() as u64, - Message::Endorsement(message) => message.get_id() as u64 + BlockMessage::max_id(), - Message::Operation(message) => { - message.get_id() as u64 + BlockMessage::max_id() + EndorsementMessage::max_id() - } - Message::PeerManagement(message) => { - message.get_id() as u64 - + BlockMessage::max_id() - + EndorsementMessage::max_id() - + OperationMessage::max_id() - } - } - } -} - #[derive(Clone)] pub struct MessagesSerializer { id_serializer: U64VarIntSerializer, @@ -131,19 +133,24 @@ impl 
MessagesSerializer { } impl PeerNetMessagesSerializer<Message> for MessagesSerializer { - /// Serialize the id of a message - fn serialize_id(&self, message: &Message, buffer: &mut Vec<u8>) -> PeerNetResult<()> { + /// Serialize the message + fn serialize(&self, message: &Message, buffer: &mut Vec<u8>) -> PeerNetResult<()> { self.id_serializer - .serialize(&message.get_id(), buffer) + .serialize( + &MessageTypeId::from(message).try_into().map_err(|_| { + PeerNetError::HandlerError.error( + "MessagesSerializer", + Some(String::from("Failed to serialize id")), + ) + })?, + buffer, + ) .map_err(|err| { PeerNetError::HandlerError.error( - "MessagesSerializer", - Some(format!("Failed to serialize message id: {}", err)), + "MessagesHandler", + Some(format!("Failed to serialize id {}", err)), ) - }) - } - /// Serialize the message - fn serialize(&self, message: &Message, buffer: &mut Vec<u8>) -> PeerNetResult<()> { + })?; match message { Message::Block(message) => { if let Some(serializer) = &self.block_message_serializer { @@ -211,99 +218,67 @@ impl PeerNetMessagesSerializer<Message> for MessagesSerializer { #[derive(Clone)] pub struct MessagesHandler { + pub id_deserializer: U64VarIntDeserializer, pub sender_blocks: Sender<PeerMessageTuple>, pub sender_endorsements: Sender<PeerMessageTuple>, pub sender_operations: Sender<PeerMessageTuple>, pub sender_peers: Sender<PeerMessageTuple>, - pub id_deserializer: U64VarIntDeserializer, } -impl PeerNetMessagesHandler for MessagesHandler { - fn deserialize_id<'a>( - &self, - data: &'a [u8], - _peer_id: &PeerId, - ) -> PeerNetResult<(&'a [u8], u64)> { - if data.is_empty() { - return Err(PeerNetError::ReceiveError.error( - "MessagesHandler", - Some("Empty message received".to_string()), - )); - } - self.id_deserializer +impl PeerNetMessagesHandler<PeerId> for MessagesHandler { + fn handle(&self, data: &[u8], peer_id: &PeerId) -> PeerNetResult<()> { + let (data, raw_id) = self + .id_deserializer .deserialize::<DeserializeError>(data) .map_err(|err| { PeerNetError::HandlerError.error( "MessagesHandler", - Some(format!("Failed to deserialize message id: {}", err)), + Some(format!("Failed to deserialize id: {}", err)), ) - }) - } - - fn handle(&self, id: u64, data: &[u8], peer_id: &PeerId) -> PeerNetResult<()> { - let block_max_id = BlockMessage::max_id(); - let endorsement_max_id = EndorsementMessage::max_id(); - let operation_max_id = OperationMessage::max_id(); - let peer_management_max_id = PeerManagementMessage::max_id(); - if id < block_max_id { - self.sender_blocks - .send((peer_id.clone(), id, data.to_vec())) + })?; + let id = MessageTypeId::try_from(raw_id).map_err(|_| { + PeerNetError::HandlerError.error( + "MessagesHandler", + Some(String::from("Failed to deserialize id")), + ) + })?; + match id { + MessageTypeId::Block => self + .sender_blocks + .send((peer_id.clone(), data.to_vec())) .map_err(|err| { PeerNetError::HandlerError.error( "MessagesHandler", Some(format!("Failed to send block message to channel: {}", err)), ) - }) - } else if id < endorsement_max_id + block_max_id { - self.sender_endorsements - .send((peer_id.clone(), id - block_max_id, data.to_vec())) + }), + MessageTypeId::Endorsement => self + .sender_endorsements + .try_send((peer_id.clone(), data.to_vec())) .map_err(|err| { PeerNetError::HandlerError.error( "MessagesHandler", - Some(format!( - "Failed to send endorsement message to channel: {}", - err - )), + Some(format!("Failed to send block message to channel: {}", err)), ) - }) - } else if id < operation_max_id + block_max_id + endorsement_max_id { - self.sender_operations - .send(( - peer_id.clone(), - id - (block_max_id + endorsement_max_id), - 
data.to_vec(), - )) + }), + MessageTypeId::Operation => self + .sender_operations + .try_send((peer_id.clone(), data.to_vec())) .map_err(|err| { PeerNetError::HandlerError.error( "MessagesHandler", - Some(format!( - "Failed to send operation message to channel: {}", - err - )), + Some(format!("Failed to send block message to channel: {}", err)), ) - }) - } else if id < peer_management_max_id + block_max_id + endorsement_max_id + operation_max_id - { - self.sender_peers - .send(( - peer_id.clone(), - id - (block_max_id + endorsement_max_id + operation_max_id), - data.to_vec(), - )) + }), + MessageTypeId::PeerManagement => self + .sender_peers + .try_send((peer_id.clone(), data.to_vec())) .map_err(|err| { PeerNetError::HandlerError.error( "MessagesHandler", - Some(format!( - "Failed to send peer management message to channel: {}", - err - )), + Some(format!("Failed to send block message to channel: {}", err)), ) - }) - } else { - Err(PeerNetError::HandlerError.error( - "MessagesHandler", - Some(format!("Unknown message id: {}", id)), - )) + }), } } } diff --git a/massa-protocol-worker/src/tests/ban_nodes_scenarios.rs b/massa-protocol-worker/src/tests/ban_nodes_scenarios.rs index abbd9d29d93..c4f10f10a73 100644 --- a/massa-protocol-worker/src/tests/ban_nodes_scenarios.rs +++ b/massa-protocol-worker/src/tests/ban_nodes_scenarios.rs @@ -4,10 +4,10 @@ use std::time::Duration; use massa_consensus_exports::test_exports::MockConsensusControllerMessage; use massa_models::{block_id::BlockId, prehash::PreHashSet, slot::Slot}; +use massa_protocol_exports::PeerId; use massa_protocol_exports::{test_exports::tools, ProtocolConfig}; use massa_signature::KeyPair; use massa_time::MassaTime; -use peernet::peer_id::PeerId; use serial_test::serial; use crate::{ @@ -41,23 +41,21 @@ fn test_protocol_bans_node_sending_block_header_with_invalid_signature() { mut consensus_event_receiver, pool_event_receiver| { //1. Create 1 node - let node_a_keypair = KeyPair::generate(); - let (node_a_peer_id, _node_a) = network_controller.create_fake_connection( - PeerId::from_bytes(node_a_keypair.get_public_key().to_bytes()).unwrap(), - ); + let node_a_keypair = KeyPair::generate(0).unwrap(); + let (node_a_peer_id, _node_a) = network_controller + .create_fake_connection(PeerId::from_public_key(node_a_keypair.get_public_key())); //2. Create a block with bad public key. let mut block = tools::create_block(&node_a_keypair); - block.content.header.content_creator_pub_key = KeyPair::generate().get_public_key(); + block.content.header.content_creator_pub_key = + KeyPair::generate(0).unwrap().get_public_key(); //end setup //3. Send header to protocol. network_controller .send_from_peer( &node_a_peer_id, - Message::Block(Box::new(BlockMessage::BlockHeader( - block.content.header.clone(), - ))), + Message::Block(Box::new(BlockMessage::BlockHeader(block.content.header))), ) .unwrap(); @@ -109,21 +107,20 @@ fn test_protocol_bans_node_sending_operation_with_invalid_signature() { consensus_event_receiver, mut pool_event_receiver| { //1. Create 1 node - let node_a_keypair = KeyPair::generate(); - let (node_a_peer_id, _node_a) = network_controller.create_fake_connection( - PeerId::from_bytes(node_a_keypair.get_public_key().to_bytes()).unwrap(), - ); + let node_a_keypair = KeyPair::generate(0).unwrap(); + let (node_a_peer_id, _node_a) = network_controller + .create_fake_connection(PeerId::from_public_key(node_a_keypair.get_public_key())); //2. Create a operation with bad public key. 
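// A simplified, std-only sketch of the framing adopted in messages.rs above: every
// message now carries its own type id at the front of the payload, so `handle` can
// route in one pass without the old `deserialize_id`/`set_message` two-step. The
// real code encodes the id with `U64VarIntSerializer`; this sketch uses a single
// byte for brevity, and the names here are illustrative only.
#[derive(Debug, PartialEq, Clone, Copy)]
enum TypeId {
    Block = 0,
    Endorsement = 1,
    Operation = 2,
    PeerManagement = 3,
}

fn encode(id: TypeId, payload: &[u8]) -> Vec<u8> {
    // id prefix first, then the message body
    let mut buf = vec![id as u8];
    buf.extend_from_slice(payload);
    buf
}

fn route(data: &[u8]) -> Result<(TypeId, &[u8]), &'static str> {
    // split off the id, then hand the remaining bytes to the matching handler
    let (&raw, rest) = data.split_first().ok_or("empty message")?;
    let id = match raw {
        0 => TypeId::Block,
        1 => TypeId::Endorsement,
        2 => TypeId::Operation,
        3 => TypeId::PeerManagement,
        _ => return Err("unknown message id"),
    };
    Ok((id, rest))
}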
let mut operation = tools::create_operation_with_expire_period(&node_a_keypair, 1); - operation.content_creator_pub_key = KeyPair::generate().get_public_key(); + operation.content_creator_pub_key = KeyPair::generate(0).unwrap().get_public_key(); //end setup //3. Send operation to protocol. network_controller .send_from_peer( &node_a_peer_id, - Message::Operation(OperationMessage::Operations(vec![operation.clone()])), + Message::Operation(OperationMessage::Operations(vec![operation])), ) .unwrap(); @@ -175,10 +172,9 @@ fn test_protocol_bans_node_sending_header_with_invalid_signature() { consensus_event_receiver, pool_event_receiver| { //1. Create 1 node - let node_a_keypair = KeyPair::generate(); - let (node_a_peer_id, node_a) = network_controller.create_fake_connection( - PeerId::from_bytes(node_a_keypair.get_public_key().to_bytes()).unwrap(), - ); + let node_a_keypair = KeyPair::generate(0).unwrap(); + let (node_a_peer_id, node_a) = network_controller + .create_fake_connection(PeerId::from_public_key(node_a_keypair.get_public_key())); //2. Creates 2 ops let operation_1 = tools::create_operation_with_expire_period(&node_a_keypair, 1); let operation_2 = tools::create_operation_with_expire_period(&node_a_keypair, 1); @@ -187,7 +183,7 @@ fn test_protocol_bans_node_sending_header_with_invalid_signature() { let block = tools::create_block_with_operations( &node_a_keypair, Slot::new(1, 1), - vec![operation_1.clone()], + vec![operation_1], ); //4. Node A send the block @@ -233,25 +229,22 @@ fn test_protocol_bans_node_sending_header_with_invalid_signature() { ); //8. Create a new node - let node_b_keypair = KeyPair::generate(); - let (_node_b_peer_id, _node_b) = network_controller.create_fake_connection( - PeerId::from_bytes(node_b_keypair.get_public_key().to_bytes()).unwrap(), - ); + let node_b_keypair = KeyPair::generate(0).unwrap(); + let (_node_b_peer_id, _node_b) = network_controller + .create_fake_connection(PeerId::from_public_key(node_b_keypair.get_public_key())); //9. Create a new block with the operation 2 let block_2 = tools::create_block_with_operations( &node_b_keypair, Slot::new(1, 1), - vec![operation_2.clone()], + vec![operation_2], ); //10. Node A tries to send it network_controller .send_from_peer( &node_a_peer_id, - Message::Block(Box::new(BlockMessage::BlockHeader( - block_2.content.header.clone(), - ))), + Message::Block(Box::new(BlockMessage::BlockHeader(block_2.content.header))), ) .expect_err("Node A should not be able to send a block"); std::thread::sleep(std::time::Duration::from_millis(1000)); @@ -286,10 +279,9 @@ fn test_protocol_does_not_asks_for_block_from_banned_node_who_propagated_header( mut consensus_event_receiver, pool_event_receiver| { //1. Create 1 node - let node_a_keypair = KeyPair::generate(); - let (node_a_peer_id, node_a) = network_controller.create_fake_connection( - PeerId::from_bytes(node_a_keypair.get_public_key().to_bytes()).unwrap(), - ); + let node_a_keypair = KeyPair::generate(0).unwrap(); + let (node_a_peer_id, node_a) = network_controller + .create_fake_connection(PeerId::from_public_key(node_a_keypair.get_public_key())); //2. Create a block. let block = tools::create_block(&node_a_keypair); @@ -327,9 +319,10 @@ fn test_protocol_does_not_asks_for_block_from_banned_node_who_propagated_header( let expected_hash = block.id; //6. 
Get node A banned // New keypair to avoid getting same block id - let keypair = KeyPair::generate(); + let keypair = KeyPair::generate(0).unwrap(); let mut block = tools::create_block(&keypair); - block.content.header.content_creator_pub_key = KeyPair::generate().get_public_key(); + block.content.header.content_creator_pub_key = + KeyPair::generate(0).unwrap().get_public_key(); network_controller .send_from_peer( &node_a_peer_id, @@ -350,7 +343,7 @@ fn test_protocol_does_not_asks_for_block_from_banned_node_who_propagated_header( //8. Send a wishlist that ask for the first block protocol_controller .send_wishlist_delta( - vec![(expected_hash, Some(block.content.header.clone()))] + vec![(expected_hash, Some(block.content.header))] .into_iter() .collect(), PreHashSet::::default(), @@ -390,14 +383,12 @@ fn test_protocol_bans_all_nodes_propagating_an_attack_attempt() { mut consensus_event_receiver, pool_event_receiver| { //1. Create 2 node - let node_a_keypair = KeyPair::generate(); - let node_b_keypair = KeyPair::generate(); - let (node_a_peer_id, _node_a) = network_controller.create_fake_connection( - PeerId::from_bytes(node_a_keypair.get_public_key().to_bytes()).unwrap(), - ); - let (node_b_peer_id, _node_b) = network_controller.create_fake_connection( - PeerId::from_bytes(node_b_keypair.get_public_key().to_bytes()).unwrap(), - ); + let node_a_keypair = KeyPair::generate(0).unwrap(); + let node_b_keypair = KeyPair::generate(0).unwrap(); + let (node_a_peer_id, _node_a) = network_controller + .create_fake_connection(PeerId::from_public_key(node_a_keypair.get_public_key())); + let (node_b_peer_id, _node_b) = network_controller + .create_fake_connection(PeerId::from_public_key(node_b_keypair.get_public_key())); //2. Create a block. let block = tools::create_block(&node_a_keypair); @@ -445,10 +436,9 @@ fn test_protocol_bans_all_nodes_propagating_an_attack_attempt() { None => {} } //6. Connect a new node that don't known about the attack. - let node_c_keypair = KeyPair::generate(); - let (_node_c_peer_id, _node_c) = network_controller.create_fake_connection( - PeerId::from_bytes(node_c_keypair.get_public_key().to_bytes()).unwrap(), - ); + let node_c_keypair = KeyPair::generate(0).unwrap(); + let (_node_c_peer_id, _node_c) = network_controller + .create_fake_connection(PeerId::from_public_key(node_c_keypair.get_public_key())); //7. Notify the attack protocol_controller.notify_block_attack(block.id).unwrap(); diff --git a/massa-protocol-worker/src/tests/block_scenarios.rs b/massa-protocol-worker/src/tests/block_scenarios.rs index d33bea72462..7c3784a6c0d 100644 --- a/massa-protocol-worker/src/tests/block_scenarios.rs +++ b/massa-protocol-worker/src/tests/block_scenarios.rs @@ -12,10 +12,10 @@ use massa_consensus_exports::test_exports::MockConsensusControllerMessage; use massa_models::prehash::PreHashSet; use massa_models::{block_id::BlockId, slot::Slot}; use massa_protocol_exports::test_exports::tools; +use massa_protocol_exports::PeerId; use massa_protocol_exports::ProtocolConfig; use massa_signature::KeyPair; use massa_time::MassaTime; -use peernet::peer_id::PeerId; use serial_test::serial; #[test] @@ -38,14 +38,12 @@ fn test_full_ask_block_workflow() { mut consensus_event_receiver, pool_event_receiver| { //1. 
Create 2 nodes - let node_a_keypair = KeyPair::generate(); - let node_b_keypair = KeyPair::generate(); - let (node_a_peer_id, node_a) = network_controller.create_fake_connection( - PeerId::from_bytes(node_a_keypair.get_public_key().to_bytes()).unwrap(), - ); - let (node_b_peer_id, node_b) = network_controller.create_fake_connection( - PeerId::from_bytes(node_b_keypair.get_public_key().to_bytes()).unwrap(), - ); + let node_a_keypair = KeyPair::generate(0).unwrap(); + let node_b_keypair = KeyPair::generate(0).unwrap(); + let (node_a_peer_id, node_a) = network_controller + .create_fake_connection(PeerId::from_public_key(node_a_keypair.get_public_key())); + let (node_b_peer_id, node_b) = network_controller + .create_fake_connection(PeerId::from_public_key(node_b_keypair.get_public_key())); //2. Create a block coming from node a. let op_1 = tools::create_operation_with_expire_period(&node_a_keypair, 5); @@ -146,7 +144,7 @@ fn test_full_ask_block_workflow() { &node_b_peer_id, Message::Block(Box::new(BlockMessage::ReplyForBlocks(vec![( block.id, - BlockInfoReply::Operations(vec![op_1.clone(), op_2.clone()]), + BlockInfoReply::Operations(vec![op_1, op_2]), )]))), ) .unwrap(); @@ -211,14 +209,12 @@ fn test_empty_block() { mut consensus_event_receiver, pool_event_receiver| { //1. Create 2 nodes - let node_a_keypair = KeyPair::generate(); - let node_b_keypair = KeyPair::generate(); - let (node_a_peer_id, node_a) = network_controller.create_fake_connection( - PeerId::from_bytes(node_a_keypair.get_public_key().to_bytes()).unwrap(), - ); - let (node_b_peer_id, node_b) = network_controller.create_fake_connection( - PeerId::from_bytes(node_b_keypair.get_public_key().to_bytes()).unwrap(), - ); + let node_a_keypair = KeyPair::generate(0).unwrap(); + let node_b_keypair = KeyPair::generate(0).unwrap(); + let (node_a_peer_id, node_a) = network_controller + .create_fake_connection(PeerId::from_public_key(node_a_keypair.get_public_key())); + let (node_b_peer_id, node_b) = network_controller + .create_fake_connection(PeerId::from_public_key(node_b_keypair.get_public_key())); //2. Create a block coming from node a. let block = tools::create_block(&node_a_keypair); @@ -324,14 +320,12 @@ fn test_dont_want_it_anymore() { consensus_event_receiver, pool_event_receiver| { //1. Create 2 nodes - let node_a_keypair = KeyPair::generate(); - let node_b_keypair = KeyPair::generate(); - let (node_a_peer_id, node_a) = network_controller.create_fake_connection( - PeerId::from_bytes(node_a_keypair.get_public_key().to_bytes()).unwrap(), - ); - let (node_b_peer_id, node_b) = network_controller.create_fake_connection( - PeerId::from_bytes(node_b_keypair.get_public_key().to_bytes()).unwrap(), - ); + let node_a_keypair = KeyPair::generate(0).unwrap(); + let node_b_keypair = KeyPair::generate(0).unwrap(); + let (node_a_peer_id, node_a) = network_controller + .create_fake_connection(PeerId::from_public_key(node_a_keypair.get_public_key())); + let (node_b_peer_id, node_b) = network_controller + .create_fake_connection(PeerId::from_public_key(node_b_keypair.get_public_key())); //2. Create a block coming from node a. let op_1 = tools::create_operation_with_expire_period(&node_a_keypair, 5); @@ -425,18 +419,15 @@ fn test_no_one_has_it() { consensus_event_receiver, pool_event_receiver| { //1. 
Create 3 nodes - let node_a_keypair = KeyPair::generate(); - let node_b_keypair = KeyPair::generate(); - let node_c_keypair = KeyPair::generate(); - let (node_a_peer_id, node_a) = network_controller.create_fake_connection( - PeerId::from_bytes(node_a_keypair.get_public_key().to_bytes()).unwrap(), - ); - let (_node_b_peer_id, node_b) = network_controller.create_fake_connection( - PeerId::from_bytes(node_b_keypair.get_public_key().to_bytes()).unwrap(), - ); - let (_node_c_peer_id, node_c) = network_controller.create_fake_connection( - PeerId::from_bytes(node_c_keypair.get_public_key().to_bytes()).unwrap(), - ); + let node_a_keypair = KeyPair::generate(0).unwrap(); + let node_b_keypair = KeyPair::generate(0).unwrap(); + let node_c_keypair = KeyPair::generate(0).unwrap(); + let (node_a_peer_id, node_a) = network_controller + .create_fake_connection(PeerId::from_public_key(node_a_keypair.get_public_key())); + let (_node_b_peer_id, node_b) = network_controller + .create_fake_connection(PeerId::from_public_key(node_b_keypair.get_public_key())); + let (_node_c_peer_id, node_c) = network_controller + .create_fake_connection(PeerId::from_public_key(node_c_keypair.get_public_key())); //2. Create a block coming from node a. let block = tools::create_block(&node_a_keypair); @@ -504,18 +495,15 @@ fn test_multiple_blocks_without_a_priori() { consensus_event_receiver, pool_event_receiver| { //1. Create 3 nodes - let node_a_keypair = KeyPair::generate(); - let node_b_keypair = KeyPair::generate(); - let node_c_keypair = KeyPair::generate(); - let (node_a_peer_id, _node_a) = network_controller.create_fake_connection( - PeerId::from_bytes(node_a_keypair.get_public_key().to_bytes()).unwrap(), - ); - let (_node_b_peer_id, node_b) = network_controller.create_fake_connection( - PeerId::from_bytes(node_b_keypair.get_public_key().to_bytes()).unwrap(), - ); - let (_node_c_peer_id, node_c) = network_controller.create_fake_connection( - PeerId::from_bytes(node_c_keypair.get_public_key().to_bytes()).unwrap(), - ); + let node_a_keypair = KeyPair::generate(0).unwrap(); + let node_b_keypair = KeyPair::generate(0).unwrap(); + let node_c_keypair = KeyPair::generate(0).unwrap(); + let (node_a_peer_id, _node_a) = network_controller + .create_fake_connection(PeerId::from_public_key(node_a_keypair.get_public_key())); + let (_node_b_peer_id, node_b) = network_controller + .create_fake_connection(PeerId::from_public_key(node_b_keypair.get_public_key())); + let (_node_c_peer_id, node_c) = network_controller + .create_fake_connection(PeerId::from_public_key(node_c_keypair.get_public_key())); //2. Create 2 block coming from node a. let block_1 = tools::create_block(&node_a_keypair); @@ -599,18 +587,15 @@ fn test_protocol_sends_blocks_when_asked_for() { pool_event_receiver, mut storage| { //1. 
Create 3 nodes - let node_a_keypair = KeyPair::generate(); - let node_b_keypair = KeyPair::generate(); - let node_c_keypair = KeyPair::generate(); - let (node_a_peer_id, node_a) = network_controller.create_fake_connection( - PeerId::from_bytes(node_a_keypair.get_public_key().to_bytes()).unwrap(), - ); - let (node_b_peer_id, node_b) = network_controller.create_fake_connection( - PeerId::from_bytes(node_b_keypair.get_public_key().to_bytes()).unwrap(), - ); - let (_node_c_peer_id, node_c) = network_controller.create_fake_connection( - PeerId::from_bytes(node_c_keypair.get_public_key().to_bytes()).unwrap(), - ); + let node_a_keypair = KeyPair::generate(0).unwrap(); + let node_b_keypair = KeyPair::generate(0).unwrap(); + let node_c_keypair = KeyPair::generate(0).unwrap(); + let (node_a_peer_id, node_a) = network_controller + .create_fake_connection(PeerId::from_public_key(node_a_keypair.get_public_key())); + let (node_b_peer_id, node_b) = network_controller + .create_fake_connection(PeerId::from_public_key(node_b_keypair.get_public_key())); + let (_node_c_peer_id, node_c) = network_controller + .create_fake_connection(PeerId::from_public_key(node_c_keypair.get_public_key())); //2. Create a block coming from node a. let block = tools::create_block(&node_a_keypair); @@ -686,18 +671,15 @@ fn test_protocol_propagates_block_to_node_who_asked_for_operations_and_only_head pool_event_receiver, mut storage| { //1. Create 3 nodes - let node_a_keypair = KeyPair::generate(); - let node_b_keypair = KeyPair::generate(); - let node_c_keypair = KeyPair::generate(); - let (node_a_peer_id, node_a) = network_controller.create_fake_connection( - PeerId::from_bytes(node_a_keypair.get_public_key().to_bytes()).unwrap(), - ); - let (node_b_peer_id, node_b) = network_controller.create_fake_connection( - PeerId::from_bytes(node_b_keypair.get_public_key().to_bytes()).unwrap(), - ); - let (_node_c_peer_id, node_c) = network_controller.create_fake_connection( - PeerId::from_bytes(node_c_keypair.get_public_key().to_bytes()).unwrap(), - ); + let node_a_keypair = KeyPair::generate(0).unwrap(); + let node_b_keypair = KeyPair::generate(0).unwrap(); + let node_c_keypair = KeyPair::generate(0).unwrap(); + let (node_a_peer_id, node_a) = network_controller + .create_fake_connection(PeerId::from_public_key(node_a_keypair.get_public_key())); + let (node_b_peer_id, node_b) = network_controller + .create_fake_connection(PeerId::from_public_key(node_b_keypair.get_public_key())); + let (_node_c_peer_id, node_c) = network_controller + .create_fake_connection(PeerId::from_public_key(node_c_keypair.get_public_key())); //2. Create a block coming from node a. let block = tools::create_block(&node_a_keypair); diff --git a/massa-protocol-worker/src/tests/cache_scenarios.rs b/massa-protocol-worker/src/tests/cache_scenarios.rs index d7081548d98..361f173858a 100644 --- a/massa-protocol-worker/src/tests/cache_scenarios.rs +++ b/massa-protocol-worker/src/tests/cache_scenarios.rs @@ -4,10 +4,10 @@ use std::time::Duration; use massa_consensus_exports::test_exports::MockConsensusControllerMessage; use massa_models::{block_id::BlockId, prehash::PreHashSet, slot::Slot}; +use massa_protocol_exports::PeerId; use massa_protocol_exports::{test_exports::tools, ProtocolConfig}; use massa_signature::KeyPair; use massa_time::MassaTime; -use peernet::peer_id::PeerId; use serial_test::serial; use crate::{ @@ -38,14 +38,12 @@ fn test_noting_block_does_not_panic_with_one_max_node_known_blocks_size() { mut consensus_event_receiver, pool_event_receiver| { //1. 
Create 2 nodes - let node_a_keypair = KeyPair::generate(); - let node_b_keypair = KeyPair::generate(); - let (node_a_peer_id, node_a) = network_controller.create_fake_connection( - PeerId::from_bytes(node_a_keypair.get_public_key().to_bytes()).unwrap(), - ); - let (node_b_peer_id, node_b) = network_controller.create_fake_connection( - PeerId::from_bytes(node_b_keypair.get_public_key().to_bytes()).unwrap(), - ); + let node_a_keypair = KeyPair::generate(0).unwrap(); + let node_b_keypair = KeyPair::generate(0).unwrap(); + let (node_a_peer_id, node_a) = network_controller + .create_fake_connection(PeerId::from_public_key(node_a_keypair.get_public_key())); + let (node_b_peer_id, node_b) = network_controller + .create_fake_connection(PeerId::from_public_key(node_b_keypair.get_public_key())); //2. Create a block coming from node a. let op_1 = tools::create_operation_with_expire_period(&node_a_keypair, 5); @@ -146,7 +144,7 @@ fn test_noting_block_does_not_panic_with_one_max_node_known_blocks_size() { &node_b_peer_id, Message::Block(Box::new(BlockMessage::ReplyForBlocks(vec![( block.id, - BlockInfoReply::Operations(vec![op_1.clone(), op_2.clone()]), + BlockInfoReply::Operations(vec![op_1, op_2]), )]))), ) .unwrap(); diff --git a/massa-protocol-worker/src/tests/context.rs b/massa-protocol-worker/src/tests/context.rs index 0f8c14fd15b..d57cc9375dc 100644 --- a/massa-protocol-worker/src/tests/context.rs +++ b/massa-protocol-worker/src/tests/context.rs @@ -10,6 +10,7 @@ use massa_consensus_exports::{ test_exports::{ConsensusControllerImpl, ConsensusEventReceiver}, ConsensusController, }; +use massa_models::config::{MIP_STORE_STATS_BLOCK_CONSIDERED, MIP_STORE_STATS_COUNTERS_MAX}; //use crate::handlers::block_handler::BlockInfoReply; use massa_pool_exports::{ test_exports::{MockPoolController, PoolEventReceiver}, @@ -19,9 +20,10 @@ use massa_protocol_exports::{ PeerCategoryInfo, PeerId, ProtocolConfig, ProtocolController, ProtocolError, ProtocolManager, }; use massa_serialization::U64VarIntDeserializer; +use massa_signature::KeyPair; use massa_storage::Storage; +use massa_versioning::versioning::{MipStatsConfig, MipStore}; use parking_lot::RwLock; -use peernet::types::KeyPair; use std::ops::Bound::Included; use tracing::{debug, log::warn}; @@ -53,7 +55,7 @@ pub fn start_protocol_controller_with_mock_network( serde_json::from_slice::<KeyPair>(keypair_bs58_check_encoded.as_bytes())? } else { // node file does not exist: generate the key and save it - let keypair = KeyPair::generate(); + let keypair = KeyPair::generate(0).unwrap(); if let Err(e) = std::fs::write(&config.keypair_file, serde_json::to_string(&keypair)?) 
{ warn!("could not generate node key file: {}", e); } @@ -83,6 +85,12 @@ pub fn start_protocol_controller_with_mock_network( let network_controller = Box::new(MockNetworkController::new(message_handlers.clone())); + let mip_stats_config = MipStatsConfig { + block_count_considered: MIP_STORE_STATS_BLOCK_CONSIDERED, + counters_max: MIP_STORE_STATS_COUNTERS_MAX, + }; + let mip_store = MipStore::try_from(([], mip_stats_config)).unwrap(); + let connectivity_thread_handle = start_connectivity_thread( PeerId::from_public_key(keypair.get_public_key()), network_controller.clone(), @@ -105,6 +113,7 @@ pub fn start_protocol_controller_with_mock_network( max_in_connections_per_ip: 10, }, config, + mip_store, )?; let manager = ProtocolManagerImpl::new(connectivity_thread_handle); diff --git a/massa-protocol-worker/src/tests/endorsements_scenarios.rs b/massa-protocol-worker/src/tests/endorsements_scenarios.rs index 2c756469aba..3a168683395 100644 --- a/massa-protocol-worker/src/tests/endorsements_scenarios.rs +++ b/massa-protocol-worker/src/tests/endorsements_scenarios.rs @@ -8,9 +8,10 @@ use massa_models::{ slot::Slot, }; use massa_pool_exports::test_exports::MockPoolControllerMessage; +use massa_protocol_exports::PeerId; use massa_protocol_exports::{test_exports::tools, ProtocolConfig}; use massa_signature::KeyPair; -use peernet::peer_id::PeerId; +use massa_time::MassaTime; use serial_test::serial; use crate::{ @@ -40,10 +41,9 @@ fn test_protocol_sends_valid_endorsements_it_receives_to_pool() { consensus_event_receiver, mut pool_event_receiver| { //1. Create 1 nodes - let node_a_keypair = KeyPair::generate(); - let (node_a_peer_id, _node_a) = network_controller.create_fake_connection( - PeerId::from_bytes(node_a_keypair.get_public_key().to_bytes()).unwrap(), - ); + let node_a_keypair = KeyPair::generate(0).unwrap(); + let (node_a_peer_id, _node_a) = network_controller + .create_fake_connection(PeerId::from_public_key(node_a_keypair.get_public_key())); //2. Create an endorsement let endorsement = tools::create_endorsement(); @@ -58,16 +58,18 @@ fn test_protocol_sends_valid_endorsements_it_receives_to_pool() { .unwrap(); //3. Check protocol sends endorsements to pool. - let received_endorsements = - match pool_event_receiver.wait_command(1500.into(), |evt| match evt { + let received_endorsements = match pool_event_receiver.wait_command( + MassaTime::from_millis(1500), + |evt| match evt { evt @ MockPoolControllerMessage::AddEndorsements { .. } => Some(evt), _ => None, - }) { - Some(MockPoolControllerMessage::AddEndorsements { endorsements, .. }) => { - endorsements - } - _ => panic!("Unexpected or no protocol pool event."), - }; + }, + ) { + Some(MockPoolControllerMessage::AddEndorsements { endorsements, .. }) => { + endorsements + } + _ => panic!("Unexpected or no protocol pool event."), + }; assert!(received_endorsements .get_endorsement_refs() .contains(&endorsement.id)); @@ -103,28 +105,25 @@ fn test_protocol_does_not_send_invalid_endorsements_it_receives_to_pool() { consensus_event_receiver, mut pool_event_receiver| { //1. Create 1 nodes - let node_a_keypair = KeyPair::generate(); - let (node_a_peer_id, _node_a) = network_controller.create_fake_connection( - PeerId::from_bytes(node_a_keypair.get_public_key().to_bytes()).unwrap(), - ); + let node_a_keypair = KeyPair::generate(0).unwrap(); + let (node_a_peer_id, _node_a) = network_controller + .create_fake_connection(PeerId::from_public_key(node_a_keypair.get_public_key())); //2. 
Create an endorsement let mut endorsement = tools::create_endorsement(); //3. Change endorsement to make the signature invalid - endorsement.content_creator_pub_key = node_a_keypair.get_public_key().clone(); + endorsement.content_creator_pub_key = node_a_keypair.get_public_key(); network_controller .send_from_peer( &node_a_peer_id, - Message::Endorsement(EndorsementMessage::Endorsements(vec![ - endorsement.clone() - ])), + Message::Endorsement(EndorsementMessage::Endorsements(vec![endorsement])), ) .unwrap(); //4. Check protocol does not send endorsements to pool. - pool_event_receiver.wait_command(1000.into(), |evt| match evt { + pool_event_receiver.wait_command(MassaTime::from_millis(1000), |evt| match evt { MockPoolControllerMessage::AddEndorsements { .. } => { panic!("Protocol sent invalid endorsements.") } @@ -161,14 +160,12 @@ fn test_protocol_propagates_endorsements_to_active_nodes() { consensus_event_receiver, mut pool_event_receiver| { //1. Create 2 nodes - let node_a_keypair = KeyPair::generate(); - let node_b_keypair = KeyPair::generate(); - let (node_a_peer_id, node_a) = network_controller.create_fake_connection( - PeerId::from_bytes(node_a_keypair.get_public_key().to_bytes()).unwrap(), - ); - let (_node_b_peer_id, node_b) = network_controller.create_fake_connection( - PeerId::from_bytes(node_b_keypair.get_public_key().to_bytes()).unwrap(), - ); + let node_a_keypair = KeyPair::generate(0).unwrap(); + let node_b_keypair = KeyPair::generate(0).unwrap(); + let (node_a_peer_id, node_a) = network_controller + .create_fake_connection(PeerId::from_public_key(node_a_keypair.get_public_key())); + let (_node_b_peer_id, node_b) = network_controller + .create_fake_connection(PeerId::from_public_key(node_b_keypair.get_public_key())); //2. Create an endorsement let endorsement = tools::create_endorsement(); @@ -183,7 +180,7 @@ fn test_protocol_propagates_endorsements_to_active_nodes() { .unwrap(); //3. Check protocol sends endorsements to pool. - pool_event_receiver.wait_command(1000.into(), |evt| match evt { + pool_event_receiver.wait_command(MassaTime::from_millis(1000), |evt| match evt { MockPoolControllerMessage::AddEndorsements { .. } => { Some(MockPoolControllerMessage::Any) } @@ -236,14 +233,12 @@ fn test_protocol_propagates_endorsements_only_to_nodes_that_dont_know_about_it_b mut pool_event_receiver, mut storage| { //1. Create 2 nodes - let node_a_keypair = KeyPair::generate(); - let node_b_keypair = KeyPair::generate(); - let (node_a_peer_id, node_a) = network_controller.create_fake_connection( - PeerId::from_bytes(node_a_keypair.get_public_key().to_bytes()).unwrap(), - ); - let (_node_b_peer_id, node_b) = network_controller.create_fake_connection( - PeerId::from_bytes(node_b_keypair.get_public_key().to_bytes()).unwrap(), - ); + let node_a_keypair = KeyPair::generate(0).unwrap(); + let node_b_keypair = KeyPair::generate(0).unwrap(); + let (node_a_peer_id, node_a) = network_controller + .create_fake_connection(PeerId::from_public_key(node_a_keypair.get_public_key())); + let (_node_b_peer_id, node_b) = network_controller + .create_fake_connection(PeerId::from_public_key(node_b_keypair.get_public_key())); //2.
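// Why replacing content_creator_pub_key invalidates the endorsement: the
// signature was produced by a different keypair, and verification binds a
// signature to one public key. Illustrative sketch using only the
// massa_signature / massa_hash APIs shown in this patch:
use massa_hash::Hash;
use massa_signature::KeyPair;

fn signature_rejects_wrong_creator() {
    let signer = KeyPair::generate(0).unwrap();
    let other = KeyPair::generate(0).unwrap();
    let hash = Hash::compute_from("endorsement payload".as_bytes());
    let signature = signer.sign(&hash).unwrap();
    // The real creator verifies fine...
    assert!(signer.get_public_key().verify_signature(&hash, &signature).is_ok());
    // ...but any other claimed creator is rejected.
    assert!(other.get_public_key().verify_signature(&hash, &signature).is_err());
}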
Create an endorsement let content = Endorsement { @@ -263,9 +258,7 @@ fn test_protocol_propagates_endorsements_only_to_nodes_that_dont_know_about_it_b network_controller .send_from_peer( &node_a_peer_id, - Message::Block(Box::new(BlockMessage::BlockHeader( - block.content.header.clone(), - ))), + Message::Block(Box::new(BlockMessage::BlockHeader(block.content.header))), ) .unwrap(); @@ -277,7 +270,7 @@ fn test_protocol_propagates_endorsements_only_to_nodes_that_dont_know_about_it_b protocol_controller.propagate_endorsements(storage).unwrap(); //3. Check protocol sends endorsements to pool. - pool_event_receiver.wait_command(1000.into(), |evt| match evt { + pool_event_receiver.wait_command(MassaTime::from_millis(1000), |evt| match evt { MockPoolControllerMessage::AddEndorsements { .. } => { Some(MockPoolControllerMessage::Any) } diff --git a/massa-protocol-worker/src/tests/in_block_operations_scenarios.rs b/massa-protocol-worker/src/tests/in_block_operations_scenarios.rs index d371e12b0a7..fc9c6d5bb1e 100644 --- a/massa-protocol-worker/src/tests/in_block_operations_scenarios.rs +++ b/massa-protocol-worker/src/tests/in_block_operations_scenarios.rs @@ -14,10 +14,10 @@ use massa_models::{ secure_share::{Id, SecureShare, SecureShareContent}, slot::Slot, }; +use massa_protocol_exports::PeerId; use massa_protocol_exports::{test_exports::tools, ProtocolConfig}; use massa_signature::KeyPair; use massa_time::MassaTime; -use peernet::peer_id::PeerId; use serial_test::serial; use super::{context::protocol_test, tools::send_and_propagate_block}; @@ -42,14 +42,12 @@ fn test_protocol_does_propagate_operations_received_in_blocks() { mut consensus_event_receiver, pool_event_receiver| { //1. Create 2 nodes - let node_a_keypair = KeyPair::generate(); - let node_b_keypair = KeyPair::generate(); - let (node_a_peer_id, _node_a) = network_controller.create_fake_connection( - PeerId::from_bytes(node_a_keypair.get_public_key().to_bytes()).unwrap(), - ); - let (_node_b_peer_id, node_b) = network_controller.create_fake_connection( - PeerId::from_bytes(node_b_keypair.get_public_key().to_bytes()).unwrap(), - ); + let node_a_keypair = KeyPair::generate(0).unwrap(); + let node_b_keypair = KeyPair::generate(0).unwrap(); + let (node_a_peer_id, _node_a) = network_controller + .create_fake_connection(PeerId::from_public_key(node_a_keypair.get_public_key())); + let (_node_b_peer_id, node_b) = network_controller + .create_fake_connection(PeerId::from_public_key(node_b_keypair.get_public_key())); //2. Create a block coming from node a. let op_1 = tools::create_operation_with_expire_period(&node_a_keypair, 5); @@ -143,14 +141,12 @@ fn test_protocol_sends_blocks_with_operations_to_consensus() { mut consensus_event_receiver, pool_event_receiver| { //1. 
Create 2 nodes - let node_a_keypair = KeyPair::generate(); - let node_b_keypair = KeyPair::generate(); - let (node_a_peer_id, _node_a) = network_controller.create_fake_connection( - PeerId::from_bytes(node_a_keypair.get_public_key().to_bytes()).unwrap(), - ); - let (_node_b_peer_id, _node_b) = network_controller.create_fake_connection( - PeerId::from_bytes(node_b_keypair.get_public_key().to_bytes()).unwrap(), - ); + let node_a_keypair = KeyPair::generate(0).unwrap(); + let node_b_keypair = KeyPair::generate(0).unwrap(); + let (node_a_peer_id, _node_a) = network_controller + .create_fake_connection(PeerId::from_public_key(node_a_keypair.get_public_key())); + let (_node_b_peer_id, _node_b) = network_controller + .create_fake_connection(PeerId::from_public_key(node_b_keypair.get_public_key())); //2. Create a block coming from node a. let op_1 = tools::create_operation_with_expire_period(&node_a_keypair, 5); @@ -170,7 +166,7 @@ fn test_protocol_sends_blocks_with_operations_to_consensus() { block.clone(), &node_a_peer_id, &protocol_controller, - vec![op_1.clone()], + vec![op_1], ); //4. Verify that we sent to consensus @@ -203,6 +199,8 @@ fn test_protocol_sends_blocks_with_operations_to_consensus() { let header = BlockHeader::new_verifiable( BlockHeader { + announced_version: 0, + current_version: 0, slot: Slot::new(1, op_thread), parents: vec![ BlockId(Hash::compute_from("Genesis 0".as_bytes())), @@ -233,7 +231,7 @@ fn test_protocol_sends_blocks_with_operations_to_consensus() { block.clone(), &node_a_peer_id, &protocol_controller, - vec![op.clone()], + vec![op], ); // Check protocol did send block header to consensus but not the full block. @@ -286,7 +284,7 @@ fn test_protocol_sends_blocks_with_operations_to_consensus() { block.clone(), &node_a_peer_id, &protocol_controller, - vec![op.clone()], + vec![op], ); // Check protocol did send block header to consensus but not the full block. 
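// The migration repeated throughout these tests, in one place:
// KeyPair::generate now takes a version and returns a Result, and a PeerId is
// derived directly from the public key instead of round-tripping through
// PeerId::from_bytes(...to_bytes()). Minimal sketch with the APIs from this
// patch (massa_signature, massa_protocol_exports):
use massa_protocol_exports::PeerId;
use massa_signature::{KeyPair, MassaSignatureError};

fn fake_peer_id() -> Result<PeerId, MassaSignatureError> {
    // Version-0 keypair; an unknown version returns
    // MassaSignatureError::InvalidVersionError instead of panicking.
    let keypair = KeyPair::generate(0)?;
    Ok(PeerId::from_public_key(keypair.get_public_key()))
}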
diff --git a/massa-protocol-worker/src/tests/mock_network.rs b/massa-protocol-worker/src/tests/mock_network.rs index 23a88a9f057..5581f6bf7d7 100644 --- a/massa-protocol-worker/src/tests/mock_network.rs +++ b/massa-protocol-worker/src/tests/mock_network.rs @@ -4,14 +4,13 @@ use std::{ }; use crossbeam::channel::{Receiver, Sender}; -use massa_protocol_exports::ProtocolError; +use massa_protocol_exports::{PeerId, ProtocolError}; use parking_lot::RwLock; use peernet::{ messages::{ MessagesHandler as PeerNetMessagesHandler, MessagesSerializer as PeerNetMessagesSerializer, }, peer::PeerConnectionType, - peer_id::PeerId, }; use crate::{ @@ -58,8 +57,8 @@ impl ActiveConnectionsTrait for SharedMockActiveConnections { ) -> HashMap)> { self.read() .connections - .iter() - .map(|(peer_id, _)| { + .keys() + .map(|peer_id| { ( peer_id.clone(), ( @@ -83,7 +82,12 @@ impl ActiveConnectionsTrait for SharedMockActiveConnections { message: Message, _high_priority: bool, ) -> Result<(), massa_protocol_exports::ProtocolError> { - let _ = self.read().connections.get(peer_id).unwrap().send(message); + let _ = self + .read() + .connections + .get(peer_id) + .unwrap() + .try_send(message); Ok(()) } @@ -159,18 +163,11 @@ impl MockNetworkController { )); } let mut data = Vec::new(); - self.message_serializer - .serialize_id(&message, &mut data) - .map_err(|err| ProtocolError::GeneralProtocolError(err.to_string()))?; self.message_serializer .serialize(&message, &mut data) .map_err(|err| ProtocolError::GeneralProtocolError(err.to_string()))?; - let (rest, id) = self - .messages_handler - .deserialize_id(&data, peer_id) - .map_err(|err| ProtocolError::GeneralProtocolError(err.to_string()))?; self.messages_handler - .handle(id, rest, peer_id) + .handle(&data, peer_id) .map_err(|err| ProtocolError::GeneralProtocolError(err.to_string()))?; Ok(()) } @@ -201,7 +198,6 @@ impl NetworkController for MockNetworkController { &mut self, _addr: std::net::SocketAddr, _timeout: std::time::Duration, - _out_connection_config: &peernet::transports::OutConnectionConfig, ) -> Result<(), massa_protocol_exports::ProtocolError> { Ok(()) } diff --git a/massa-protocol-worker/src/tests/mod.rs b/massa-protocol-worker/src/tests/mod.rs index f8ec6bb903d..582a9e9dffa 100644 --- a/massa-protocol-worker/src/tests/mod.rs +++ b/massa-protocol-worker/src/tests/mod.rs @@ -1,10 +1,13 @@ use std::{collections::HashMap, fs::read_to_string, time::Duration}; use massa_consensus_exports::test_exports::ConsensusControllerImpl; +use massa_models::config::{MIP_STORE_STATS_BLOCK_CONSIDERED, MIP_STORE_STATS_COUNTERS_MAX}; use massa_pool_exports::test_exports::MockPoolController; -use massa_protocol_exports::{PeerCategoryInfo, PeerData, ProtocolConfig}; +use massa_protocol_exports::{PeerCategoryInfo, PeerData, PeerId, ProtocolConfig}; +use massa_signature::KeyPair; use massa_storage::Storage; -use peernet::{peer_id::PeerId, transports::TransportType, types::KeyPair}; +use massa_versioning::versioning::{MipStatsConfig, MipStore}; +use peernet::transports::TransportType; use tempfile::NamedTempFile; use crate::{create_protocol_controller, start_protocol_controller}; @@ -121,6 +124,14 @@ fn basic() { let (mut sender_manager1, channels1) = create_protocol_controller(config1.clone()); let (mut sender_manager2, channels2) = create_protocol_controller(config2.clone()); + + // Setup the MIP store + let mip_stats_config = MipStatsConfig { + block_count_considered: MIP_STORE_STATS_BLOCK_CONSIDERED, + counters_max: MIP_STORE_STATS_COUNTERS_MAX, + }; + let mip_store = 
MipStore::try_from(([], mip_stats_config)).unwrap(); + // Setup the protocols let (mut manager1, _, _) = start_protocol_controller( config1, @@ -129,6 +140,7 @@ fn basic() { pool_controller1, storage1, channels1, + mip_store.clone(), ) .expect("Failed to start protocol 1"); let (mut manager2, _, _) = start_protocol_controller( @@ -138,6 +150,7 @@ fn basic() { pool_controller2, storage2, channels2, + mip_store, ) .expect("Failed to start protocol 2"); @@ -245,6 +258,13 @@ fn stop_with_controller_still_exists() { let storage1 = Storage::create_root(); let storage2 = Storage::create_root(); + // Setup the MIP store + let mip_stats_config = MipStatsConfig { + block_count_considered: MIP_STORE_STATS_BLOCK_CONSIDERED, + counters_max: MIP_STORE_STATS_COUNTERS_MAX, + }; + let mip_store = MipStore::try_from(([], mip_stats_config)).unwrap(); + // Setup the protocols let (mut sender_manager1, channels1) = create_protocol_controller(config1.clone()); let (mut sender_manager2, channels2) = create_protocol_controller(config2.clone()); @@ -255,6 +275,7 @@ fn stop_with_controller_still_exists() { pool_controller1, storage1, channels1, + mip_store.clone(), ) .expect("Failed to start protocol 1"); let (mut manager2, _, _) = start_protocol_controller( @@ -264,6 +285,7 @@ fn stop_with_controller_still_exists() { pool_controller2, storage2, channels2, + mip_store, ) .expect("Failed to start protocol 2"); diff --git a/massa-protocol-worker/src/tests/operations_scenarios.rs b/massa-protocol-worker/src/tests/operations_scenarios.rs index 81da0949000..15efb552490 100644 --- a/massa-protocol-worker/src/tests/operations_scenarios.rs +++ b/massa-protocol-worker/src/tests/operations_scenarios.rs @@ -5,10 +5,10 @@ use std::time::Duration; use massa_consensus_exports::test_exports::MockConsensusControllerMessage; use massa_models::{block_id::BlockId, prehash::PreHashSet, slot::Slot}; use massa_pool_exports::test_exports::MockPoolControllerMessage; +use massa_protocol_exports::PeerId; use massa_protocol_exports::{test_exports::tools, ProtocolConfig}; use massa_signature::KeyPair; use massa_time::MassaTime; -use peernet::peer_id::PeerId; use serial_test::serial; use crate::{ @@ -44,10 +44,9 @@ fn test_protocol_sends_valid_operations_it_receives_to_pool() { consensus_event_receiver, mut pool_event_receiver| { //1. Create 1 node - let node_a_keypair = KeyPair::generate(); - let (node_a_peer_id, _node_a) = network_controller.create_fake_connection( - PeerId::from_bytes(node_a_keypair.get_public_key().to_bytes()).unwrap(), - ); + let node_a_keypair = KeyPair::generate(0).unwrap(); + let (node_a_peer_id, _node_a) = network_controller + .create_fake_connection(PeerId::from_public_key(node_a_keypair.get_public_key())); //2. Creates 2 ops let operation_1 = tools::create_operation_with_expire_period(&node_a_keypair, 1); let operation_2 = tools::create_operation_with_expire_period(&node_a_keypair, 1); @@ -64,14 +63,16 @@ fn test_protocol_sends_valid_operations_it_receives_to_pool() { .unwrap(); //4. Check protocol sends operations to pool. - let received_operations = - match pool_event_receiver.wait_command(1000.into(), |evt| match evt { + let received_operations = match pool_event_receiver.wait_command( + MassaTime::from_millis(1000), + |evt| match evt { evt @ MockPoolControllerMessage::AddOperations { .. } => Some(evt), _ => None, - }) { - Some(MockPoolControllerMessage::AddOperations { operations, .. 
}) => operations, - _ => panic!("Unexpected or no protocol pool event."), - }; + }, + ) { + Some(MockPoolControllerMessage::AddOperations { operations, .. }) => operations, + _ => panic!("Unexpected or no protocol pool event."), + }; let op_refs = received_operations.get_op_refs(); // Check the event includes the expected operations. @@ -113,26 +114,25 @@ fn test_protocol_does_not_send_invalid_operations_it_receives_to_pool() { consensus_event_receiver, mut pool_event_receiver| { //1. Create 1 node - let node_a_keypair = KeyPair::generate(); - let (node_a_peer_id, _node_a) = network_controller.create_fake_connection( - PeerId::from_bytes(node_a_keypair.get_public_key().to_bytes()).unwrap(), - ); + let node_a_keypair = KeyPair::generate(0).unwrap(); + let (node_a_peer_id, _node_a) = network_controller + .create_fake_connection(PeerId::from_public_key(node_a_keypair.get_public_key())); //2. Creates 1 op let mut operation_1 = tools::create_operation_with_expire_period(&node_a_keypair, 1); //3. Making the signature of the op invalid - operation_1.content_creator_pub_key = KeyPair::generate().get_public_key(); + operation_1.content_creator_pub_key = KeyPair::generate(0).unwrap().get_public_key(); //4. Node A send the ops network_controller .send_from_peer( &node_a_peer_id, - Message::Operation(OperationMessage::Operations(vec![operation_1.clone()])), + Message::Operation(OperationMessage::Operations(vec![operation_1])), ) .unwrap(); //5. Check protocol didn't sent operations to pool. - match pool_event_receiver.wait_command(1000.into(), |_| Some(())) { + match pool_event_receiver.wait_command(MassaTime::from_millis(1000), |_| Some(())) { Some(_) => panic!("Unexpected or no protocol pool event."), _ => (), } @@ -169,14 +169,12 @@ fn test_protocol_propagates_operations_to_active_nodes() { mut pool_event_receiver, mut storage| { //1. Create 2 node - let node_a_keypair = KeyPair::generate(); - let (node_a_peer_id, node_a) = network_controller.create_fake_connection( - PeerId::from_bytes(node_a_keypair.get_public_key().to_bytes()).unwrap(), - ); - let node_b_keypair = KeyPair::generate(); - let (_node_b_peer_id, node_b) = network_controller.create_fake_connection( - PeerId::from_bytes(node_b_keypair.get_public_key().to_bytes()).unwrap(), - ); + let node_a_keypair = KeyPair::generate(0).unwrap(); + let (node_a_peer_id, node_a) = network_controller + .create_fake_connection(PeerId::from_public_key(node_a_keypair.get_public_key())); + let node_b_keypair = KeyPair::generate(0).unwrap(); + let (_node_b_peer_id, node_b) = network_controller + .create_fake_connection(PeerId::from_public_key(node_b_keypair.get_public_key())); //2. Creates 1 ops let operation = tools::create_operation_with_expire_period(&node_a_keypair, 1); @@ -189,7 +187,7 @@ fn test_protocol_propagates_operations_to_active_nodes() { .unwrap(); //4. Check protocol sends operations to pool. - pool_event_receiver.wait_command(1000.into(), |evt| match evt { + pool_event_receiver.wait_command(MassaTime::from_millis(1000), |evt| match evt { MockPoolControllerMessage::AddOperations { .. } => { Some(MockPoolControllerMessage::Any) } @@ -249,14 +247,12 @@ fn test_protocol_propagates_operations_received_over_the_network_only_to_nodes_t consensus_event_receiver, mut pool_event_receiver| { //1. 
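// These hunks also replace every implicit `1000.into()` timeout with
// MassaTime::from_millis(1000). Illustrative sketch (from_millis is the
// constructor used throughout this patch):
use massa_time::MassaTime;

fn poll_timeout() -> MassaTime {
    // The unit (milliseconds) is now explicit at the call site, where the
    // old `1000.into()` left it implicit.
    MassaTime::from_millis(1000)
}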
Create 2 node - let node_a_keypair = KeyPair::generate(); - let (node_a_peer_id, node_a) = network_controller.create_fake_connection( - PeerId::from_bytes(node_a_keypair.get_public_key().to_bytes()).unwrap(), - ); - let node_b_keypair = KeyPair::generate(); - let (_node_b_peer_id, node_b) = network_controller.create_fake_connection( - PeerId::from_bytes(node_b_keypair.get_public_key().to_bytes()).unwrap(), - ); + let node_a_keypair = KeyPair::generate(0).unwrap(); + let (node_a_peer_id, node_a) = network_controller + .create_fake_connection(PeerId::from_public_key(node_a_keypair.get_public_key())); + let node_b_keypair = KeyPair::generate(0).unwrap(); + let (_node_b_peer_id, node_b) = network_controller + .create_fake_connection(PeerId::from_public_key(node_b_keypair.get_public_key())); //2. Creates 1 ops let operation = tools::create_operation_with_expire_period(&node_a_keypair, 1); @@ -269,7 +265,7 @@ fn test_protocol_propagates_operations_received_over_the_network_only_to_nodes_t .unwrap(); //4. Check protocol sends operations to pool. - pool_event_receiver.wait_command(1000.into(), |evt| match evt { + pool_event_receiver.wait_command(MassaTime::from_millis(1000), |evt| match evt { MockPoolControllerMessage::AddOperations { .. } => { Some(MockPoolControllerMessage::Any) } @@ -325,14 +321,12 @@ fn test_protocol_batches_propagation_of_operations_received_over_the_network_and mut pool_event_receiver, mut storage| { //1. Create 2 node - let node_a_keypair = KeyPair::generate(); - let (node_a_peer_id, node_a) = network_controller.create_fake_connection( - PeerId::from_bytes(node_a_keypair.get_public_key().to_bytes()).unwrap(), - ); - let node_b_keypair = KeyPair::generate(); - let (_node_b_peer_id, node_b) = network_controller.create_fake_connection( - PeerId::from_bytes(node_b_keypair.get_public_key().to_bytes()).unwrap(), - ); + let node_a_keypair = KeyPair::generate(0).unwrap(); + let (node_a_peer_id, node_a) = network_controller + .create_fake_connection(PeerId::from_public_key(node_a_keypair.get_public_key())); + let node_b_keypair = KeyPair::generate(0).unwrap(); + let (_node_b_peer_id, node_b) = network_controller + .create_fake_connection(PeerId::from_public_key(node_b_keypair.get_public_key())); //2. Creates 1 ops let operation = tools::create_operation_with_expire_period(&node_a_keypair, 1); @@ -345,7 +339,7 @@ fn test_protocol_batches_propagation_of_operations_received_over_the_network_and .unwrap(); //4. Check protocol sends operations to pool. - match pool_event_receiver.wait_command(1000.into(), |evt| match evt { + match pool_event_receiver.wait_command(MassaTime::from_millis(1000), |evt| match evt { MockPoolControllerMessage::AddOperations { .. } => { Some(MockPoolControllerMessage::Any) } @@ -420,14 +414,12 @@ fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_it_ind pool_event_receiver, mut storage| { //1. 
Create 2 node - let node_a_keypair = KeyPair::generate(); - let (node_a_peer_id, node_a) = network_controller.create_fake_connection( - PeerId::from_bytes(node_a_keypair.get_public_key().to_bytes()).unwrap(), - ); - let node_b_keypair = KeyPair::generate(); - let (_node_b_peer_id, node_b) = network_controller.create_fake_connection( - PeerId::from_bytes(node_b_keypair.get_public_key().to_bytes()).unwrap(), - ); + let node_a_keypair = KeyPair::generate(0).unwrap(); + let (node_a_peer_id, node_a) = network_controller + .create_fake_connection(PeerId::from_public_key(node_a_keypair.get_public_key())); + let node_b_keypair = KeyPair::generate(0).unwrap(); + let (_node_b_peer_id, node_b) = network_controller + .create_fake_connection(PeerId::from_public_key(node_b_keypair.get_public_key())); //2. Creates 1 ops let operation = tools::create_operation_with_expire_period(&node_a_keypair, 1); @@ -548,18 +540,15 @@ fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_it_ind pool_event_receiver, mut storage| { //1. Create 3 node - let node_a_keypair = KeyPair::generate(); - let node_b_keypair = KeyPair::generate(); - let node_c_keypair = KeyPair::generate(); - let (node_a_peer_id, node_a) = network_controller.create_fake_connection( - PeerId::from_bytes(node_a_keypair.get_public_key().to_bytes()).unwrap(), - ); - let (node_b_peer_id, node_b) = network_controller.create_fake_connection( - PeerId::from_bytes(node_b_keypair.get_public_key().to_bytes()).unwrap(), - ); - let (node_c_peer_id, node_c) = network_controller.create_fake_connection( - PeerId::from_bytes(node_c_keypair.get_public_key().to_bytes()).unwrap(), - ); + let node_a_keypair = KeyPair::generate(0).unwrap(); + let node_b_keypair = KeyPair::generate(0).unwrap(); + let node_c_keypair = KeyPair::generate(0).unwrap(); + let (node_a_peer_id, node_a) = network_controller + .create_fake_connection(PeerId::from_public_key(node_a_keypair.get_public_key())); + let (node_b_peer_id, node_b) = network_controller + .create_fake_connection(PeerId::from_public_key(node_b_keypair.get_public_key())); + let (node_c_peer_id, node_c) = network_controller + .create_fake_connection(PeerId::from_public_key(node_c_keypair.get_public_key())); //2. Creates 2 ops let operation_1 = tools::create_operation_with_expire_period(&node_a_keypair, 1); let operation_2 = tools::create_operation_with_expire_period(&node_a_keypair, 1); @@ -623,7 +612,7 @@ fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_it_ind &node_c_peer_id, Message::Block(Box::new(BlockMessage::ReplyForBlocks(vec![( block.id, - BlockInfoReply::Operations(vec![operation_1.clone()]), + BlockInfoReply::Operations(vec![operation_1]), )]))), ) .unwrap(); @@ -686,10 +675,9 @@ fn test_protocol_ask_operations_on_batch_received() { consensus_event_receiver, pool_event_receiver| { //1. Create 1 node - let node_a_keypair = KeyPair::generate(); - let (node_a_peer_id, node_a) = network_controller.create_fake_connection( - PeerId::from_bytes(node_a_keypair.get_public_key().to_bytes()).unwrap(), - ); + let node_a_keypair = KeyPair::generate(0).unwrap(); + let (node_a_peer_id, node_a) = network_controller + .create_fake_connection(PeerId::from_public_key(node_a_keypair.get_public_key())); //2. Creates 1 op let operation = tools::create_operation_with_expire_period(&node_a_keypair, 1); @@ -746,14 +734,12 @@ fn test_protocol_re_ask_operations_to_another_node_on_batch_received_after_delay consensus_event_receiver, pool_event_receiver| { //1. 
Create 2 node - let node_a_keypair = KeyPair::generate(); - let (node_a_peer_id, node_a) = network_controller.create_fake_connection( - PeerId::from_bytes(node_a_keypair.get_public_key().to_bytes()).unwrap(), - ); - let node_b_keypair = KeyPair::generate(); - let (node_b_peer_id, node_b) = network_controller.create_fake_connection( - PeerId::from_bytes(node_b_keypair.get_public_key().to_bytes()).unwrap(), - ); + let node_a_keypair = KeyPair::generate(0).unwrap(); + let (node_a_peer_id, node_a) = network_controller + .create_fake_connection(PeerId::from_public_key(node_a_keypair.get_public_key())); + let node_b_keypair = KeyPair::generate(0).unwrap(); + let (node_b_peer_id, node_b) = network_controller + .create_fake_connection(PeerId::from_public_key(node_b_keypair.get_public_key())); //2. Creates 1 op let operation = tools::create_operation_with_expire_period(&node_a_keypair, 1); @@ -832,14 +818,12 @@ fn test_protocol_does_not_re_ask_operations_to_another_node_if_received() { consensus_event_receiver, pool_event_receiver| { //1. Create 2 node - let node_a_keypair = KeyPair::generate(); - let (node_a_peer_id, node_a) = network_controller.create_fake_connection( - PeerId::from_bytes(node_a_keypair.get_public_key().to_bytes()).unwrap(), - ); - let node_b_keypair = KeyPair::generate(); - let (node_b_peer_id, node_b) = network_controller.create_fake_connection( - PeerId::from_bytes(node_b_keypair.get_public_key().to_bytes()).unwrap(), - ); + let node_a_keypair = KeyPair::generate(0).unwrap(); + let (node_a_peer_id, node_a) = network_controller + .create_fake_connection(PeerId::from_public_key(node_a_keypair.get_public_key())); + let node_b_keypair = KeyPair::generate(0).unwrap(); + let (node_b_peer_id, node_b) = network_controller + .create_fake_connection(PeerId::from_public_key(node_b_keypair.get_public_key())); //2. Creates 1 op let operation = tools::create_operation_with_expire_period(&node_a_keypair, 1); @@ -919,14 +903,12 @@ fn test_protocol_on_ask_operations() { consensus_event_receiver, pool_event_receiver| { //1. Create 2 node - let node_a_keypair = KeyPair::generate(); - let (node_a_peer_id, _node_a) = network_controller.create_fake_connection( - PeerId::from_bytes(node_a_keypair.get_public_key().to_bytes()).unwrap(), - ); - let node_b_keypair = KeyPair::generate(); - let (node_b_peer_id, node_b) = network_controller.create_fake_connection( - PeerId::from_bytes(node_b_keypair.get_public_key().to_bytes()).unwrap(), - ); + let node_a_keypair = KeyPair::generate(0).unwrap(); + let (node_a_peer_id, _node_a) = network_controller + .create_fake_connection(PeerId::from_public_key(node_a_keypair.get_public_key())); + let node_b_keypair = KeyPair::generate(0).unwrap(); + let (node_b_peer_id, node_b) = network_controller + .create_fake_connection(PeerId::from_public_key(node_b_keypair.get_public_key())); //2. 
Creates 1 op let operation = tools::create_operation_with_expire_period(&node_a_keypair, 1); diff --git a/massa-protocol-worker/src/tests/tools.rs b/massa-protocol-worker/src/tests/tools.rs index 03513e878e6..224d8c16b58 100644 --- a/massa-protocol-worker/src/tests/tools.rs +++ b/massa-protocol-worker/src/tests/tools.rs @@ -5,8 +5,7 @@ use massa_models::{ block::SecureShareBlock, block_id::BlockId, operation::SecureShareOperation, prehash::PreHashSet, }; -use massa_protocol_exports::ProtocolController; -use peernet::peer_id::PeerId; +use massa_protocol_exports::{PeerId, ProtocolController}; use crate::{ handlers::block_handler::{BlockInfoReply, BlockMessage}, @@ -77,7 +76,7 @@ pub fn send_and_propagate_block( ) { network_controller .send_from_peer( - &node_id, + node_id, Message::Block(Box::new(BlockMessage::BlockHeader( block.content.header.clone(), ))), @@ -100,7 +99,7 @@ pub fn send_and_propagate_block( )]; network_controller .send_from_peer( - &node_id, + node_id, Message::Block(Box::new(BlockMessage::ReplyForBlocks(info))), ) .unwrap(); @@ -109,7 +108,7 @@ pub fn send_and_propagate_block( let info = vec![(block.id, BlockInfoReply::Operations(operations))]; network_controller .send_from_peer( - &node_id, + node_id, Message::Block(Box::new(BlockMessage::ReplyForBlocks(info))), ) .unwrap(); diff --git a/massa-protocol-worker/src/worker.rs b/massa-protocol-worker/src/worker.rs index b951e1276e0..09eeefbe826 100644 --- a/massa-protocol-worker/src/worker.rs +++ b/massa-protocol-worker/src/worker.rs @@ -9,19 +9,23 @@ use massa_protocol_exports::{ use massa_serialization::U64VarIntDeserializer; use massa_signature::KeyPair; use massa_storage::Storage; +use massa_time::MassaTime; +use massa_versioning::{ + keypair_factory::KeyPairFactory, + versioning::MipStore, + versioning_factory::{FactoryStrategy, VersioningFactory}, +}; use parking_lot::RwLock; use peernet::{ config::{PeerNetCategoryInfo, PeerNetConfiguration}, network_manager::PeerNetManager, - types::KeyPair as PeerNetKeyPair, -}; -use std::{ - collections::HashMap, fs::read_to_string, ops::Bound::Included, str::FromStr, sync::Arc, }; +use std::{collections::HashMap, fs::read_to_string, ops::Bound::Included, sync::Arc}; use tracing::{debug, log::warn}; use crate::{ connectivity::{start_connectivity_thread, ConnectivityCommand}, + context::Context, controller::ProtocolControllerImpl, handlers::{ block_handler::{ @@ -149,6 +153,7 @@ pub fn start_protocol_controller( pool_controller: Box, storage: Storage, protocol_channels: ProtocolChannels, + mip_store: MipStore, ) -> Result<(Box, KeyPair, NodeId), ProtocolError> { debug!("starting protocol controller"); let peer_db = Arc::new(RwLock::new(PeerDB::default())); @@ -170,11 +175,6 @@ pub fn start_protocol_controller( id_deserializer: U64VarIntDeserializer::new(Included(0), Included(u64::MAX)), }; - let mut peernet_config = PeerNetConfiguration::default( - MassaHandshake::new(peer_db.clone(), config.clone(), message_handlers.clone()), - message_handlers.clone(), - ); - // try to read node keypair from file, otherwise generate it & write to file. Then derive nodeId let keypair = if std::path::Path::is_file(&config.keypair_file) { // file exists: try to load it @@ -184,13 +184,28 @@ pub fn start_protocol_controller( serde_json::from_slice::(keypair_bs58_check_encoded.as_bytes())? 
} else { // node file does not exist: generate the key and save it - let keypair = KeyPair::generate(); + // MERGE TODO + let keypair_factory = KeyPairFactory { + mip_store: mip_store.clone(), + }; + let now = MassaTime::now().map_err(|e| { + ProtocolError::GeneralProtocolError(format!("Unable to get current time: {}", e)) + })?; + let keypair = keypair_factory.create(&(), FactoryStrategy::At(now))?; if let Err(e) = std::fs::write(&config.keypair_file, serde_json::to_string(&keypair)?) { warn!("could not generate node key file: {}", e); } keypair }; + let mut peernet_config = PeerNetConfiguration::default( + MassaHandshake::new(peer_db.clone(), config.clone(), message_handlers.clone()), + message_handlers.clone(), + Context { + our_keypair: keypair.clone(), + }, + ); + let initial_peers_infos = serde_json::from_str::>( &std::fs::read_to_string(&config.initial_peers)?, )?; @@ -213,8 +228,6 @@ pub fn start_protocol_controller( .collect() }; - let peernet_keypair = PeerNetKeyPair::from_str(&keypair.to_string()).unwrap(); - peernet_config.self_keypair = peernet_keypair.clone(); let peernet_categories = config .peers_categories .iter() @@ -266,7 +279,7 @@ pub fn start_protocol_controller( ))); let connectivity_thread_handle = start_connectivity_thread( - PeerId::from_public_key(peernet_keypair.get_public_key()), + PeerId::from_public_key(keypair.get_public_key()), network_controller, consensus_controller, pool_controller, @@ -311,6 +324,7 @@ pub fn start_protocol_controller( .collect(), config.default_category_info, config, + mip_store, )?; let manager = ProtocolManagerImpl::new(connectivity_thread_handle); diff --git a/massa-protocol-worker/src/wrap_network.rs b/massa-protocol-worker/src/wrap_network.rs index 21f1837a5a1..7a6cc2ba845 100644 --- a/massa-protocol-worker/src/wrap_network.rs +++ b/massa-protocol-worker/src/wrap_network.rs @@ -3,15 +3,15 @@ use std::{ net::SocketAddr, }; -use massa_protocol_exports::ProtocolError; +use massa_protocol_exports::{PeerId, ProtocolError}; use peernet::{ network_manager::{PeerNetManager, SharedActiveConnections}, peer::PeerConnectionType, - peer_id::PeerId, - transports::{OutConnectionConfig, TransportType}, + transports::TransportType, }; use crate::{ + context::Context, handlers::peer_handler::MassaHandshake, messages::{Message, MessagesHandler, MessagesSerializer}, }; @@ -40,7 +40,7 @@ impl Clone for Box { } } -impl ActiveConnectionsTrait for SharedActiveConnections { +impl ActiveConnectionsTrait for SharedActiveConnections { fn send_to_peer( &self, peer_id: &PeerId, @@ -51,7 +51,7 @@ impl ActiveConnectionsTrait for SharedActiveConnections { if let Some(connection) = self.read().connections.get(peer_id) { connection .send_channels - .send(message_serializer, message, high_priority) + .try_send(message_serializer, message, high_priority) .map_err(|err| ProtocolError::SendError(err.to_string())) } else { Err(ProtocolError::SendError( @@ -118,16 +118,17 @@ pub trait NetworkController: Send + Sync { &mut self, addr: SocketAddr, timeout: std::time::Duration, - out_connection_config: &OutConnectionConfig, ) -> Result<(), ProtocolError>; } pub struct NetworkControllerImpl { - peernet_manager: PeerNetManager, + peernet_manager: PeerNetManager, } impl NetworkControllerImpl { - pub fn new(peernet_manager: PeerNetManager) -> Self { + pub fn new( + peernet_manager: PeerNetManager, + ) -> Self { Self { peernet_manager } } } @@ -161,10 +162,10 @@ impl NetworkController for NetworkControllerImpl { &mut self, addr: SocketAddr, timeout: std::time::Duration, - 
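// Node key generation is now version-aware: instead of a bare
// KeyPair::generate(), the worker asks the versioning factory for whichever
// keypair version is active at the current time according to the MIP store.
// Sketch restating the hunk above with the names it imports:
use massa_protocol_exports::ProtocolError;
use massa_signature::KeyPair;
use massa_time::MassaTime;
use massa_versioning::{
    keypair_factory::KeyPairFactory,
    versioning::MipStore,
    versioning_factory::{FactoryStrategy, VersioningFactory},
};

fn generate_node_keypair(mip_store: MipStore) -> Result<KeyPair, ProtocolError> {
    let keypair_factory = KeyPairFactory { mip_store };
    let now = MassaTime::now().map_err(|e| {
        ProtocolError::GeneralProtocolError(format!("Unable to get current time: {}", e))
    })?;
    // FactoryStrategy::At(now) selects the key version live at `now`.
    Ok(keypair_factory.create(&(), FactoryStrategy::At(now))?)
}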
out_connection_config: &OutConnectionConfig, ) -> Result<(), ProtocolError> { + //TODO: Change when we support multiple transports self.peernet_manager - .try_connect(addr, timeout, out_connection_config) + .try_connect(TransportType::Tcp, addr, timeout) .map_err(|err| ProtocolError::GeneralProtocolError(err.to_string()))?; Ok(()) } diff --git a/massa-sdk/Cargo.toml b/massa-sdk/Cargo.toml index d403405a69e..76be9f28631 100644 --- a/massa-sdk/Cargo.toml +++ b/massa-sdk/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "massa_sdk" -version = "0.1.0" +version = "0.23.0" edition = "2021" [dependencies] @@ -8,6 +8,10 @@ jsonrpsee = { version = "0.18.2", features = ["client"] } jsonrpsee-http-client = { version = "0.18.2", features = ["webpki-tls"] } jsonrpsee-ws-client = { version = "0.18.2", features = ["webpki-tls"] } http = "0.2.8" +tonic = { version = "0.9.1", features = ["gzip"] } +thiserror = "1.0" +tracing = {version = "0.1", features = ["log"]} massa_api_exports = { path = "../massa-api-exports" } massa_models = { path = "../massa-models" } massa_time = { path = "../massa-time" } +massa_proto = { path = "../massa-proto" } diff --git a/massa-sdk/src/lib.rs b/massa-sdk/src/lib.rs index bbfefd64cb3..f7c4ba86eb7 100644 --- a/massa-sdk/src/lib.rs +++ b/massa-sdk/src/lib.rs @@ -11,6 +11,9 @@ use jsonrpsee::http_client::HttpClient; use jsonrpsee::rpc_params; use jsonrpsee::types::ErrorObject; use jsonrpsee::ws_client::{HeaderMap, HeaderValue, WsClient, WsClientBuilder}; +use jsonrpsee::{core::RpcResult, http_client::HttpClientBuilder}; +use jsonrpsee_http_client as _; +use jsonrpsee_ws_client as _; use massa_api_exports::page::PagedVecV2; use massa_api_exports::ApiRequest; use massa_api_exports::{ @@ -39,25 +42,35 @@ use massa_models::{ prehash::{PreHashMap, PreHashSet}, version::Version, }; - -use jsonrpsee_http_client as _; -use jsonrpsee_ws_client as _; - -use jsonrpsee::{core::RpcResult, http_client::HttpClientBuilder}; +use massa_proto::massa::api::v1::massa_service_client::MassaServiceClient; use std::net::{IpAddr, SocketAddr}; use std::str::FromStr; +use thiserror::Error; mod config; pub use config::ClientConfig; pub use config::HttpConfig; pub use config::WsConfig; +/// Error when creating a new client +#[derive(Error, Debug)] +pub enum ClientError { + /// Url error + #[error("Invalid grpc url: {0}")] + Url(#[from] http::uri::InvalidUri), + /// Connection error + #[error("Cannot connect to grpc server: {0}")] + Connect(#[from] tonic::transport::Error), +} + /// Client pub struct Client { /// public component pub public: RpcClient, /// private component pub private: RpcClient, + /// grpc client + pub grpc: Option>, } impl Client { @@ -66,16 +79,33 @@ impl Client { ip: IpAddr, public_port: u16, private_port: u16, + grpc_port: u16, http_config: &HttpConfig, - ) -> Client { + ) -> Result { let public_socket_addr = SocketAddr::new(ip, public_port); let private_socket_addr = SocketAddr::new(ip, private_port); + let grpc_socket_addr = SocketAddr::new(ip, grpc_port); let public_url = format!("http://{}", public_socket_addr); let private_url = format!("http://{}", private_socket_addr); - Client { + let grpc_url = format!("grpc://{}", grpc_socket_addr); + + // try to start grpc client and connect to the server + let grpc_opts = match tonic::transport::Channel::from_shared(grpc_url)? 
+ .connect() + .await + { + Ok(channel) => Some(MassaServiceClient::new(channel)), + Err(e) => { + tracing::warn!("unable to connect to grpc server {}", e); + None + } + }; + + Ok(Client { public: RpcClient::from_url(&public_url, http_config).await, private: RpcClient::from_url(&private_url, http_config).await, - } + grpc: grpc_opts, + }) } } diff --git a/massa-serialization/Cargo.toml b/massa-serialization/Cargo.toml index ef58230055e..b6a88b6daf2 100644 --- a/massa-serialization/Cargo.toml +++ b/massa-serialization/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "massa_serialization" -version = "0.1.0" +version = "0.23.0" authors = ["Massa Labs "] edition = "2021" @@ -9,7 +9,7 @@ edition = "2021" [dependencies] displaydoc = "0.2" thiserror = "1.0" -nom = "7.1" +nom = "=7.1" unsigned-varint = { version = "0.7.1", features = [ "nom", ], git = "https://github.com/cyphar/unsigned-varint.git", branch = "nom6-errors" } diff --git a/massa-serialization/src/lib.rs b/massa-serialization/src/lib.rs index 02a33c74236..2d894f2d6cd 100644 --- a/massa-serialization/src/lib.rs +++ b/massa-serialization/src/lib.rs @@ -252,6 +252,7 @@ u32, U32VarIntSerializer, u32_buffer, U32VarIntDeserializer, "`u32`"; u64, U64VarIntSerializer, u64_buffer, U64VarIntDeserializer, "`u64`" } +#[derive(Clone)] pub struct OptionSerializer where ST: Serializer, @@ -287,6 +288,7 @@ where } } +#[derive(Clone)] pub struct OptionDeserializer where T: Clone, @@ -337,7 +339,7 @@ where } /// Serializer for bool -#[derive(Debug, Default)] +#[derive(Clone, Debug, Default)] pub struct BoolSerializer {} impl BoolSerializer { @@ -355,7 +357,7 @@ impl Serializer for BoolSerializer { } /// Deserializer for bool -#[derive(Debug, Default)] +#[derive(Clone, Debug, Default)] pub struct BoolDeserializer {} impl BoolDeserializer { diff --git a/massa-signature/Cargo.toml b/massa-signature/Cargo.toml index 0fc04960fa1..5a6ea1684bb 100644 --- a/massa-signature/Cargo.toml +++ b/massa-signature/Cargo.toml @@ -1,19 +1,22 @@ [package] name = "massa_signature" -version = "0.1.0" +version = "0.23.0" authors = ["Massa Labs "] edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -bs58 = { version = "0.4", features = ["check"] } +bs58 = { version = "=0.4", features = ["check"] } displaydoc = "0.2" -ed25519-dalek = { version = "1.0", features = ["batch"] } +ed25519-dalek = { version = "=1.0", features = ["batch"] } serde = { version = "1.0", features = ["derive"] } thiserror = "1.0" -nom = "7.1" +nom = "=7.1" rand = "0.7" +# TODO tag transition crate with a version number +transition = { git = "https://github.com/massalabs/transition.git", rev = "93fa3bf82f9f5ff421c78536879b7fd1b948ca75" } + # custom modules massa_hash = { path = "../massa-hash" } massa_serialization = { path = "../massa-serialization" } diff --git a/massa-signature/src/error.rs b/massa-signature/src/error.rs index 65da49ba4db..1f9d04b11ec 100644 --- a/massa-signature/src/error.rs +++ b/massa-signature/src/error.rs @@ -15,4 +15,7 @@ pub enum MassaSignatureError { /// Wrong prefix for hash: expected {0}, got {1} WrongPrefix(String, String), + + /// invalid version identifier: {0} + InvalidVersionError(String), } diff --git a/massa-signature/src/lib.rs b/massa-signature/src/lib.rs index 61b8994a225..de6ed920be9 100644 --- a/massa-signature/src/lib.rs +++ b/massa-signature/src/lib.rs @@ -8,6 +8,6 @@ mod signature_impl; pub use error::MassaSignatureError; pub use signature_impl::{ - verify_signature_batch, KeyPair, 
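// The SDK client now carries an optional gRPC channel alongside the two
// JSON-RPC clients: if the gRPC endpoint is unreachable, construction still
// succeeds with `grpc: None` and a warning. Hypothetical call site (the port
// numbers below are placeholders, not values from this patch):
use massa_sdk::{Client, ClientError, HttpConfig};
use std::net::IpAddr;

async fn connect(ip: IpAddr, http_config: &HttpConfig) -> Result<Client, ClientError> {
    let client = Client::new(ip, 33034, 33035, 33037, http_config).await?;
    if client.grpc.is_none() {
        // gRPC server unreachable; the JSON-RPC `public`/`private` clients
        // remain usable.
    }
    Ok(client)
}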
PublicKey, PublicKeyDeserializer, Signature, - SignatureDeserializer, PUBLIC_KEY_SIZE_BYTES, SECRET_KEY_BYTES_SIZE, SIGNATURE_SIZE_BYTES, + verify_signature_batch, KeyPair, PublicKey, PublicKeyDeserializer, PublicKeyV0, PublicKeyV1, + Signature, SignatureDeserializer, }; diff --git a/massa-signature/src/signature_impl.rs b/massa-signature/src/signature_impl.rs index 1eb7f6f9aea..748e55d33b7 100644 --- a/massa-signature/src/signature_impl.rs +++ b/massa-signature/src/signature_impl.rs @@ -1,7 +1,9 @@ // Copyright (c) 2022 MASSA LABS use crate::error::MassaSignatureError; -use ed25519_dalek::{verify_batch, Signer, Verifier}; + +use ed25519_dalek::{Signer, Verifier}; + use massa_hash::Hash; use massa_serialization::{ DeserializeError, Deserializer, Serializer, U64VarIntDeserializer, U64VarIntSerializer, @@ -16,45 +18,30 @@ use serde::{ ser::SerializeStruct, Deserialize, }; +use std::str::FromStr; use std::{borrow::Cow, cmp::Ordering, hash::Hasher, ops::Bound::Included}; -use std::{convert::TryInto, str::FromStr}; - -/// Size of a public key -pub const PUBLIC_KEY_SIZE_BYTES: usize = ed25519_dalek::PUBLIC_KEY_LENGTH; -/// Size of a keypair -pub const SECRET_KEY_BYTES_SIZE: usize = ed25519_dalek::SECRET_KEY_LENGTH; -/// Size of a signature -pub const SIGNATURE_SIZE_BYTES: usize = ed25519_dalek::SIGNATURE_LENGTH; -/// `KeyPair` is used for signature and decryption +use transition::Versioned; + +#[allow(missing_docs)] +/// versioned KeyPair used for signature and decryption +#[transition::versioned(versions("0", "1"))] pub struct KeyPair(ed25519_dalek::Keypair); impl Clone for KeyPair { fn clone(&self) -> Self { - KeyPair(ed25519_dalek::Keypair { - // This will never error since self is a valid keypair - secret: ed25519_dalek::SecretKey::from_bytes(self.0.secret.as_bytes()).unwrap(), - public: self.0.public, - }) + match self { + KeyPair::KeyPairV0(keypair) => KeyPair::KeyPairV0(keypair.clone()), + KeyPair::KeyPairV1(keypair) => KeyPair::KeyPairV1(keypair.clone()), + } } } -const SECRET_PREFIX: char = 'S'; -const KEYPAIR_VERSION: u64 = 0; - impl std::fmt::Display for KeyPair { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - let u64_serializer = U64VarIntSerializer::new(); - let mut bytes = Vec::new(); - u64_serializer - .serialize(&KEYPAIR_VERSION, &mut bytes) - .map_err(|_| std::fmt::Error)?; - bytes.extend(self.to_bytes()); - write!( - f, - "{}{}", - SECRET_PREFIX, - bs58::encode(bytes).with_check().into_string() - ) + match self { + KeyPair::KeyPairV0(keypair) => keypair.fmt(f), + KeyPair::KeyPairV1(keypair) => keypair.fmt(f), + } } } @@ -64,8 +51,21 @@ impl std::fmt::Debug for KeyPair { } } +const SECRET_PREFIX: char = 'S'; + impl FromStr for KeyPair { type Err = MassaSignatureError; + + /// # Example + /// ``` + /// # use massa_signature::KeyPair; + /// # use std::str::FromStr; + /// + /// let keypair = KeyPair::generate(0).unwrap(); + /// let string = keypair.to_string(); + /// let keypair2 = KeyPair::from_str(&string).unwrap(); + /// assert_eq!(keypair.to_string(), keypair2.to_string()); + /// ``` fn from_str(s: &str) -> Result { let mut chars = s.chars(); match chars.next() { @@ -78,16 +78,7 @@ impl FromStr for KeyPair { .map_err(|_| { MassaSignatureError::ParsingError(format!("bad secret key bs58: {}", s)) })?; - let u64_deserializer = U64VarIntDeserializer::new(Included(0), Included(u64::MAX)); - let (rest, _version) = u64_deserializer - .deserialize::(&decoded_bs58_check[..]) - .map_err(|err| MassaSignatureError::ParsingError(err.to_string()))?; - 
KeyPair::from_bytes(&rest.try_into().map_err(|_| { - MassaSignatureError::ParsingError(format!( - "secret key not long enough for: {}", - s - )) - })?) + KeyPair::from_bytes(&decoded_bs58_check) } _ => Err(MassaSignatureError::ParsingError(format!( "bad secret prefix for: {}", @@ -98,21 +89,35 @@ impl FromStr for KeyPair { } impl KeyPair { - /// Generate a new `KeyPair` + /// Get the version of the given KeyPair + pub fn get_version(&self) -> u64 { + match self { + KeyPair::KeyPairV0(keypair) => keypair.get_version(), + KeyPair::KeyPairV1(keypair) => keypair.get_version(), + } + } + + /// Generates a new KeyPair of the version given as parameter. + /// Errors if the version number does not exist /// /// # Example /// ``` /// # use massa_signature::KeyPair; /// # use massa_hash::Hash; - /// let keypair = KeyPair::generate(); + /// let keypair = KeyPair::generate(0).unwrap(); /// let data = Hash::compute_from("Hello World!".as_bytes()); /// let signature = keypair.sign(&data).unwrap(); /// /// let serialized: String = signature.to_bs58_check(); - /// ``` - pub fn generate() -> Self { - let mut rng = OsRng::default(); - KeyPair(ed25519_dalek::Keypair::generate(&mut rng)) + pub fn generate(version: u64) -> Result { + match version { + ::VERSION => Ok(KeyPairVariant!["0"](::generate())), + ::VERSION => Ok(KeyPairVariant!["1"](::generate())), + _ => Err(MassaSignatureError::InvalidVersionError(format!( + "KeyPair version {} doesn't exist.", + version + ))), + } } /// Returns the Signature produced by signing @@ -122,55 +127,157 @@ impl KeyPair { /// ``` /// # use massa_signature::KeyPair; /// # use massa_hash::Hash; - /// let keypair = KeyPair::generate(); + /// let keypair = KeyPair::generate(0).unwrap(); /// let data = Hash::compute_from("Hello World!".as_bytes()); /// let signature = keypair.sign(&data).unwrap(); /// ``` pub fn sign(&self, hash: &Hash) -> Result { - Ok(Signature(self.0.sign(hash.to_bytes()))) + match self { + KeyPair::KeyPairV0(keypair) => keypair.sign(hash).map(Signature::SignatureV0), + KeyPair::KeyPairV1(keypair) => keypair.sign(hash).map(Signature::SignatureV1), + } } - /// Return the bytes representing the keypair (should be a reference in the future) + /// Return the total length after serialization + pub fn get_ser_len(&self) -> usize { + match self { + KeyPair::KeyPairV0(keypair) => keypair.get_ser_len(), + KeyPair::KeyPairV1(keypair) => keypair.get_ser_len(), + } + } + + /// Return the bytes (as a Vec) representing the keypair /// /// # Example /// ``` /// # use massa_signature::KeyPair; - /// let keypair = KeyPair::generate(); + /// let keypair = KeyPair::generate(0).unwrap(); /// let bytes = keypair.to_bytes(); /// ``` - pub fn to_bytes(&self) -> &[u8; SECRET_KEY_BYTES_SIZE] { - self.0.secret.as_bytes() + pub fn to_bytes(&self) -> Vec { + match self { + KeyPair::KeyPairV0(keypair) => keypair.to_bytes(), + KeyPair::KeyPairV1(keypair) => keypair.to_bytes(), + } } - /// Return the bytes representing the keypair + /// Get the public key of the keypair /// /// # Example /// ``` /// # use massa_signature::KeyPair; - /// let keypair = KeyPair::generate(); - /// let bytes = keypair.into_bytes(); + /// let keypair = KeyPair::generate(0).unwrap(); + /// let public_key = keypair.get_public_key(); /// ``` - pub fn into_bytes(&self) -> [u8; SECRET_KEY_BYTES_SIZE] { - self.0.secret.to_bytes() + pub fn get_public_key(&self) -> PublicKey { + match self { + KeyPair::KeyPairV0(keypair) => PublicKey::PublicKeyV0(keypair.get_public_key()), + KeyPair::KeyPairV1(keypair) => 
PublicKey::PublicKeyV1(keypair.get_public_key()), + } } - /// Convert a byte array of size `SECRET_KEY_BYTES_SIZE` to a `KeyPair` + /// Convert a byte slice to a `KeyPair` /// /// # Example /// ``` /// # use massa_signature::KeyPair; - /// let keypair = KeyPair::generate(); - /// let bytes = keypair.into_bytes(); + /// let keypair = KeyPair::generate(0).unwrap(); + /// let bytes = keypair.to_bytes(); /// let keypair2 = KeyPair::from_bytes(&bytes).unwrap(); + /// assert_eq!(keypair.to_string(), keypair2.to_string()); /// ``` - pub fn from_bytes(data: &[u8; SECRET_KEY_BYTES_SIZE]) -> Result { - let secret = ed25519_dalek::SecretKey::from_bytes(&data[..]).map_err(|err| { - MassaSignatureError::ParsingError(format!("keypair bytes parsing error: {}", err)) - })?; - Ok(KeyPair(ed25519_dalek::Keypair { - public: ed25519_dalek::PublicKey::from(&secret), - secret, - })) + pub fn from_bytes(data: &[u8]) -> Result { + let u64_deserializer = U64VarIntDeserializer::new(Included(0), Included(u64::MAX)); + let (rest, version) = u64_deserializer + .deserialize::(data) + .map_err(|err| MassaSignatureError::ParsingError(err.to_string()))?; + match version { + ::VERSION => { + Ok(KeyPairVariant!["0"](::from_bytes(rest)?)) + } + ::VERSION => { + Ok(KeyPairVariant!["1"](::from_bytes(rest)?)) + } + _ => Err(MassaSignatureError::InvalidVersionError(format!( + "Unknown keypair version: {}", + version + ))), + } + } +} + +#[transition::impl_version(versions("0", "1"))] +impl Clone for KeyPair { + fn clone(&self) -> Self { + KeyPair(ed25519_dalek::Keypair { + // This will never error since self is a valid keypair + secret: ed25519_dalek::SecretKey::from_bytes(self.0.secret.as_bytes()).unwrap(), + public: self.0.public, + }) + } +} + +#[transition::impl_version(versions("0", "1"))] +impl std::fmt::Display for KeyPair { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!( + f, + "{}{}", + SECRET_PREFIX, + bs58::encode(self.to_bytes()).with_check().into_string() + ) + } +} + +#[transition::impl_version(versions("0", "1"), structures("KeyPair"))] +impl KeyPair { + pub const SECRET_KEY_BYTES_SIZE: usize = ed25519_dalek::SECRET_KEY_LENGTH; + + /// Return the current version keypair + pub fn get_version(&self) -> u64 { + Self::VERSION + } + + /// Return the total length after serialization + pub fn get_ser_len(&self) -> usize { + Self::VERSION_VARINT_SIZE_BYTES + Self::SECRET_KEY_BYTES_SIZE + } + + /// Return the bytes representing the keypair (should be a reference in the future) + /// + /// # Example + /// ``` + /// # use massa_signature::KeyPair; + /// let keypair = KeyPair::generate(0).unwrap(); + /// let bytes = keypair.to_bytes(); + /// ``` + pub fn to_bytes(&self) -> Vec { + let version_serializer = U64VarIntSerializer::new(); + let mut bytes: Vec = + Vec::with_capacity(Self::VERSION_VARINT_SIZE_BYTES + Self::SECRET_KEY_BYTES_SIZE); + version_serializer + .serialize(&Self::VERSION, &mut bytes) + .unwrap(); + bytes.extend_from_slice(&self.0.secret.to_bytes()); + bytes + } +} + +#[transition::impl_version(versions("0", "1"), structures("KeyPair", "Signature", "PublicKey"))] +impl KeyPair { + /// Returns the Signature produced by signing + /// data bytes with a `KeyPair`. 
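// Serialized keys are now self-describing: to_bytes() emits a varint version
// tag followed by the raw secret key, and from_bytes() dispatches on that
// tag. Round-trip sketch using only APIs from this patch:
use massa_signature::KeyPair;

fn keypair_roundtrip() {
    let keypair = KeyPair::generate(1).unwrap();
    let bytes = keypair.to_bytes(); // [varint version | 32-byte secret key]
    assert_eq!(bytes.len(), keypair.get_ser_len());
    let decoded = KeyPair::from_bytes(&bytes).unwrap();
    assert_eq!(decoded.get_version(), 1);
    assert_eq!(keypair.to_string(), decoded.to_string());
}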
+ /// + /// # Example + /// ``` + /// # use massa_signature::KeyPair; + /// # use massa_hash::Hash; + /// let keypair = KeyPair::generate(0).unwrap(); + /// let data = Hash::compute_from("Hello World!".as_bytes()); + /// let signature = keypair.sign(&data).unwrap(); + /// ``` + pub fn sign(&self, hash: &Hash) -> Result { + Ok(Signature(self.0.sign(hash.to_bytes()))) } /// Get the public key of the keypair @@ -178,12 +285,56 @@ impl KeyPair { /// # Example /// ``` /// # use massa_signature::KeyPair; - /// let keypair = KeyPair::generate(); + /// let keypair = KeyPair::generate(0).unwrap(); /// let public_key = keypair.get_public_key(); /// ``` pub fn get_public_key(&self) -> PublicKey { PublicKey(self.0.public) } + + /// Generate a new `KeyPair` + /// + /// # Example + /// ``` + /// # use massa_signature::KeyPair; + /// # use massa_hash::Hash; + /// let keypair = KeyPair::generate(0).unwrap(); + /// let data = Hash::compute_from("Hello World!".as_bytes()); + /// let signature = keypair.sign(&data).unwrap(); + /// + /// let serialized: String = signature.to_bs58_check(); + /// ``` + pub fn generate() -> Self { + let mut rng = OsRng; + KeyPair(ed25519_dalek::Keypair::generate(&mut rng)) + } + + /// Convert a byte array of size `SECRET_KEY_BYTES_SIZE` to a `KeyPair`. + /// + /// IMPORTANT: providing more bytes than needed does not result in an error. + /// + /// # Example + /// ``` + /// # use massa_signature::KeyPair; + /// let keypair = KeyPair::generate(0).unwrap(); + /// let bytes = keypair.to_bytes(); + /// let keypair2 = KeyPair::from_bytes(&bytes).unwrap(); + /// ``` + pub fn from_bytes(data: &[u8]) -> Result { + if data.len() < Self::SECRET_KEY_BYTES_SIZE { + return Err(MassaSignatureError::ParsingError( + "keypair byte array is of invalid size".to_string(), + )); + } + let secret = ed25519_dalek::SecretKey::from_bytes(&data[..Self::SECRET_KEY_BYTES_SIZE]) + .map_err(|err| { + MassaSignatureError::ParsingError(format!("keypair bytes parsing error: {}", err)) + })?; + Ok(KeyPair(ed25519_dalek::Keypair { + public: ed25519_dalek::PublicKey::from(&secret), + secret, + })) + } } impl ::serde::Serialize for KeyPair { @@ -198,7 +349,7 @@ impl ::serde::Serialize for KeyPair { /// ``` /// # use massa_signature::KeyPair; /// # use serde::{Deserialize, Serialize}; - /// let keypair = KeyPair::generate(); + /// let keypair = KeyPair::generate(0).unwrap(); /// let serialized: String = serde_json::to_string(&keypair).unwrap(); /// ``` /// @@ -223,7 +374,7 @@ impl<'de> ::serde::Deserialize<'de> for KeyPair { /// ``` /// # use massa_signature::KeyPair; /// # use serde::{Deserialize, Serialize}; - /// let keypair = KeyPair::generate(); + /// let keypair = KeyPair::generate(0).unwrap(); /// let serialized = serde_json::to_string(&keypair).unwrap(); /// let deserialized: KeyPair = serde_json::from_str(&serialized).unwrap(); /// ``` @@ -321,47 +472,61 @@ impl<'de> ::serde::Deserialize<'de> for KeyPair { } } +#[allow(missing_docs)] /// Public key used to check if a message was encoded /// by the corresponding `PublicKey`. 
/// Generated from the `KeyPair` using `SignatureEngine` +#[transition::versioned(versions("0", "1"))] #[derive(Clone, Copy, PartialEq, Eq)] pub struct PublicKey(ed25519_dalek::PublicKey); -const PUBLIC_PREFIX: char = 'P'; - #[allow(clippy::derived_hash_with_manual_eq)] impl std::hash::Hash for PublicKey { fn hash(&self, state: &mut H) { - self.0.as_bytes().hash(state); + match self { + PublicKey::PublicKeyV0(pubkey) => pubkey.hash(state), + PublicKey::PublicKeyV1(pubkey) => pubkey.hash(state), + } } } impl PartialOrd for PublicKey { fn partial_cmp(&self, other: &PublicKey) -> Option { - self.0.as_bytes().partial_cmp(other.0.as_bytes()) + self.to_bytes().partial_cmp(&other.to_bytes()) } } impl Ord for PublicKey { fn cmp(&self, other: &PublicKey) -> Ordering { - self.0.as_bytes().cmp(other.0.as_bytes()) + self.to_bytes().cmp(&other.to_bytes()) } } +#[test] +fn pubkey_ordering() { + use std::collections::BTreeSet; + + let v0 = vec![ + PublicKey::from_str("P1wiuz54kR2kmvumCELcgxv1YVStCnPK8QQ6os2FNbGYwp188im").unwrap(), + PublicKey::from_str("P12hzfgN14TCvAM3QgWvpPdHTKLUdqh2NzWqxkr2LAEG5hJmExr1").unwrap(), + ]; + let v1 = vec![ + PublicKey::from_str("P33GgHz13gmyTPfd1ntSWEr8WyQE6CoYj76EqwesX9VaRQDSc2d").unwrap(), + PublicKey::from_str("P4PSBj9N2trF4Dp3hvQ4CUojAH5HkRMkEFH9BXHAswRvwXsTaGN").unwrap(), + ]; + + let mut map = BTreeSet::new(); + map.extend(v1); + map.extend(v0.clone()); + assert_eq!(map.first(), v0.first()) +} + impl std::fmt::Display for PublicKey { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - let u64_serializer = U64VarIntSerializer::new(); - let mut bytes = Vec::new(); - u64_serializer - .serialize(&KEYPAIR_VERSION, &mut bytes) - .map_err(|_| std::fmt::Error)?; - bytes.extend(self.to_bytes()); - write!( - f, - "{}{}", - PUBLIC_PREFIX, - bs58::encode(bytes).with_check().into_string() - ) + match self { + PublicKey::PublicKeyV0(pubkey) => pubkey.fmt(f), + PublicKey::PublicKeyV1(pubkey) => pubkey.fmt(f), + } } } @@ -371,8 +536,21 @@ impl std::fmt::Debug for PublicKey { } } +const PUBLIC_PREFIX: char = 'P'; + impl FromStr for PublicKey { type Err = MassaSignatureError; + + /// # Example + /// ``` + /// # use massa_signature::{KeyPair, PublicKey}; + /// # use std::str::FromStr; + /// + /// let pubkey = KeyPair::generate(0).unwrap().get_public_key(); + /// let string = pubkey.to_string(); + /// let pubkey_2 = PublicKey::from_str(&string).unwrap(); + /// assert_eq!(pubkey.to_string(), pubkey_2.to_string()); + /// ``` fn from_str(s: &str) -> Result { let mut chars = s.chars(); match chars.next() { @@ -385,13 +563,7 @@ impl FromStr for PublicKey { .map_err(|_| { MassaSignatureError::ParsingError("Bad public key bs58".to_owned()) })?; - let u64_deserializer = U64VarIntDeserializer::new(Included(0), Included(u64::MAX)); - let (rest, _version) = u64_deserializer - .deserialize::(&decoded_bs58_check[..]) - .map_err(|err| MassaSignatureError::ParsingError(err.to_string()))?; - PublicKey::from_bytes(&rest.try_into().map_err(|_| { - MassaSignatureError::ParsingError("Public key not long enough".to_string()) - })?) 
+ PublicKey::from_bytes(&decoded_bs58_check) } _ => Err(MassaSignatureError::ParsingError( "Bad public key prefix".to_owned(), @@ -408,9 +580,17 @@ impl PublicKey { hash: &Hash, signature: &Signature, ) -> Result<(), MassaSignatureError> { - self.0.verify(hash.to_bytes(), &signature.0).map_err(|err| { - MassaSignatureError::SignatureError(format!("Signature verification failed: {}", err)) - }) + match (self, signature) { + (PublicKey::PublicKeyV0(pubkey), Signature::SignatureV0(signature)) => { + pubkey.verify_signature(hash, signature) + } + (PublicKey::PublicKeyV1(pubkey), Signature::SignatureV1(signature)) => { + pubkey.verify_signature(hash, signature) + } + _ => Err(MassaSignatureError::InvalidVersionError(String::from( + "The PublicKey and Signature versions do not match", + ))), + } } /// Serialize a `PublicKey` as bytes. @@ -419,54 +599,169 @@ impl PublicKey { /// ``` /// # use massa_signature::{PublicKey, KeyPair}; /// # use serde::{Deserialize, Serialize}; - /// let keypair = KeyPair::generate(); + /// let keypair = KeyPair::generate(0).unwrap(); /// /// let serialize = keypair.get_public_key().to_bytes(); /// ``` - pub fn to_bytes(&self) -> &[u8; PUBLIC_KEY_SIZE_BYTES] { - self.0.as_bytes() + pub fn to_bytes(&self) -> Vec { + match self { + PublicKey::PublicKeyV0(pubkey) => pubkey.to_bytes(), + PublicKey::PublicKeyV1(pubkey) => pubkey.to_bytes(), + } + } + + /// Return the total length after serialization + pub fn get_ser_len(&self) -> usize { + match self { + PublicKey::PublicKeyV0(pubkey) => pubkey.get_ser_len(), + PublicKey::PublicKeyV1(pubkey) => pubkey.get_ser_len(), + } } - /// Serialize into bytes. + /// Deserialize a `PublicKey` from bytes. /// /// # Example /// ``` /// # use massa_signature::{PublicKey, KeyPair}; /// # use serde::{Deserialize, Serialize}; - /// let keypair = KeyPair::generate(); + /// let keypair = KeyPair::generate(0).unwrap(); /// - /// let serialize = keypair.get_public_key().to_bytes(); + /// let serialized = keypair.get_public_key().to_bytes(); + /// let deserialized: PublicKey = PublicKey::from_bytes(&serialized).unwrap(); /// ``` - pub fn into_bytes(self) -> [u8; PUBLIC_KEY_SIZE_BYTES] { - self.0.to_bytes() + pub fn from_bytes(data: &[u8]) -> Result { + let u64_deserializer = U64VarIntDeserializer::new(Included(0), Included(u64::MAX)); + let (rest, version) = u64_deserializer + .deserialize::(data) + .map_err(|err| MassaSignatureError::ParsingError(err.to_string()))?; + match version { + ::VERSION => { + Ok(PublicKeyVariant!["0"](::from_bytes(rest)?)) + } + ::VERSION => { + Ok(PublicKeyVariant!["1"](::from_bytes(rest)?)) + } + _ => Err(MassaSignatureError::InvalidVersionError(format!( + "Unknown PublicKey version: {}", + version + ))), + } + } +} + +#[transition::impl_version(versions("0", "1"))] +#[allow(clippy::derived_hash_with_manual_eq)] +impl std::hash::Hash for PublicKey { + fn hash(&self, state: &mut H) { + self.0.to_bytes().hash(state); + } +} + +#[transition::impl_version(versions("0", "1"))] +impl PartialOrd for PublicKey { + fn partial_cmp(&self, other: &PublicKey) -> Option { + self.0.to_bytes().partial_cmp(&other.0.to_bytes()) + } +} + +#[transition::impl_version(versions("0", "1"))] +impl Ord for PublicKey { + fn cmp(&self, other: &PublicKey) -> Ordering { + self.0.to_bytes().cmp(&other.0.to_bytes()) + } +} + +#[transition::impl_version(versions("0", "1"))] +impl std::fmt::Display for PublicKey { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!( + f, + "{}{}", + PUBLIC_PREFIX, + 
bs58::encode(self.to_bytes()).with_check().into_string() + ) + } +} + +#[transition::impl_version(versions("0", "1"))] +impl std::fmt::Debug for PublicKey { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "{}", self) + } +} + +#[transition::impl_version(versions("0", "1"), structures("PublicKey", "Signature"))] +impl PublicKey { + /// Size of a public key + pub const PUBLIC_KEY_SIZE_BYTES: usize = ed25519_dalek::PUBLIC_KEY_LENGTH; + + /// Return the total length after serialization + pub fn get_ser_len(&self) -> usize { + Self::VERSION_VARINT_SIZE_BYTES + Self::PUBLIC_KEY_SIZE_BYTES + } + + /// Checks if the `Signature` associated with data bytes + /// was produced with the `KeyPair` associated to given `PublicKey` + pub fn verify_signature( + &self, + hash: &Hash, + signature: &Signature, + ) -> Result<(), MassaSignatureError> { + self.0.verify(hash.to_bytes(), &signature.0).map_err(|err| { + MassaSignatureError::SignatureError(format!("Signature verification failed: {}", err)) + }) + } + + /// Return the bytes representing the keypair (should be a reference in the future) + /// + /// # Example + /// ``` + /// # use massa_signature::KeyPair; + /// let keypair = KeyPair::generate(0).unwrap(); + /// let bytes = keypair.to_bytes(); + /// ``` + pub fn to_bytes(&self) -> Vec { + let version_serializer = U64VarIntSerializer::new(); + let mut bytes: Vec = + Vec::with_capacity(Self::VERSION_VARINT_SIZE_BYTES + Self::PUBLIC_KEY_SIZE_BYTES); + version_serializer + .serialize(&Self::VERSION, &mut bytes) + .unwrap(); + bytes.extend_from_slice(&self.0.to_bytes()); + bytes } /// Deserialize a `PublicKey` from bytes. /// + /// IMPORTANT: providing more bytes than needed does not result in an error. + /// /// # Example /// ``` /// # use massa_signature::{PublicKey, KeyPair}; /// # use serde::{Deserialize, Serialize}; - /// let keypair = KeyPair::generate(); + /// let keypair = KeyPair::generate(0).unwrap(); /// - /// let serialized = keypair.get_public_key().into_bytes(); + /// let serialized = keypair.get_public_key().to_bytes(); /// let deserialized: PublicKey = PublicKey::from_bytes(&serialized).unwrap(); /// ``` - pub fn from_bytes( - data: &[u8; PUBLIC_KEY_SIZE_BYTES], - ) -> Result { - ed25519_dalek::PublicKey::from_bytes(data) + pub fn from_bytes(data: &[u8]) -> Result { + if data.len() < Self::PUBLIC_KEY_SIZE_BYTES { + return Err(MassaSignatureError::ParsingError( + "public key byte array is of invalid size".to_string(), + )); + } + ed25519_dalek::PublicKey::from_bytes(&data[..Self::PUBLIC_KEY_SIZE_BYTES]) .map(Self) .map_err(|err| MassaSignatureError::ParsingError(err.to_string())) } } -/// Serializer for `Signature` -#[derive(Default)] +/// Deserializer for `PublicKey` +#[derive(Default, Clone)] pub struct PublicKeyDeserializer; impl PublicKeyDeserializer { - /// Creates a `SignatureDeserializer` + /// Creates a `PublicKeyDeserializer` pub const fn new() -> Self { Self } @@ -478,10 +773,10 @@ impl Deserializer for PublicKeyDeserializer { /// use massa_serialization::{DeserializeError, Deserializer}; /// use massa_hash::Hash; /// - /// let keypair = KeyPair::generate(); + /// let keypair = KeyPair::generate(0).unwrap(); /// let public_key = keypair.get_public_key(); /// let serialized = public_key.to_bytes(); - /// let (rest, deser_public_key) = PublicKeyDeserializer::new().deserialize::(serialized).unwrap(); + /// let (rest, deser_public_key) = PublicKeyDeserializer::new().deserialize::(&serialized).unwrap(); /// assert!(rest.is_empty()); /// 
assert_eq!(keypair.get_public_key(), deser_public_key); /// ``` @@ -489,28 +784,14 @@ impl Deserializer for PublicKeyDeserializer { &self, buffer: &'a [u8], ) -> IResult<&'a [u8], PublicKey, E> { - // Can't use try into directly because it fails if there is more data in the buffer - if buffer.len() < PUBLIC_KEY_SIZE_BYTES { - return Err(nom::Err::Error(ParseError::from_error_kind( + let public_key = PublicKey::from_bytes(buffer).map_err(|_| { + nom::Err::Error(ParseError::from_error_kind( buffer, - nom::error::ErrorKind::LengthValue, - ))); - } - let key = - PublicKey::from_bytes(buffer[..PUBLIC_KEY_SIZE_BYTES].try_into().map_err(|_| { - nom::Err::Error(ParseError::from_error_kind( - buffer, - nom::error::ErrorKind::LengthValue, - )) - })?) - .map_err(|_| { - nom::Err::Error(ParseError::from_error_kind( - buffer, - nom::error::ErrorKind::Fail, - )) - })?; - // Safe because the signature deserialization success - Ok((&buffer[PUBLIC_KEY_SIZE_BYTES..], key)) + nom::error::ErrorKind::Fail, + )) + })?; + // Safe because the signature deserialization succeeded + Ok((&buffer[public_key.get_ser_len()..], public_key)) } } @@ -526,7 +807,7 @@ impl ::serde::Serialize for PublicKey { /// ``` /// # use massa_signature::KeyPair; /// # use serde::{Deserialize, Serialize}; - /// let keypair = KeyPair::generate(); + /// let keypair = KeyPair::generate(0).unwrap(); /// let serialized: String = serde_json::to_string(&keypair.get_public_key()).unwrap(); /// ``` /// @@ -547,7 +828,7 @@ impl<'de> ::serde::Deserialize<'de> for PublicKey { /// ``` /// # use massa_signature::{PublicKey, KeyPair}; /// # use serde::{Deserialize, Serialize}; - /// let keypair = KeyPair::generate(); + /// let keypair = KeyPair::generate(0).unwrap(); /// /// let serialized = serde_json::to_string(&keypair.get_public_key()).unwrap(); /// let deserialized: PublicKey = serde_json::from_str(&serialized).unwrap(); @@ -585,20 +866,49 @@ impl<'de> ::serde::Deserialize<'de> for PublicKey { } } +#[allow(missing_docs)] /// Signature generated from a message and a `KeyPair`. 
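[Illustrative aside, not part of this diff: the deserializer above now delegates to `PublicKey::from_bytes` and splits the buffer with `get_ser_len()`, so trailing bytes are handed back to the caller. A sketch mirroring the doc example:]

use massa_serialization::{DeserializeError, Deserializer};
use massa_signature::{KeyPair, PublicKeyDeserializer};

fn deserialize_with_trailing_bytes() {
    let public_key = KeyPair::generate(0).unwrap().get_public_key();
    let mut buffer = public_key.to_bytes();
    buffer.extend_from_slice(&[1, 2, 3]); // unrelated trailing data
    let (rest, deserialized) = PublicKeyDeserializer::new()
        .deserialize::<DeserializeError>(&buffer)
        .unwrap();
    // get_ser_len() marks where the version-prefixed key ends, so the
    // trailing bytes come back untouched in `rest`.
    assert_eq!(rest, &[1u8, 2, 3][..]);
    assert_eq!(deserialized, public_key);
}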
+#[transition::versioned(versions("0", "1"))] #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub struct Signature(ed25519_dalek::Signature); +#[transition::impl_version(versions("0", "1"), structures("Signature"))] +impl Signature { + /// Size of a signature + pub const SIGNATURE_SIZE_BYTES: usize = ed25519_dalek::SIGNATURE_LENGTH; +} impl std::fmt::Display for Signature { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{}", self.to_bs58_check()) + match self { + Signature::SignatureV0(signature) => signature.fmt(f), + Signature::SignatureV1(signature) => signature.fmt(f), + } } } impl FromStr for Signature { type Err = MassaSignatureError; + + /// # Example + /// ``` + /// # use massa_signature::{KeyPair, Signature}; + /// # use massa_hash::Hash; + /// # use std::str::FromStr; + /// + /// let hash = Hash::compute_from("Hello World!".as_bytes()); + /// let signature = KeyPair::generate(0).unwrap().sign(&hash).unwrap(); + /// let string = signature.to_string(); + /// let signature_2 = Signature::from_str(&string).unwrap(); + /// assert_eq!(signature, signature_2); + /// ``` fn from_str(s: &str) -> Result<Self, Self::Err> { - Signature::from_bs58_check(s) + let data = s.chars().collect::<String>(); + let decoded_bs58_check = bs58::decode(data) + .with_check(None) + .into_vec() + .map_err(|_| MassaSignatureError::ParsingError(format!("bad signature bs58: {}", s)))?; + Signature::from_bytes(&decoded_bs58_check) } } @@ -610,31 +920,52 @@ impl Signature { /// # use massa_signature::KeyPair; /// # use massa_hash::Hash; /// # use serde::{Deserialize, Serialize}; - /// let keypair = KeyPair::generate(); + /// let keypair = KeyPair::generate(0).unwrap(); /// let data = Hash::compute_from("Hello World!".as_bytes()); /// let signature = keypair.sign(&data).unwrap(); /// /// let serialized: String = signature.to_bs58_check(); /// ``` pub fn to_bs58_check(&self) -> String { - bs58::encode(self.to_bytes()).with_check().into_string() + match self { + Signature::SignatureV0(signature) => signature.to_bs58_check(), + Signature::SignatureV1(signature) => signature.to_bs58_check(), + } } - /// Serialize a Signature as bytes. + /// Deserialize a `Signature` using `bs58` encoding with checksum. /// /// # Example /// ``` - /// # use massa_signature::KeyPair; + /// # use massa_signature::{KeyPair, Signature}; /// # use massa_hash::Hash; /// # use serde::{Deserialize, Serialize}; - /// let keypair = KeyPair::generate(); + /// let keypair = KeyPair::generate(0).unwrap(); /// let data = Hash::compute_from("Hello World!".as_bytes()); /// let signature = keypair.sign(&data).unwrap(); /// - /// let serialized = signature.to_bytes(); + /// let serialized: String = signature.to_bs58_check(); + /// let deserialized: Signature = Signature::from_bs58_check(&serialized).unwrap(); /// ``` - pub fn to_bytes(&self) -> [u8; SIGNATURE_SIZE_BYTES] { - self.0.to_bytes() + pub fn from_bs58_check(data: &str) -> Result<Signature, MassaSignatureError> { + bs58::decode(data) + .with_check(None) + .into_vec() + .map_err(|err| { + MassaSignatureError::ParsingError(format!( + "signature bs58_check parsing error: {}", + err + )) + }) + .and_then(|signature| Signature::from_bytes(signature.as_slice())) + } + + /// Return the total length after serialization + pub fn get_ser_len(&self) -> usize { + match self { + Signature::SignatureV0(signature) => signature.get_ser_len(), + Signature::SignatureV1(signature) => signature.get_ser_len(), + } } /// Serialize a Signature into bytes.
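[Illustrative aside, not part of this diff: `PublicKey` and `Signature` now share one wire layout, a u64 varint version followed by the fixed-size ed25519 payload. A generic sketch of that framing with the `massa_serialization` types used throughout this file; `frame` and `unframe` are our names:]

use std::ops::Bound::Included;
use massa_serialization::{
    DeserializeError, Deserializer, Serializer, U64VarIntDeserializer, U64VarIntSerializer,
};

// Prefix an opaque payload with its varint-encoded version, as the new
// to_bytes() implementations do.
fn frame(version: u64, payload: &[u8]) -> Vec<u8> {
    let mut bytes = Vec::with_capacity(1 + payload.len());
    U64VarIntSerializer::new().serialize(&version, &mut bytes).unwrap();
    bytes.extend_from_slice(payload);
    bytes
}

// Split the version back off, as the new from_bytes() implementations do
// before dispatching to the matching versioned variant.
fn unframe(data: &[u8]) -> (u64, &[u8]) {
    let deserializer = U64VarIntDeserializer::new(Included(0), Included(u64::MAX));
    let (rest, version) = deserializer
        .deserialize::<DeserializeError>(data)
        .expect("invalid varint version prefix");
    (version, rest)
}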
@@ -644,71 +975,163 @@ impl Signature { /// # use massa_signature::KeyPair; /// # use massa_hash::Hash; /// # use serde::{Deserialize, Serialize}; - /// let keypair = KeyPair::generate(); + /// let keypair = KeyPair::generate(0).unwrap(); /// let data = Hash::compute_from("Hello World!".as_bytes()); /// let signature = keypair.sign(&data).unwrap(); /// - /// let serialized = signature.into_bytes(); + /// let serialized = signature.to_bytes(); /// ``` - pub fn into_bytes(self) -> [u8; SIGNATURE_SIZE_BYTES] { - self.0.to_bytes() + pub fn to_bytes(&self) -> Vec { + match self { + Signature::SignatureV0(signature) => signature.to_bytes(), + Signature::SignatureV1(signature) => signature.to_bytes(), + } } - /// Deserialize a `Signature` using `bs58` encoding with checksum. + /// Deserialize a Signature from bytes. /// /// # Example /// ``` /// # use massa_signature::{KeyPair, Signature}; /// # use massa_hash::Hash; /// # use serde::{Deserialize, Serialize}; - /// let keypair = KeyPair::generate(); + /// let keypair = KeyPair::generate(0).unwrap(); + /// let data = Hash::compute_from("Hello World!".as_bytes()); + /// let signature = keypair.sign(&data).unwrap(); + /// + /// let serialized = signature.to_bytes(); + /// let deserialized: Signature = Signature::from_bytes(&serialized).unwrap(); + /// ``` + pub fn from_bytes(data: &[u8]) -> Result { + let u64_deserializer = U64VarIntDeserializer::new(Included(0), Included(u64::MAX)); + let (rest, version) = u64_deserializer + .deserialize::(data) + .map_err(|err| MassaSignatureError::ParsingError(err.to_string()))?; + match version { + ::VERSION => { + Ok(SignatureVariant!["0"](::from_bytes(rest)?)) + } + ::VERSION => { + Ok(SignatureVariant!["1"](::from_bytes(rest)?)) + } + _ => Err(MassaSignatureError::InvalidVersionError(format!( + "Unknown signature version: {}", + version + ))), + } + } +} + +#[transition::impl_version(versions("0", "1"))] +impl std::fmt::Display for Signature { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "{}", self.to_bs58_check()) + } +} + +#[transition::impl_version(versions("0", "1"), structures("Signature"))] +impl Signature { + /// Serialize a `Signature` using `bs58` encoding with checksum. + /// + /// # Example + /// ``` + /// # use massa_signature::KeyPair; + /// # use massa_hash::Hash; + /// # use serde::{Deserialize, Serialize}; + /// let keypair = KeyPair::generate(0).unwrap(); /// let data = Hash::compute_from("Hello World!".as_bytes()); /// let signature = keypair.sign(&data).unwrap(); /// /// let serialized: String = signature.to_bs58_check(); - /// let deserialized: Signature = Signature::from_bs58_check(&serialized).unwrap(); /// ``` - pub fn from_bs58_check(data: &str) -> Result { - bs58::decode(data) - .with_check(None) - .into_vec() - .map_err(|err| { - MassaSignatureError::ParsingError(format!( - "signature bs58_check parsing error: {}", - err - )) - }) - .and_then(|signature| { - Signature::from_bytes(&signature.try_into().map_err(|err| { - MassaSignatureError::ParsingError(format!( - "signature bs58_check parsing error: {:?}", - err - )) - })?) - }) + pub fn to_bs58_check(self) -> String { + bs58::encode(self.to_bytes()).with_check().into_string() + } + + /// Return the total length after serialization + pub fn get_ser_len(&self) -> usize { + Self::VERSION_VARINT_SIZE_BYTES + Self::SIGNATURE_SIZE_BYTES + } + + /// Serialize a Signature into bytes. 
+ /// + /// # Example + /// ``` + /// # use massa_signature::KeyPair; + /// # use massa_hash::Hash; + /// # use serde::{Deserialize, Serialize}; + /// let keypair = KeyPair::generate(0).unwrap(); + /// let data = Hash::compute_from("Hello World!".as_bytes()); + /// let signature = keypair.sign(&data).unwrap(); + /// + /// let serialized = signature.to_bytes(); + /// ``` + pub fn to_bytes(self) -> Vec { + let version_serializer = U64VarIntSerializer::new(); + let mut bytes: Vec = + Vec::with_capacity(Self::VERSION_VARINT_SIZE_BYTES + Self::SIGNATURE_SIZE_BYTES); + version_serializer + .serialize(&Self::VERSION, &mut bytes) + .unwrap(); + bytes.extend_from_slice(&self.0.to_bytes()); + bytes } /// Deserialize a Signature from bytes. /// + /// IMPORTANT: providing more bytes than needed does not result in an error. + /// /// # Example /// ``` /// # use massa_signature::{KeyPair, Signature}; /// # use massa_hash::Hash; /// # use serde::{Deserialize, Serialize}; - /// let keypair = KeyPair::generate(); + /// let keypair = KeyPair::generate(0).unwrap(); /// let data = Hash::compute_from("Hello World!".as_bytes()); /// let signature = keypair.sign(&data).unwrap(); /// /// let serialized = signature.to_bytes(); /// let deserialized: Signature = Signature::from_bytes(&serialized).unwrap(); /// ``` - pub fn from_bytes(data: &[u8; SIGNATURE_SIZE_BYTES]) -> Result { - ed25519_dalek::Signature::from_bytes(&data[..]) + pub fn from_bytes(data: &[u8]) -> Result { + if data.len() < Self::SIGNATURE_SIZE_BYTES { + return Err(MassaSignatureError::ParsingError( + "signature byte array is of invalid size".to_string(), + )); + } + ed25519_dalek::Signature::from_bytes(&data[..Self::SIGNATURE_SIZE_BYTES]) .map(Self) .map_err(|err| { MassaSignatureError::ParsingError(format!("signature bytes parsing error: {}", err)) }) } + + /// Deserialize a `Signature` using `bs58` encoding with checksum. 
+ /// + /// # Example + /// ``` + /// # use massa_signature::{KeyPair, Signature}; + /// # use massa_hash::Hash; + /// # use serde::{Deserialize, Serialize}; + /// let keypair = KeyPair::generate(0).unwrap(); + /// let data = Hash::compute_from("Hello World!".as_bytes()); + /// let signature = keypair.sign(&data).unwrap(); + /// + /// let serialized: String = signature.to_bs58_check(); + /// let deserialized: Signature = Signature::from_bs58_check(&serialized).unwrap(); + /// ``` + pub fn from_bs58_check(data: &str) -> Result { + bs58::decode(data) + .with_check(None) + .into_vec() + .map_err(|err| { + MassaSignatureError::ParsingError(format!( + "signature bs58_check parsing error: {}", + err + )) + }) + .and_then(|signature_bytes: Vec| Signature::from_bytes(&signature_bytes)) + } } impl ::serde::Serialize for Signature { @@ -724,7 +1147,7 @@ impl ::serde::Serialize for Signature { /// # use massa_signature::{KeyPair, Signature}; /// # use massa_hash::Hash; /// # use serde::{Deserialize, Serialize}; - /// let keypair = KeyPair::generate(); + /// let keypair = KeyPair::generate(0).unwrap(); /// let data = Hash::compute_from("Hello World!".as_bytes()); /// let signature = keypair.sign(&data).unwrap(); /// @@ -753,7 +1176,7 @@ impl<'de> ::serde::Deserialize<'de> for Signature { /// # use massa_signature::{KeyPair, Signature}; /// # use massa_hash::Hash; /// # use serde::{Deserialize, Serialize}; - /// let keypair = KeyPair::generate(); + /// let keypair = KeyPair::generate(0).unwrap(); /// let data = Hash::compute_from("Hello World!".as_bytes()); /// let signature = keypair.sign(&data).unwrap(); /// @@ -763,9 +1186,9 @@ impl<'de> ::serde::Deserialize<'de> for Signature { /// fn deserialize>(d: D) -> Result { if d.is_human_readable() { - struct Base58CheckVisitor; + struct SignatureVisitor; - impl<'de> ::serde::de::Visitor<'de> for Base58CheckVisitor { + impl<'de> ::serde::de::Visitor<'de> for SignatureVisitor { type Value = Signature; fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { @@ -777,7 +1200,7 @@ impl<'de> ::serde::Deserialize<'de> for Signature { E: ::serde::de::Error, { if let Ok(v_str) = std::str::from_utf8(v) { - Signature::from_bs58_check(v_str).map_err(E::custom) + Signature::from_str(v_str).map_err(E::custom) } else { Err(E::invalid_value(::serde::de::Unexpected::Bytes(v), &self)) } @@ -787,10 +1210,10 @@ impl<'de> ::serde::Deserialize<'de> for Signature { where E: ::serde::de::Error, { - Signature::from_bs58_check(v).map_err(E::custom) + Signature::from_str(v).map_err(E::custom) } } - d.deserialize_str(Base58CheckVisitor) + d.deserialize_str(SignatureVisitor) } else { struct BytesVisitor; @@ -805,7 +1228,7 @@ impl<'de> ::serde::Deserialize<'de> for Signature { where E: ::serde::de::Error, { - Signature::from_bytes(v.try_into().map_err(E::custom)?).map_err(E::custom) + Signature::from_bytes(v).map_err(E::custom) } } @@ -831,10 +1254,10 @@ impl Deserializer for SignatureDeserializer { /// use massa_serialization::{DeserializeError, Deserializer}; /// use massa_hash::Hash; /// - /// let keypair = KeyPair::generate(); + /// let keypair = KeyPair::generate(0).unwrap(); /// let data = Hash::compute_from("Hello World!".as_bytes()); /// let signature = keypair.sign(&data).unwrap(); - /// let serialized = signature.into_bytes(); + /// let serialized = signature.to_bytes(); /// let (rest, deser_signature) = SignatureDeserializer::new().deserialize::(&serialized).unwrap(); /// assert!(rest.is_empty()); /// assert_eq!(signature, deser_signature); @@ -843,35 
+1266,18 @@ impl Deserializer for SignatureDeserializer { &self, buffer: &'a [u8], ) -> IResult<&'a [u8], Signature, E> { - // Can't use try into directly because it fails if there is more data in the buffer - if buffer.len() < SIGNATURE_SIZE_BYTES { - return Err(nom::Err::Error(ParseError::from_error_kind( - buffer, - nom::error::ErrorKind::LengthValue, - ))); - } - let signature = Signature::from_bytes(buffer[..SIGNATURE_SIZE_BYTES].try_into().unwrap()) - .map_err(|_| { + let signature = Signature::from_bytes(buffer).map_err(|_| { nom::Err::Error(ParseError::from_error_kind( buffer, nom::error::ErrorKind::Fail, )) })?; - // Safe because the signature deserialization success - Ok((&buffer[SIGNATURE_SIZE_BYTES..], signature)) + // Safe because the signature deserialization succeeded + Ok((&buffer[signature.get_ser_len()..], signature)) } } -/// Verify a batch of signatures on a single core to gain total CPU performance. -/// Every provided triplet `(hash, signature, public_key)` is verified -/// and an error is returned if at least one of them fails. -/// -/// # Arguments -/// * `batch`: a slice of triplets `(hash, signature, public_key)` -/// -/// # Return value -/// Returns `Ok(())` if all signatures were successfully verified, -/// and `Err(MassaSignatureError::SignatureError(_))` if at least one of them failed. +/// Verifies a batch of signatures pub fn verify_signature_batch( batch: &[(Hash, Signature, PublicKey)], ) -> Result<(), MassaSignatureError> { @@ -886,19 +1292,35 @@ pub fn verify_signature_batch( return public_key.verify_signature(&hash, &signature); } - // otherwise, use batch verif - + // otherwise, use batch verification let mut hashes = Vec::with_capacity(batch.len()); let mut signatures = Vec::with_capacity(batch.len()); let mut public_keys = Vec::with_capacity(batch.len()); - batch.iter().for_each(|(hash, signature, public_key)| { + + for (hash, signature_, public_key_) in batch.iter() { + let (signature, public_key) = match (signature_, public_key_) { + (Signature::SignatureV0(s), PublicKey::PublicKeyV0(pk)) => (s.0, pk.0), + (Signature::SignatureV1(s), PublicKey::PublicKeyV1(pk)) => (s.0, pk.0), + _ => { + return Err(MassaSignatureError::InvalidVersionError(String::from( + "Batch contains unsupported or incompatible versions", + ))) + } + }; + hashes.push(hash.to_bytes().as_slice()); - signatures.push(signature.0); - public_keys.push(public_key.0); - }); - verify_batch(&hashes, signatures.as_slice(), public_keys.as_slice()).map_err(|err| { - MassaSignatureError::SignatureError(format!("Batch signature verification failed: {}", err)) - }) + signatures.push(signature); + public_keys.push(public_key); + } + + ed25519_dalek::verify_batch(&hashes, signatures.as_slice(), public_keys.as_slice()).map_err( + |err| { + MassaSignatureError::SignatureError(format!( + "Batch signature verification failed: {}", + err + )) + }, + ) } #[cfg(test)] @@ -910,7 +1332,7 @@ mod tests { #[test] #[serial] fn test_example() { - let keypair = KeyPair::generate(); + let keypair = KeyPair::generate(0).unwrap(); let message = "Hello World!".as_bytes(); let hash = Hash::compute_from(message); let signature = keypair.sign(&hash).unwrap(); @@ -923,17 +1345,25 @@ mod tests { #[test] #[serial] fn test_serde_keypair() { - let keypair = KeyPair::generate(); + let keypair = KeyPair::generate(0).unwrap(); let serialized = serde_json::to_string(&keypair).expect("could not serialize keypair"); let deserialized: KeyPair = serde_json::from_str(&serialized).expect("could not deserialize keypair"); - 
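[Illustrative aside, not part of this diff: the rewritten `verify_signature_batch` above checks a single triplet directly, rejects mixed `PublicKey`/`Signature` versions with `InvalidVersionError`, and otherwise defers to `ed25519_dalek::verify_batch`. A usage sketch, assuming the function is exported from `massa_signature` as in this file:]

use massa_hash::Hash;
use massa_signature::{verify_signature_batch, KeyPair};

fn batch_verification_sketch() {
    // Three triplets of the same version (V0), so the batch path is taken.
    let batch: Vec<_> = (0..3u32)
        .map(|i| {
            let keypair = KeyPair::generate(0).unwrap();
            let hash = Hash::compute_from(format!("message {}", i).as_bytes());
            let signature = keypair.sign(&hash).unwrap();
            (hash, signature, keypair.get_public_key())
        })
        .collect();
    verify_signature_batch(&batch).unwrap();
}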
assert_eq!(keypair.0.public, deserialized.0.public); + + match (keypair, deserialized) { + (KeyPair::KeyPairV0(keypair), KeyPair::KeyPairV0(deserialized)) => { + assert_eq!(keypair.0.public, deserialized.0.public); + } + _ => { + panic!("Wrong version provided"); + } + } } #[test] #[serial] fn test_serde_public_key() { - let keypair = KeyPair::generate(); + let keypair = KeyPair::generate(0).unwrap(); let public_key = keypair.get_public_key(); let serialized = serde_json::to_string(&public_key).expect("Could not serialize public key"); @@ -945,7 +1375,7 @@ mod tests { #[test] #[serial] fn test_serde_signature() { - let keypair = KeyPair::generate(); + let keypair = KeyPair::generate(0).unwrap(); let message = "Hello World!".as_bytes(); let hash = Hash::compute_from(message); let signature = keypair.sign(&hash).unwrap(); diff --git a/massa-storage/Cargo.toml b/massa-storage/Cargo.toml index 15cd84bbe88..b07a1a5c3c9 100644 --- a/massa-storage/Cargo.toml +++ b/massa-storage/Cargo.toml @@ -1,19 +1,16 @@ [package] name = "massa_storage" -version = "0.1.0" +version = "0.23.0" authors = ["Massa Labs "] edition = "2021" [dependencies] parking_lot = { version = "0.12", features = ["deadlock_detection"] } -massa_logging = { path = "../massa-logging" } massa_models = { path = "../massa-models" } -serde_json = "1.0" -tracing = "0.1" [dev-dependencies] massa_factory_exports = { path = "../massa-factory-exports", features=["testing"] } massa_signature = { path = "../massa-signature" } [features] -testing = ["massa_factory_exports/testing"] \ No newline at end of file +testing = ["massa_factory_exports/testing"] diff --git a/massa-storage/src/tests/basic.rs b/massa-storage/src/tests/basic.rs index db98c55f84c..3fd91ce022e 100644 --- a/massa-storage/src/tests/basic.rs +++ b/massa-storage/src/tests/basic.rs @@ -7,7 +7,7 @@ use massa_signature::KeyPair; /// Store a block and retrieve it. 
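[Illustrative aside, not part of this diff: every storage test below changes `KeyPair::generate()` into `KeyPair::generate(0).unwrap()`. A hypothetical fixture helper (our name, not in this PR) would keep that churn in one place:]

use massa_signature::KeyPair;

/// Hypothetical test helper: a keypair of the only version these tests use.
fn test_keypair() -> KeyPair {
    KeyPair::generate(0).expect("keypair version 0 should be supported")
}

// e.g. let block = create_empty_block(&test_keypair(), &Slot::new(0, 0));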
fn test_basic_insert() { let mut storage = Storage::create_root(); - let block = create_empty_block(&KeyPair::generate(), &Slot::new(0, 0)); + let block = create_empty_block(&KeyPair::generate(0).unwrap(), &Slot::new(0, 0)); storage.store_block(block.clone()); let blocks = storage.read_blocks(); @@ -21,7 +21,7 @@ fn test_basic_insert() { /// We expect that it's stored only one time fn test_double_insert() { let mut storage = Storage::create_root(); - let block = create_empty_block(&KeyPair::generate(), &Slot::new(0, 0)); + let block = create_empty_block(&KeyPair::generate(0).unwrap(), &Slot::new(0, 0)); storage.store_block(block.clone()); storage.store_block(block.clone()); diff --git a/massa-storage/src/tests/indexes.rs b/massa-storage/src/tests/indexes.rs index b27e7eae05c..66d8a32b4f7 100644 --- a/massa-storage/src/tests/indexes.rs +++ b/massa-storage/src/tests/indexes.rs @@ -7,7 +7,7 @@ use massa_signature::KeyPair; fn test_block_index_slot() { let mut storage = Storage::create_root(); let slot = Slot::new(0, 0); - let block = create_empty_block(&KeyPair::generate(), &slot); + let block = create_empty_block(&KeyPair::generate(0).unwrap(), &slot); storage.store_block(block.clone()); let blocks = storage.read_blocks(); @@ -20,7 +20,7 @@ fn test_block_index_slot() { fn test_block_index_by_creator() { let mut storage = Storage::create_root(); let slot = Slot::new(0, 0); - let keypair = KeyPair::generate(); + let keypair = KeyPair::generate(0).unwrap(); let block = create_empty_block(&keypair, &slot); storage.store_block(block.clone()); @@ -36,8 +36,8 @@ fn test_block_index_by_creator() { fn test_block_fail_find() { let mut storage = Storage::create_root(); let slot = Slot::new(0, 0); - let keypair = KeyPair::generate(); - let keypair2 = KeyPair::generate(); + let keypair = KeyPair::generate(0).unwrap(); + let keypair2 = KeyPair::generate(0).unwrap(); let block = create_empty_block(&keypair, &slot); storage.store_block(block); diff --git a/massa-storage/src/tests/references.rs b/massa-storage/src/tests/references.rs index 2b96a0c33a5..cf949af207a 100644 --- a/massa-storage/src/tests/references.rs +++ b/massa-storage/src/tests/references.rs @@ -7,7 +7,7 @@ use massa_signature::KeyPair; fn test_clone() { let mut storage = Storage::create_root(); let slot = Slot::new(0, 0); - let block = create_empty_block(&KeyPair::generate(), &slot); + let block = create_empty_block(&KeyPair::generate(0).unwrap(), &slot); storage.store_block(block.clone()); let storage2 = storage.clone(); @@ -20,7 +20,7 @@ fn test_clone() { fn test_clone_without_ref() { let mut storage = Storage::create_root(); let slot = Slot::new(0, 0); - let block = create_empty_block(&KeyPair::generate(), &slot); + let block = create_empty_block(&KeyPair::generate(0).unwrap(), &slot); storage.store_block(block.clone()); let storage2 = storage.clone_without_refs(); @@ -32,7 +32,7 @@ fn test_clone_without_ref() { fn test_retrieve_all_ref_dropped() { let mut storage = Storage::create_root(); let slot = Slot::new(0, 0); - let block = create_empty_block(&KeyPair::generate(), &slot); + let block = create_empty_block(&KeyPair::generate(0).unwrap(), &slot); storage.store_block(block.clone()); let storage2 = storage.clone_without_refs(); @@ -57,7 +57,7 @@ fn test_retrieve_all_ref_dropped() { fn test_retrieve_all_ref_dropped_automatically() { let mut storage = Storage::create_root(); let slot = Slot::new(0, 0); - let block = create_empty_block(&KeyPair::generate(), &slot); + let block = create_empty_block(&KeyPair::generate(0).unwrap(), &slot); 
storage.store_block(block.clone()); let storage2 = storage.clone_without_refs(); diff --git a/massa-time/Cargo.toml b/massa-time/Cargo.toml index bb07914f18e..370aa2ad741 100644 --- a/massa-time/Cargo.toml +++ b/massa-time/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "massa_time" -version = "0.1.0" +version = "0.23.0" authors = ["Massa Labs "] edition = "2021" @@ -11,7 +11,7 @@ time = { version = "0.3", features = ["serde", "formatting"] } displaydoc = "0.2" serde = { version = "1.0", features = ["derive"] } thiserror = "1.0" -nom = "7.1" +nom = "=7.1" # Custom dependencies massa_serialization = { path = "../massa-serialization" } diff --git a/massa-time/src/lib.rs b/massa-time/src/lib.rs index 54a6ff16cd7..3691bfe2956 100644 --- a/massa-time/src/lib.rs +++ b/massa-time/src/lib.rs @@ -18,7 +18,7 @@ use std::{ str::FromStr, }; use time::format_description::well_known::Rfc3339; -use time::OffsetDateTime; +use time::{Date, OffsetDateTime}; /// Time structure used everywhere. /// milliseconds since 01/01/1970. @@ -51,7 +51,7 @@ impl Serializer for MassaTimeSerializer { /// use massa_serialization::Serializer; /// use massa_time::{MassaTime, MassaTimeSerializer}; /// - /// let time: MassaTime = 30.into(); + /// let time: MassaTime = MassaTime::from_millis(30); /// let mut serialized = Vec::new(); /// let serializer = MassaTimeSerializer::new(); /// serializer.serialize(&time, &mut serialized).unwrap(); @@ -91,10 +91,10 @@ impl Deserializer for MassaTimeDeserializer { /// use massa_serialization::{Serializer, Deserializer, DeserializeError}; /// use massa_time::{MassaTime, MassaTimeSerializer, MassaTimeDeserializer}; /// - /// let time: MassaTime = 30.into(); + /// let time: MassaTime = MassaTime::from_millis(30); /// let mut serialized = Vec::new(); /// let serializer = MassaTimeSerializer::new(); - /// let deserializer = MassaTimeDeserializer::new((Included(0.into()), Included(u64::MAX.into()))); + /// let deserializer = MassaTimeDeserializer::new((Included(MassaTime::from_millis(0)), Included(MassaTime::from_millis(u64::MAX)))); /// serializer.serialize(&time, &mut serialized).unwrap(); /// let (rest, time_deser) = deserializer.deserialize::(&serialized).unwrap(); /// assert!(rest.is_empty()); @@ -107,7 +107,7 @@ impl Deserializer for MassaTimeDeserializer { context("Failed MassaTime deserialization", |input| { self.u64_deserializer .deserialize(input) - .map(|(rest, res)| (rest, res.into())) + .map(|(rest, res)| (rest, MassaTime::from_millis(res))) })(buffer) } } @@ -127,7 +127,7 @@ impl TryFrom for MassaTime { /// # use massa_time::*; /// # use std::convert::TryFrom; /// let duration: Duration = Duration::from_millis(42); - /// let time : MassaTime = MassaTime::from(42); + /// let time : MassaTime = MassaTime::from_millis(42); /// assert_eq!(time, MassaTime::try_from(duration).unwrap()); /// ``` fn try_from(value: Duration) -> Result { @@ -140,17 +140,6 @@ impl TryFrom for MassaTime { } } -impl From for MassaTime { - /// Conversion from `u64`, representing timestamp in milliseconds. - /// ``` - /// # use massa_time::*; - /// let time : MassaTime = MassaTime::from(42); - /// ``` - fn from(val: u64) -> Self { - MassaTime(val) - } -} - impl From for Duration { /// Conversion from `massa_time` to duration, representing timestamp in milliseconds. 
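[Illustrative aside, not part of this diff: with `impl From<u64> for MassaTime` removed above, millisecond construction is explicit through `from_millis`. A small sketch of the checked and saturating arithmetic the updated doc examples rely on:]

use massa_time::MassaTime;

fn massa_time_arithmetic_sketch() {
    let genesis = MassaTime::from_millis(1_640_995_200_000);
    let period = MassaTime::from_millis(16_000); // e.g. a 16 s slot period
    // saturating_mul takes a plain u64 factor; checked_add returns a Result
    // instead of silently wrapping on overflow.
    let third_slot = genesis.checked_add(period.saturating_mul(3)).unwrap();
    assert_eq!(third_slot.to_millis(), 1_640_995_200_000 + 48_000);
}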
/// ``` @@ -158,7 +147,7 @@ impl From for Duration { /// # use massa_time::*; /// # use std::convert::Into; /// let duration: Duration = Duration::from_millis(42); - /// let time : MassaTime = MassaTime::from(42); + /// let time : MassaTime = MassaTime::from_millis(42); /// let res: Duration = time.into(); /// assert_eq!(res, duration); /// ``` @@ -176,7 +165,7 @@ impl FromStr for MassaTime { /// # use massa_time::*; /// # use std::str::FromStr; /// let duration: &str = "42"; - /// let time : MassaTime = MassaTime::from(42); + /// let time : MassaTime = MassaTime::from_millis(42); /// /// assert_eq!(time, MassaTime::from_str(duration).unwrap()); /// ``` @@ -210,7 +199,7 @@ impl MassaTime { /// let now_duration : Duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); /// let now_massa_time : MassaTime = MassaTime::now().unwrap(); /// let converted :MassaTime = MassaTime::try_from(now_duration).unwrap(); - /// assert!(max(now_massa_time.saturating_sub(converted), converted.saturating_sub(now_massa_time)) < 100.into()) + /// assert!(max(now_massa_time.saturating_sub(converted), converted.saturating_sub(now_massa_time)) < MassaTime::from_millis(100)) /// ``` pub fn now() -> Result { let now: u64 = SystemTime::now() @@ -227,7 +216,7 @@ impl MassaTime { /// # use std::time::Duration; /// # use massa_time::*; /// let duration: Duration = Duration::from_millis(42); - /// let time : MassaTime = MassaTime::from(42); + /// let time : MassaTime = MassaTime::from_millis(42); /// let res: Duration = time.to_duration(); /// assert_eq!(res, duration); /// ``` @@ -238,7 +227,7 @@ impl MassaTime { /// Conversion to `u64`, representing milliseconds. /// ``` /// # use massa_time::*; - /// let time : MassaTime = MassaTime::from(42); + /// let time : MassaTime = MassaTime::from_millis(42); /// let res: u64 = time.to_millis(); /// assert_eq!(res, 42); /// ``` @@ -271,10 +260,10 @@ impl MassaTime { /// ``` /// # use massa_time::*; - /// let time_1 : MassaTime = MassaTime::from(42); - /// let time_2 : MassaTime = MassaTime::from(7); + /// let time_1 : MassaTime = MassaTime::from_millis(42); + /// let time_2 : MassaTime = MassaTime::from_millis(7); /// let res : MassaTime = time_1.saturating_sub(time_2); - /// assert_eq!(res, MassaTime::from(42-7)) + /// assert_eq!(res, MassaTime::from_millis(42-7)) /// ``` #[must_use] pub fn saturating_sub(self, t: MassaTime) -> Self { @@ -283,10 +272,10 @@ impl MassaTime { /// ``` /// # use massa_time::*; - /// let time_1 : MassaTime = MassaTime::from(42); - /// let time_2 : MassaTime = MassaTime::from(7); + /// let time_1 : MassaTime = MassaTime::from_millis(42); + /// let time_2 : MassaTime = MassaTime::from_millis(7); /// let res : MassaTime = time_1.saturating_add(time_2); - /// assert_eq!(res, MassaTime::from(42+7)) + /// assert_eq!(res, MassaTime::from_millis(42+7)) /// ``` #[must_use] pub fn saturating_add(self, t: MassaTime) -> Self { @@ -295,10 +284,10 @@ impl MassaTime { /// ``` /// # use massa_time::*; - /// let time_1 : MassaTime = MassaTime::from(42); - /// let time_2 : MassaTime = MassaTime::from(7); + /// let time_1 : MassaTime = MassaTime::from_millis(42); + /// let time_2 : MassaTime = MassaTime::from_millis(7); /// let res : MassaTime = time_1.checked_sub(time_2).unwrap(); - /// assert_eq!(res, MassaTime::from(42-7)) + /// assert_eq!(res, MassaTime::from_millis(42-7)) /// ``` pub fn checked_sub(self, t: MassaTime) -> Result { self.0 @@ -309,10 +298,10 @@ impl MassaTime { /// ``` /// # use massa_time::*; - /// let time_1 : MassaTime = 
MassaTime::from(42); - /// let time_2 : MassaTime = MassaTime::from(7); + /// let time_1 : MassaTime = MassaTime::from_millis(42); + /// let time_2 : MassaTime = MassaTime::from_millis(7); /// let res : MassaTime = time_1.checked_add(time_2).unwrap(); - /// assert_eq!(res, MassaTime::from(42+7)) + /// assert_eq!(res, MassaTime::from_millis(42+7)) /// ``` pub fn checked_add(self, t: MassaTime) -> Result { self.0 @@ -323,8 +312,8 @@ impl MassaTime { /// ``` /// # use massa_time::*; - /// let time_1 : MassaTime = MassaTime::from(42); - /// let time_2 : MassaTime = MassaTime::from(7); + /// let time_1 : MassaTime = MassaTime::from_millis(42); + /// let time_2 : MassaTime = MassaTime::from_millis(7); /// let res : u64 = time_1.checked_div_time(time_2).unwrap(); /// assert_eq!(res,42/7) /// ``` @@ -336,9 +325,9 @@ impl MassaTime { /// ``` /// # use massa_time::*; - /// let time_1 : MassaTime = MassaTime::from(42); + /// let time_1 : MassaTime = MassaTime::from_millis(42); /// let res : MassaTime = time_1.checked_div_u64(7).unwrap(); - /// assert_eq!(res,MassaTime::from(42/7)) + /// assert_eq!(res,MassaTime::from_millis(42/7)) /// ``` pub fn checked_div_u64(self, n: u64) -> Result { self.0 @@ -349,9 +338,9 @@ impl MassaTime { /// ``` /// # use massa_time::*; - /// let time_1 : MassaTime = MassaTime::from(42); + /// let time_1 : MassaTime = MassaTime::from_millis(42); /// let res : MassaTime = time_1.saturating_mul(7); - /// assert_eq!(res,MassaTime::from(42*7)) + /// assert_eq!(res,MassaTime::from_millis(42*7)) /// ``` #[must_use] pub const fn saturating_mul(self, n: u64) -> MassaTime { @@ -360,9 +349,9 @@ impl MassaTime { /// ``` /// # use massa_time::*; - /// let time_1 : MassaTime = MassaTime::from(42); + /// let time_1 : MassaTime = MassaTime::from_millis(42); /// let res : MassaTime = time_1.checked_mul(7).unwrap(); - /// assert_eq!(res,MassaTime::from(42*7)) + /// assert_eq!(res,MassaTime::from_millis(42*7)) /// ``` pub fn checked_mul(self, n: u64) -> Result { self.0 @@ -373,10 +362,10 @@ impl MassaTime { /// ``` /// # use massa_time::*; - /// let time_1 : MassaTime = MassaTime::from(42); - /// let time_2 : MassaTime = MassaTime::from(7); + /// let time_1 : MassaTime = MassaTime::from_millis(42); + /// let time_2 : MassaTime = MassaTime::from_millis(7); /// let res : MassaTime = time_1.checked_rem_time(time_2).unwrap(); - /// assert_eq!(res,MassaTime::from(42%7)) + /// assert_eq!(res,MassaTime::from_millis(42%7)) /// ``` pub fn checked_rem_time(self, t: MassaTime) -> Result { self.0 @@ -387,9 +376,9 @@ impl MassaTime { /// ``` /// # use massa_time::*; - /// let time_1 : MassaTime = MassaTime::from(42); + /// let time_1 : MassaTime = MassaTime::from_millis(42); /// let res : MassaTime = time_1.checked_rem_u64(7).unwrap(); - /// assert_eq!(res,MassaTime::from(42%7)) + /// assert_eq!(res,MassaTime::from_millis(42%7)) /// ``` pub fn checked_rem_u64(self, n: u64) -> Result { self.0 @@ -401,11 +390,11 @@ impl MassaTime { /// ``` /// # use massa_time::*; /// - /// let time1 = MassaTime::from(42); - /// let time2 = MassaTime::from(84); + /// let time1 = MassaTime::from_millis(42); + /// let time2 = MassaTime::from_millis(84); /// - /// assert_eq!(time1.abs_diff(time2), MassaTime::from(42)); - /// assert_eq!(time2.abs_diff(time1), MassaTime::from(42)); + /// assert_eq!(time1.abs_diff(time2), MassaTime::from_millis(42)); + /// assert_eq!(time2.abs_diff(time1), MassaTime::from_millis(42)); /// ``` pub fn abs_diff(&self, t: MassaTime) -> MassaTime { MassaTime(self.0.abs_diff(t.0)) @@ -413,17 +402,61 @@ 
impl MassaTime { /// ``` /// # use massa_time::*; - /// let massa_time : MassaTime = MassaTime::from(1_640_995_200_000); - /// assert_eq!(massa_time.to_utc_string(), "2022-01-01T00:00:00Z") + /// let massa_time : MassaTime = MassaTime::from_millis(1_640_995_200_000); + /// assert_eq!(massa_time.format_instant(), String::from("2022-01-01T00:00:00Z")) /// ``` - pub fn to_utc_string(self) -> String { + pub fn format_instant(&self) -> String { let naive = OffsetDateTime::from_unix_timestamp((self.to_millis() / 1000) as i64).unwrap(); naive.format(&Rfc3339).unwrap() } /// ``` /// # use massa_time::*; - /// let massa_time = MassaTime::from(1000 * ( 8 * 24*60*60 + 1 * 60*60 + 3 * 60 + 6 )); + /// let massa_time : MassaTime = MassaTime::from_millis(1000*( 8 * 24*60*60 + 1 * 60*60 + 3 * 60 + 6 )); + /// assert_eq!(massa_time.format_duration().unwrap(), String::from("8 days, 1 hours, 3 minutes, 6 seconds")) + /// ``` + pub fn format_duration(&self) -> Result { + let (days, hours, mins, secs) = self.days_hours_mins_secs()?; + Ok(format!( + "{} days, {} hours, {} minutes, {} seconds", + days, hours, mins, secs + )) + } + + /// ``` + /// # use massa_time::*; + /// let massa_time : MassaTime = MassaTime::from_utc_ymd_hms(2022, 2, 5, 22, 50, 40).unwrap(); + /// assert_eq!(massa_time.format_instant(), String::from("2022-02-05T22:50:40Z")) + /// ``` + pub fn from_utc_ymd_hms( + year: i32, + month: u8, + day: u8, + hour: u8, + minute: u8, + second: u8, + ) -> Result { + let month = month.try_into().map_err(|_| TimeError::ConversionError)?; + + let date = + Date::from_calendar_date(year, month, day).map_err(|_| TimeError::ConversionError)?; + + let date_time = date + .with_hms(hour, minute, second) + .map_err(|_| TimeError::ConversionError)? + .assume_utc(); + + Ok(MassaTime::from_millis( + date_time + .unix_timestamp_nanos() + .checked_div(1_000_000) + .ok_or(TimeError::ConversionError)? 
as u64, + )) + } + + /// ``` + /// # use massa_time::*; + /// let massa_time = MassaTime::from_millis(1000 * ( 8 * 24*60*60 + 1 * 60*60 + 3 * 60 + 6 )); /// let (days, hours, mins, secs) = massa_time.days_hours_mins_secs().unwrap(); /// assert_eq!(days, 8); /// assert_eq!(hours, 1); diff --git a/massa-versioning-exports/tests/mod.rs b/massa-versioning-exports/tests/mod.rs deleted file mode 100644 index 8b137891791..00000000000 --- a/massa-versioning-exports/tests/mod.rs +++ /dev/null @@ -1 +0,0 @@ - diff --git a/massa-versioning-worker/src/versioning.rs b/massa-versioning-worker/src/versioning.rs deleted file mode 100644 index a4a47224d5d..00000000000 --- a/massa-versioning-worker/src/versioning.rs +++ /dev/null @@ -1,1253 +0,0 @@ -use std::cmp::Ordering; -use std::collections::{BTreeMap, BTreeSet, HashMap, VecDeque}; -use std::hash::{Hash, Hasher}; -use std::ops::Deref; -use std::sync::Arc; - -use machine::{machine, transitions}; -use num_enum::{IntoPrimitive, TryFromPrimitive}; -use parking_lot::RwLock; -use thiserror::Error; -use tracing::warn; - -use massa_models::{amount::Amount, config::VERSIONING_THRESHOLD_TRANSITION_ACCEPTED}; -use massa_time::MassaTime; - -// TODO: add more items here -/// Versioning component enum -#[allow(missing_docs)] -#[derive(Clone, Debug, PartialEq, Eq, Hash, TryFromPrimitive, IntoPrimitive)] -#[repr(u32)] -pub enum MipComponent { - Address, - Block, - VM, -} - -/// MIP info (name & versions & time range for a MIP) -#[derive(Clone, Debug)] -pub struct MipInfo { - /// MIP name or descriptive name - pub name: String, - /// Network (or global) version (to be included in block header) - pub version: u32, - /// Components concerned by this versioning (e.g. a new Block version), and the associated component_version - pub components: HashMap, - /// a timestamp at which the version gains its meaning (e.g. 
announced in block header) - pub start: MassaTime, - /// a timestamp at the which the deployment is considered failed - pub timeout: MassaTime, - /// Once deployment has been locked, wait for this duration before deployment is considered active - pub activation_delay: MassaTime, -} - -// Need Ord / PartialOrd so it is properly sorted in BTreeMap - -impl Ord for MipInfo { - fn cmp(&self, other: &Self) -> Ordering { - (self.start, &self.timeout).cmp(&(other.start, &other.timeout)) - } -} - -impl PartialOrd for MipInfo { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl PartialEq for MipInfo { - fn eq(&self, other: &Self) -> bool { - self.name == other.name - && self.version == other.version - && self.components == other.components - && self.start == other.start - && self.timeout == other.timeout - && self.activation_delay == other.activation_delay - } -} - -impl Eq for MipInfo {} - -// Need to impl this manually otherwise clippy is angry :-P -impl Hash for MipInfo { - fn hash(&self, state: &mut H) { - self.name.hash(state); - self.version.hash(state); - self.components.iter().for_each(|c| c.hash(state)); - self.start.hash(state); - self.timeout.hash(state); - } -} - -machine!( - /// State machine for a Versioning component that tracks the deployment state - #[derive(Clone, Copy, Debug, PartialEq)] - pub(crate) enum ComponentState { - /// Initial state - Defined, - /// Past start, can only go to LockedIn after the threshold is above a given value - Started { pub(crate) threshold: Amount }, - /// Wait for some time before going to active (to let user the time to upgrade) - LockedIn { pub(crate) at: MassaTime }, - /// After LockedIn, deployment is considered successful (after activation delay) - Active, - /// Past the timeout, if LockedIn is not reach - Failed, - } -); - -impl Default for ComponentState { - fn default() -> Self { - Self::Defined(Defined {}) - } -} - -#[allow(missing_docs)] -#[derive(IntoPrimitive, Debug, Clone, Eq, PartialEq, TryFromPrimitive, PartialOrd, Ord)] -#[repr(u32)] -pub enum ComponentStateTypeId { - Error = 0, - Defined = 1, - Started = 2, - LockedIn = 3, - Active = 4, - Failed = 5, -} - -impl From<&ComponentState> for ComponentStateTypeId { - fn from(value: &ComponentState) -> Self { - match value { - ComponentState::Error => ComponentStateTypeId::Error, - ComponentState::Defined(_) => ComponentStateTypeId::Defined, - ComponentState::Started(_) => ComponentStateTypeId::Started, - ComponentState::LockedIn(_) => ComponentStateTypeId::LockedIn, - ComponentState::Active(_) => ComponentStateTypeId::Active, - ComponentState::Failed(_) => ComponentStateTypeId::Failed, - } - } -} - -/// A message to update the `ComponentState` -#[derive(Clone, Debug)] -pub struct Advance { - /// from VersioningInfo.start - pub start_timestamp: MassaTime, - /// from VersioningInfo.timeout - pub timeout: MassaTime, - /// % of past blocks with this version - pub threshold: Amount, - /// Current time (timestamp) - pub now: MassaTime, - /// TODO - pub activation_delay: MassaTime, -} - -// Need Ord / PartialOrd so it is properly sorted in BTreeMap - -impl Ord for Advance { - fn cmp(&self, other: &Self) -> Ordering { - (self.now).cmp(&other.now) - } -} - -impl PartialOrd for Advance { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl PartialEq for Advance { - fn eq(&self, other: &Self) -> bool { - self.start_timestamp == other.start_timestamp - && self.timeout == other.timeout - && self.threshold == other.threshold - && 
self.now == other.now - } -} - -impl Eq for Advance {} - -transitions!(ComponentState, - [ - (Defined, Advance) => [Defined, Started, Failed], - (Started, Advance) => [Started, LockedIn, Failed], - (LockedIn, Advance) => [LockedIn, Active], - (Active, Advance) => Active, - (Failed, Advance) => Failed - ] -); - -impl Defined { - /// Update state from state Defined - pub fn on_advance(self, input: Advance) -> ComponentState { - match input.now { - n if n >= input.timeout => ComponentState::failed(), - n if n >= input.start_timestamp => ComponentState::started(Amount::zero()), - _ => ComponentState::Defined(Defined {}), - } - } -} - -impl Started { - /// Update state from state Started - pub fn on_advance(self, input: Advance) -> ComponentState { - if input.now > input.timeout { - return ComponentState::failed(); - } - - if input.threshold >= VERSIONING_THRESHOLD_TRANSITION_ACCEPTED { - ComponentState::locked_in(input.now) - } else { - ComponentState::started(input.threshold) - } - } -} - -impl LockedIn { - /// Update state from state LockedIn ... - pub fn on_advance(self, input: Advance) -> ComponentState { - if input.now > self.at.saturating_add(input.activation_delay) { - ComponentState::active() - } else { - ComponentState::locked_in(self.at) - } - } -} - -impl Active { - /// Update state (will always stay in state Active) - pub fn on_advance(self, _input: Advance) -> Active { - Active {} - } -} - -impl Failed { - /// Update state (will always stay in state Failed) - pub fn on_advance(self, _input: Advance) -> Failed { - Failed {} - } -} - -/// Wrapper of ComponentState (in order to keep state history) -#[derive(Debug, Clone, PartialEq)] -pub struct MipState { - pub(crate) state: ComponentState, - pub(crate) history: BTreeMap, -} - -impl MipState { - /// Create - pub fn new(defined: MassaTime) -> Self { - let state: ComponentState = Default::default(); // Default is Defined - let state_id = ComponentStateTypeId::from(&state); - // Build a 'dummy' advance msg for state Defined, this is to avoid using an - // Option in MipStateHistory::history - let advance = Advance { - start_timestamp: MassaTime::from(0), - timeout: MassaTime::from(0), - threshold: Default::default(), - now: defined, - activation_delay: MassaTime::from(0), - }; - - let history = BTreeMap::from([(advance, state_id)]); - Self { state, history } - } - - /// Advance the state - /// Can be called as multiple times as it will only store what changes the state in history - pub fn on_advance(&mut self, input: &Advance) { - let now = input.now; - // Check that input.now is after last item in history - // We don't want to go backward - let is_forward = self - .history - .last_key_value() - .map(|(adv, _)| adv.now < now) - .unwrap_or(false); - - if is_forward { - // machines crate (for state machine) does not support passing ref :-/ - let state = self.state.on_advance(input.clone()); - // Update history as well - if state != self.state { - let state_id = ComponentStateTypeId::from(&state); - - // Avoid storing too much things in history - // Here we avoid storing for every threshold update - if !(matches!(state, ComponentState::Started(Started { .. })) - && matches!(self.state, ComponentState::Started(Started { .. }))) - { - self.history.insert(input.clone(), state_id); - } - self.state = state; - } - } - } - - /// Given a corresponding VersioningInfo, check if state is coherent - /// it is coherent - /// if state can be at this position (e.g. 
can it be at state "Started" according to given time range) - /// if history is coherent with current state - /// Return false for state == ComponentState::Error - pub fn is_coherent_with(&self, versioning_info: &MipInfo) -> bool { - // TODO: rename versioning_info -> mip_info - - // Always return false for state Error or if history is empty - if matches!(&self.state, &ComponentState::Error) || self.history.is_empty() { - return false; - } - - // safe to unwrap (already tested if empty or not) - let (initial_ts, initial_state_id) = self.history.first_key_value().unwrap(); - if *initial_state_id != ComponentStateTypeId::Defined { - // self.history does not start with Defined -> (always) false - return false; - } - - // Build a new VersionStateHistory from initial state, replaying the whole history - // but with given versioning info then compare - let mut vsh = MipState::new(initial_ts.now); - let mut advance_msg = Advance { - start_timestamp: versioning_info.start, - timeout: versioning_info.timeout, - threshold: Amount::zero(), - now: initial_ts.now, - activation_delay: versioning_info.activation_delay, - }; - - for (adv, _state) in self.history.iter().skip(1) { - advance_msg.now = adv.now; - advance_msg.threshold = adv.threshold; - vsh.on_advance(&advance_msg); - } - - vsh == *self - } - - /// Query state at given timestamp - /// TODO: add doc for start & timeout parameter? why do we need them? - pub fn state_at( - &self, - ts: MassaTime, - start: MassaTime, - timeout: MassaTime, - ) -> Result { - if self.history.is_empty() { - return Err(StateAtError::EmptyHistory); - } - - // Optim: this avoids iterating over history (cheap to retrieve first item) - let first = self.history.first_key_value().unwrap(); // safe to unwrap - if ts < first.0.now { - // Before initial state - return Err(StateAtError::BeforeInitialState(first.1.clone(), ts)); - } - - // At this point, we are >= the first state in history - let mut lower_bound = None; - let mut higher_bound = None; - let mut is_after_last = false; - - // Optim: this avoids iterating over history (cheap to retrieve first item) - let last = self.history.last_key_value().unwrap(); // safe to unwrap - if ts > last.0.now { - lower_bound = Some(last); - is_after_last = true; - } - - if !is_after_last { - // We are in between two states in history, find bounds - for (adv, state_id) in self.history.iter() { - if adv.now <= ts { - lower_bound = Some((adv, state_id)); - } - if adv.now >= ts && higher_bound.is_none() { - higher_bound = Some((adv, state_id)); - break; - } - } - } - - match (lower_bound, higher_bound) { - (Some((_adv_1, st_id_1)), Some((_adv_2, _st_id_2))) => { - // Between 2 states (e.g. between Defined && Started) -> return Defined - Ok(st_id_1.clone()) - } - (Some((adv, st_id)), None) => { - // After the last state in history -> need to advance the state and return - let threshold_for_transition = VERSIONING_THRESHOLD_TRANSITION_ACCEPTED; - // Note: Please update this if MipState transitions change as it might not hold true - if *st_id == ComponentStateTypeId::Started - && adv.threshold < threshold_for_transition - && ts < adv.timeout - { - Err(StateAtError::Unpredictable) - } else { - let msg = Advance { - start_timestamp: start, - timeout, - threshold: adv.threshold, - now: ts, - activation_delay: adv.activation_delay, - }; - // Return the resulting state after advance - let state = self.state.on_advance(msg); - Ok(ComponentStateTypeId::from(&state)) - } - } - _ => { - // 1. Before the first state in history: already covered - // 2. 
None, None: already covered - empty history - Err(StateAtError::EmptyHistory) - } - } - } -} - -/// Error returned by MipStateHistory::state_at -#[allow(missing_docs)] -#[derive(Error, Debug, PartialEq)] -pub enum StateAtError { - #[error("Initial state ({0:?}) only defined after timestamp: {1}")] - BeforeInitialState(ComponentStateTypeId, MassaTime), - #[error("Empty history, should never happen")] - EmptyHistory, - #[error("Cannot predict in the future (~ threshold not reach yet)")] - Unpredictable, -} - -// Store - -/// Database for all MIP info -#[derive(Debug, Clone)] -pub struct MipStore(pub Arc>); - -impl MipStore { - /// Retrieve the current network version to set in block header - pub fn get_network_version_current(&self) -> u32 { - let lock = self.0.read(); - let store = lock.deref(); - // Current version == last active - store - .store - .iter() - .rev() - .find_map(|(k, v)| (v.state == ComponentState::active()).then_some(k.version)) - .unwrap_or(0) - } - - /// Retrieve the network version number to announce in block header - /// return 0 is there is nothing to announce - pub fn get_network_version_to_announce(&self) -> u32 { - let lock = self.0.read(); - let store = lock.deref(); - // Announce the latest versioning info in Started / LockedIn state - // Defined == Not yet ready to announce - // Active == current version - store - .store - .iter() - .rev() - .find_map(|(k, v)| { - matches!( - &v.state, - &ComponentState::Started(_) | &ComponentState::LockedIn(_) - ) - .then_some(k.version) - }) - .unwrap_or(0) - } - - pub fn update_network_version_stats( - &mut self, - slot_timestamp: MassaTime, - network_versions: Option<(u32, u32)>, - ) { - let mut lock = self.0.write(); - lock.update_network_version_stats(slot_timestamp, network_versions); - } - - #[allow(clippy::result_unit_err)] - pub fn update_with( - &mut self, - mip_store: &MipStore, - ) -> Result<(Vec, Vec), ()> { - let mut lock = self.0.write(); - let lock_other = mip_store.0.read(); - lock.update_with(lock_other.deref()) - } -} - -impl TryFrom<([(MipInfo, MipState); N], MipStatsConfig)> for MipStore { - type Error = (); - - fn try_from( - (value, cfg): ([(MipInfo, MipState); N], MipStatsConfig), - ) -> Result { - MipStoreRaw::try_from((value, cfg)).map(|store_raw| Self(Arc::new(RwLock::new(store_raw)))) - } -} - -/// Statistics in MipStoreRaw -#[derive(Debug, Clone, PartialEq)] -pub struct MipStatsConfig { - pub block_count_considered: usize, - pub counters_max: usize, -} - -/// In order for a MIP to be accepted, we compute stats about other node 'network' version announcement -#[derive(Debug, Clone, PartialEq)] -pub(crate) struct MipStoreStats { - pub(crate) config: MipStatsConfig, - pub(crate) latest_announcements: VecDeque, - pub(crate) network_version_counters: BTreeMap, -} - -impl MipStoreStats { - pub(crate) fn new(config: MipStatsConfig) -> Self { - Self { - config: config.clone(), - latest_announcements: VecDeque::with_capacity(config.block_count_considered), - network_version_counters: Default::default(), - } - } -} - -/// Store of all versioning info -#[derive(Debug, Clone, PartialEq)] -pub struct MipStoreRaw { - pub(crate) store: BTreeMap, - pub(crate) stats: MipStoreStats, -} - -impl MipStoreRaw { - /// Update our store with another (usually after a bootstrap where we received another store) - /// Return list of updated / added if update is successful - #[allow(clippy::result_unit_err)] - pub fn update_with( - &mut self, - store_raw: &MipStoreRaw, - ) -> Result<(Vec, Vec), ()> { - // iter over items in 
given store: - // -> 2 cases: - // * MipInfo is already in self store -> add to 'to_update' list - // * MipInfo is not in self.store -> We received a new MipInfo so we are running an out dated version - // of the software - // We then return the list of new MipInfo so we can warn and ask - // to update the software - - let mut component_versions: HashMap = self - .store - .iter() - .flat_map(|c| { - c.0.components - .iter() - .map(|(mip_component, component_version)| { - (mip_component.clone(), *component_version) - }) - }) - .collect(); - let mut names: BTreeSet = self.store.iter().map(|i| i.0.name.clone()).collect(); - let mut to_update: BTreeMap = Default::default(); - let mut to_add: BTreeMap = Default::default(); - let mut should_merge = true; - - for (v_info, v_state) in store_raw.store.iter() { - if !v_state.is_coherent_with(v_info) { - // As soon as we found one non coherent state we abort the merge - should_merge = false; - break; - } - - if let Some(v_state_orig) = self.store.get(v_info) { - // Versioning info (from right) is already in self (left) - // Need to check if we add this to 'to_update' list - let v_state_id: u32 = ComponentStateTypeId::from(&v_state.state).into(); - let v_state_orig_id: u32 = ComponentStateTypeId::from(&v_state_orig.state).into(); - - if matches!( - v_state_orig.state, - ComponentState::Defined(_) - | ComponentState::Started(_) - | ComponentState::LockedIn(_) - ) { - // Only accept 'higher' state - // (e.g. 'started' if 'defined', 'locked in' if 'started'...) - if v_state_id > v_state_orig_id { - to_update.insert(v_info.clone(), v_state.clone()); - } else { - // Trying to downgrade state' (e.g. trying to go from 'active' -> 'defined') - should_merge = false; - break; - } - } - } else { - // Versioning info (from right) is not in self.0 (left) - // Need to check if we add this to 'to_add' list - - let last_v_info_ = to_add - .last_key_value() - .map(|i| i.0) - .or(self.store.last_key_value().map(|i| i.0)); - - if let Some(last_v_info) = last_v_info_ { - // check for versions of all components in v_info - let mut component_version_compatible = true; - for component in v_info.components.iter() { - if component.1 <= component_versions.get(component.0).unwrap_or(&0) { - component_version_compatible = false; - break; - } - } - - if v_info.start > last_v_info.timeout - && v_info.timeout > v_info.start - && v_info.version > last_v_info.version - && !names.contains(&v_info.name) - && component_version_compatible - { - // Time range is ok / version is ok / name is unique, let's add it - to_add.insert(v_info.clone(), v_state.clone()); - names.insert(v_info.name.clone()); - for component in v_info.components.iter() { - component_versions.insert(component.0.clone(), *component.1); - } - } else { - // Something is wrong (time range not ok? / version not incr? / names? - // or component version not incr?) 
- should_merge = false; - break; - } - } else { - // to_add is empty && self.0 is empty - to_add.insert(v_info.clone(), v_state.clone()); - names.insert(v_info.name.clone()); - } - } - } - - if should_merge { - let added = to_add.keys().cloned().collect(); - let updated = to_update.keys().cloned().collect(); - - self.store.append(&mut to_update); - self.store.append(&mut to_add); - Ok((updated, added)) - } else { - Err(()) - } - } - - fn update_network_version_stats( - &mut self, - slot_timestamp: MassaTime, - network_versions: Option<(u32, u32)>, - ) { - if let Some((_current_network_version, announced_network_version)) = network_versions { - let removed_version_ = match self.stats.latest_announcements.len() { - n if n > self.stats.config.block_count_considered => { - self.stats.latest_announcements.pop_front() - } - _ => None, - }; - self.stats - .latest_announcements - .push_back(announced_network_version); - - // We update the count of the received version - *self - .stats - .network_version_counters - .entry(announced_network_version) - .or_default() += 1; - - if let Some(removed_version) = removed_version_ { - *self - .stats - .network_version_counters - .entry(removed_version) - .or_insert(1) -= 1; - } - - // Cleanup for the counters - if self.stats.network_version_counters.len() > self.stats.config.counters_max { - if let Some((version, count)) = self.stats.network_version_counters.pop_first() { - // TODO: return version / count for unit tests? - warn!( - "MipStoreStats removed counter for version {}, count was: {}", - version, count - ) - } - } - - self.advance_states_on_updated_stats(slot_timestamp); - } - } - - /// Used internally by `update_network_version_stats` - fn advance_states_on_updated_stats(&mut self, slot_timestamp: MassaTime) { - for (mi, state) in self.store.iter_mut() { - let network_version_count = *self - .stats - .network_version_counters - .get(&mi.version) - .unwrap_or(&0) as f32; - let block_count_considered = self.stats.config.block_count_considered as f32; - - let vote_ratio_ = 100.0 * network_version_count / block_count_considered; - - let vote_ratio = Amount::from_mantissa_scale(vote_ratio_.round() as u64, 0); - - let advance_msg = Advance { - start_timestamp: mi.start, - timeout: mi.timeout, - threshold: vote_ratio, - now: slot_timestamp, - activation_delay: mi.activation_delay, - }; - - // TODO / OPTIM: filter the store to avoid advancing on failed and active versions - state.on_advance(&advance_msg.clone()); - } - } -} - -impl TryFrom<([(MipInfo, MipState); N], MipStatsConfig)> for MipStoreRaw { - type Error = (); - - fn try_from( - (value, cfg): ([(MipInfo, MipState); N], MipStatsConfig), - ) -> Result { - // Build an empty store - let mut store = Self { - store: Default::default(), - stats: MipStoreStats::new(cfg.clone()), - }; - - // Build another one with given value - let other_store = Self { - store: BTreeMap::from(value), - stats: MipStoreStats::new(cfg), - }; - - // Use update_with ensuring that we have no overlapping time range, unique names & ... 
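// Illustrative sketch (hypothetical values, echoing `test_merge_with_invalid` further down):
// try_from accepts MIP lists whose time ranges are strictly sequential, with increasing
// versions and unique names, and rejects any overlap:
//
//     let cfg = MipStatsConfig { block_count_considered: 10, counters_max: 5 };
//     let mi_1 = MipInfo {
//         name: "MIP-0002".to_string(),
//         version: 2,
//         components: HashMap::from([(MipComponent::Address, 1)]),
//         start: MassaTime::from(2),
//         timeout: MassaTime::from(5),
//         activation_delay: MassaTime::from(2),
//     };
//     let mut mi_2 = mi_1.clone();
//     mi_2.name = "MIP-0003".to_string();
//     mi_2.version = 3;
//     mi_2.components = HashMap::from([(MipComponent::Address, 2)]);
//     mi_2.start = MassaTime::from(6); // strictly after mi_1.timeout
//     mi_2.timeout = MassaTime::from(9);
//     let states = [
//         (mi_1.clone(), MipState::new(MassaTime::from(1))),
//         (mi_2.clone(), MipState::new(MassaTime::from(1))),
//     ];
//     assert!(MipStoreRaw::try_from((states, cfg.clone())).is_ok());
//     // start == previous timeout counts as overlapping and is rejected:
//     mi_2.start = mi_1.timeout;
//     assert!(MipStoreRaw::try_from((
//         [
//             (mi_1, MipState::new(MassaTime::from(1))),
//             (mi_2, MipState::new(MassaTime::from(1))),
//         ],
//         cfg,
//     ))
//     .is_err());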
- match store.update_with(&other_store) { - Ok(_) => Ok(store), - Err(_) => Err(()), - } - } -} - -// End Store - -#[cfg(test)] -mod test { - use super::*; - - use std::str::FromStr; - - use chrono::{Days, NaiveDate, NaiveDateTime}; - - use crate::test_helpers::versioning_helpers::advance_state_until; - - use massa_models::config::{MIP_STORE_STATS_BLOCK_CONSIDERED, MIP_STORE_STATS_COUNTERS_MAX}; - - // Only for unit tests - impl PartialEq for MipState { - fn eq(&self, other: &ComponentState) -> bool { - self.state == *other - } - } - - impl From<(&MipInfo, &Amount, &MassaTime)> for Advance { - fn from((mip_info, threshold, now): (&MipInfo, &Amount, &MassaTime)) -> Self { - Self { - start_timestamp: mip_info.start, - timeout: mip_info.timeout, - threshold: *threshold, - now: *now, - activation_delay: mip_info.activation_delay, - } - } - } - - fn get_a_version_info() -> (NaiveDateTime, NaiveDateTime, MipInfo) { - // A helper function to provide a default VersioningInfo - // Models a Massa Improvements Proposal (MIP-0002), transitioning component address to v2 - - let start: NaiveDateTime = NaiveDate::from_ymd_opt(2017, 11, 01) - .unwrap() - .and_hms_opt(7, 33, 44) - .unwrap(); - - let timeout: NaiveDateTime = NaiveDate::from_ymd_opt(2017, 11, 11) - .unwrap() - .and_hms_opt(7, 33, 44) - .unwrap(); - - return ( - start, - timeout, - MipInfo { - name: "MIP-0002".to_string(), - version: 2, - components: HashMap::from([(MipComponent::Address, 1)]), - start: MassaTime::from(start.timestamp() as u64), - timeout: MassaTime::from(timeout.timestamp() as u64), - activation_delay: MassaTime::from(20), - }, - ); - } - - #[test] - fn test_state_advance_from_defined() { - // Test Versioning state transition (from state: Defined) - let (_, _, mi) = get_a_version_info(); - let mut state: ComponentState = Default::default(); - assert_eq!(state, ComponentState::defined()); - - let now = mi.start.saturating_sub(MassaTime::from(1)); - let mut advance_msg = Advance::from((&mi, &Amount::zero(), &now)); - - state = state.on_advance(advance_msg.clone()); - assert_eq!(state, ComponentState::defined()); - - let now = mi.start.saturating_add(MassaTime::from(5)); - advance_msg.now = now; - state = state.on_advance(advance_msg); - - // println!("state: {:?}", state); - assert_eq!( - state, - ComponentState::Started(Started { - threshold: Amount::zero() - }) - ); - } - - #[test] - fn test_state_advance_from_started() { - // Test Versioning state transition (from state: Started) - let (_, _, mi) = get_a_version_info(); - let mut state: ComponentState = ComponentState::started(Default::default()); - - let now = mi.start; - let threshold_too_low = Amount::from_str("74.9").unwrap(); - let threshold_ok = Amount::from_str("82.42").unwrap(); - let mut advance_msg = Advance::from((&mi, &threshold_too_low, &now)); - - state = state.on_advance(advance_msg.clone()); - assert_eq!(state, ComponentState::started(threshold_too_low)); - advance_msg.threshold = threshold_ok; - state = state.on_advance(advance_msg); - assert_eq!(state, ComponentState::locked_in(now)); - } - - #[test] - fn test_state_advance_from_locked_in() { - // Test Versioning state transition (from state: LockedIn) - let (_, _, mi) = get_a_version_info(); - - let locked_in_at = mi.start.saturating_add(MassaTime::from(1)); - let mut state: ComponentState = ComponentState::locked_in(locked_in_at); - - let now = mi.start; - let mut advance_msg = Advance::from((&mi, &Amount::zero(), &now)); - - state = state.on_advance(advance_msg.clone()); - assert_eq!(state, 
ComponentState::locked_in(locked_in_at)); - - advance_msg.now = advance_msg.timeout.saturating_add(MassaTime::from(1)); - state = state.on_advance(advance_msg); - assert_eq!(state, ComponentState::active()); - } - - #[test] - fn test_state_advance_from_active() { - // Test Versioning state transition (from state: Active) - let (_, _, mi) = get_a_version_info(); - let mut state = ComponentState::active(); - let now = mi.start; - let advance = Advance::from((&mi, &Amount::zero(), &now)); - - state = state.on_advance(advance); - assert_eq!(state, ComponentState::active()); - } - - #[test] - fn test_state_advance_from_failed() { - // Test Versioning state transition (from state: Failed) - let (_, _, mi) = get_a_version_info(); - let mut state = ComponentState::failed(); - let now = mi.start; - let advance = Advance::from((&mi, &Amount::zero(), &now)); - state = state.on_advance(advance); - assert_eq!(state, ComponentState::failed()); - } - - #[test] - fn test_state_advance_to_failed() { - // Test Versioning state transition (to state: Failed) - let (_, _, mi) = get_a_version_info(); - let now = mi.timeout.saturating_add(MassaTime::from(1)); - let advance_msg = Advance::from((&mi, &Amount::zero(), &now)); - - let mut state: ComponentState = Default::default(); // Defined - state = state.on_advance(advance_msg.clone()); - assert_eq!(state, ComponentState::Failed(Failed {})); - - let mut state = ComponentState::started(Default::default()); - state = state.on_advance(advance_msg.clone()); - assert_eq!(state, ComponentState::Failed(Failed {})); - } - - #[test] - fn test_state_with_history() { - // Test MipStateHistory::state_at() function - - let (start, _, mi) = get_a_version_info(); - let now_0 = MassaTime::from(start.timestamp() as u64); - let mut state = MipState::new(now_0); - - assert_eq!(state, ComponentState::defined()); - - let now = mi.start.saturating_add(MassaTime::from(15)); - let mut advance_msg = Advance::from((&mi, &Amount::zero(), &now)); - - // Move from Defined -> Started - state.on_advance(&advance_msg); - assert_eq!(state, ComponentState::started(Amount::zero())); - - // Check history - assert_eq!(state.history.len(), 2); - assert!(matches!( - state.history.first_key_value(), - Some((&Advance { .. }, &ComponentStateTypeId::Defined)) - )); - assert!(matches!( - state.history.last_key_value(), - Some((&Advance { .. 
}, &ComponentStateTypeId::Started)) - )); - - // Query with timestamp - - // Before Defined - let state_id_ = state.state_at( - mi.start.saturating_sub(MassaTime::from(5)), - mi.start, - mi.timeout, - ); - assert!(matches!( - state_id_, - Err(StateAtError::BeforeInitialState(_, _)) - )); - // After Defined timestamp - let state_id = state.state_at(mi.start, mi.start, mi.timeout).unwrap(); - assert_eq!(state_id, ComponentStateTypeId::Defined); - // At Started timestamp - let state_id = state.state_at(now, mi.start, mi.timeout).unwrap(); - assert_eq!(state_id, ComponentStateTypeId::Started); - - // After Started timestamp but before timeout timestamp - let after_started_ts = now.saturating_add(MassaTime::from(15)); - let state_id_ = state.state_at(after_started_ts, mi.start, mi.timeout); - assert_eq!(state_id_, Err(StateAtError::Unpredictable)); - - // After Started timestamp and after timeout timestamp - let after_timeout_ts = mi.timeout.saturating_add(MassaTime::from(15)); - let state_id = state - .state_at(after_timeout_ts, mi.start, mi.timeout) - .unwrap(); - assert_eq!(state_id, ComponentStateTypeId::Failed); - - // Move from Started to LockedIn - let threshold = VERSIONING_THRESHOLD_TRANSITION_ACCEPTED; - advance_msg.threshold = threshold.saturating_add(Amount::from_str("1.0").unwrap()); - advance_msg.now = now.saturating_add(MassaTime::from(1)); - state.on_advance(&advance_msg); - assert_eq!(state, ComponentState::locked_in(advance_msg.now)); - - // Query with timestamp - // After LockedIn timestamp and before timeout timestamp - let after_locked_in_ts = now.saturating_add(MassaTime::from(10)); - let state_id = state - .state_at(after_locked_in_ts, mi.start, mi.timeout) - .unwrap(); - assert_eq!(state_id, ComponentStateTypeId::LockedIn); - // After LockedIn timestamp and after timeout timestamp - let state_id = state - .state_at(after_timeout_ts, mi.start, mi.timeout) - .unwrap(); - assert_eq!(state_id, ComponentStateTypeId::Active); - } - - #[test] - fn test_versioning_store_announce_current() { - // Test VersioningInfo::get_version_to_announce() & ::get_version_current() - - let (_start, timeout, mi) = get_a_version_info(); - - let mut mi_2 = mi.clone(); - mi_2.version += 1; - mi_2.start = - MassaTime::from(timeout.checked_add_days(Days::new(2)).unwrap().timestamp() as u64); - mi_2.timeout = - MassaTime::from(timeout.checked_add_days(Days::new(5)).unwrap().timestamp() as u64); - - // Can only build such object in test - history is empty :-/ - let vs_1 = MipState { - state: ComponentState::active(), - history: Default::default(), - }; - let vs_2 = MipState { - state: ComponentState::started(Amount::zero()), - history: Default::default(), - }; - - // TODO: Have VersioningStore::from ? 
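// Quick summary of the two getters this test exercises (values from the setup below):
// with MIP-0002 (version 2) Active and MIP-0003 (version 3) Started, the store reports
//     get_network_version_current()     == 2   // last Active version
//     get_network_version_to_announce() == 3   // latest Started / LockedIn version
// and an empty store reports 0 for both.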
- let mip_stats_cfg = MipStatsConfig { - block_count_considered: 10, - counters_max: 5, - }; - let vs_raw = MipStoreRaw { - store: BTreeMap::from([(mi.clone(), vs_1), (mi_2.clone(), vs_2)]), - stats: MipStoreStats::new(mip_stats_cfg.clone()), - }; - // let vs_raw = MipStoreRaw::try_from([(vi.clone(), vs_1), (vi_2.clone(), vs_2)]).unwrap(); - let vs = MipStore(Arc::new(RwLock::new(vs_raw))); - - assert_eq!(vs.get_network_version_current(), mi.version); - assert_eq!(vs.get_network_version_to_announce(), mi_2.version); - - // Test also an empty versioning store - let vs_raw = MipStoreRaw { - store: Default::default(), - stats: MipStoreStats::new(mip_stats_cfg), - }; - let vs = MipStore(Arc::new(RwLock::new(vs_raw))); - assert_eq!(vs.get_network_version_current(), 0); - assert_eq!(vs.get_network_version_to_announce(), 0); - } - - #[test] - fn test_is_coherent_with() { - // Test MipStateHistory::is_coherent_with - - // Given the following versioning info, we expect state - // Defined @ time <= 2 - // Started @ time > 2 && <= 5 - // LockedIn @ time > time(Started) && <= 5 - // Active @time > 5 - let vi_1 = MipInfo { - name: "MIP-0002".to_string(), - version: 2, - components: HashMap::from([(MipComponent::Address, 1)]), - start: MassaTime::from(2), - timeout: MassaTime::from(5), - activation_delay: MassaTime::from(2), - }; - // Another versioning info (from an attacker) for testing - let vi_2 = MipInfo { - name: "MIP-0002".to_string(), - version: 2, - components: HashMap::from([(MipComponent::Address, 1)]), - start: MassaTime::from(7), - timeout: MassaTime::from(10), - activation_delay: MassaTime::from(2), - }; - - let vsh = MipState { - state: ComponentState::Error, - history: Default::default(), - }; - // At state Error -> (always) false - assert_eq!(vsh.is_coherent_with(&vi_1), false); - - let vsh = MipState { - state: ComponentState::defined(), - history: Default::default(), - }; - // At state Defined but no history -> false - assert_eq!(vsh.is_coherent_with(&vi_1), false); - - let mut vsh = MipState::new(MassaTime::from(1)); - // At state Defined at time 1 -> true, given vi_1 @ time 1 - assert_eq!(vsh.is_coherent_with(&vi_1), true); - // At state Defined at time 1 -> false given vi_1 @ time 3 (state should be Started) - // assert_eq!(vsh.is_coherent_with(&vi_1, MassaTime::from(3)), false); - - // Advance to Started - let now = MassaTime::from(3); - let adv = Advance::from((&vi_1, &Amount::zero(), &now)); - vsh.on_advance(&adv); - - // At state Started at time now -> true - assert_eq!(vsh.state, ComponentState::started(Amount::zero())); - assert_eq!(vsh.is_coherent_with(&vi_1), true); - // Now with another versioning info - assert_eq!(vsh.is_coherent_with(&vi_2), false); - - // Advance to LockedIn - let now = MassaTime::from(4); - let adv = Advance::from((&vi_1, &VERSIONING_THRESHOLD_TRANSITION_ACCEPTED, &now)); - vsh.on_advance(&adv); - - // At state LockedIn at time now -> true - assert_eq!(vsh.state, ComponentState::locked_in(now)); - assert_eq!(vsh.is_coherent_with(&vi_1), true); - assert_eq!(vsh.is_coherent_with(&vi_1), true); - - // edge cases - // TODO: history all good but does not start with Defined, start with Started - } - - #[test] - fn test_merge_with() { - let vi_1 = MipInfo { - name: "MIP-0002".to_string(), - version: 2, - components: HashMap::from([(MipComponent::Address, 1)]), - start: MassaTime::from(2), - timeout: MassaTime::from(5), - activation_delay: MassaTime::from(2), - }; - - let vs_1 = advance_state_until(ComponentState::active(), &vi_1); - assert_eq!(vs_1, 
ComponentState::active()); - - let vi_2 = MipInfo { - name: "MIP-0003".to_string(), - version: 3, - components: HashMap::from([(MipComponent::Address, 2)]), - start: MassaTime::from(17), - timeout: MassaTime::from(27), - activation_delay: MassaTime::from(2), - }; - let vs_2 = advance_state_until(ComponentState::defined(), &vi_2); - - let mip_stats_cfg = MipStatsConfig { - block_count_considered: 10, - counters_max: 5, - }; - let mut vs_raw_1 = MipStoreRaw { - store: BTreeMap::from([(vi_1.clone(), vs_1.clone()), (vi_2.clone(), vs_2.clone())]), - stats: MipStoreStats::new(mip_stats_cfg.clone()), - }; - - let vs_2_2 = advance_state_until(ComponentState::active(), &vi_2); - assert_eq!(vs_2_2, ComponentState::active()); - - let vs_raw_2 = MipStoreRaw::try_from(( - [(vi_1.clone(), vs_1.clone()), (vi_2.clone(), vs_2_2.clone())], - mip_stats_cfg, - )) - .unwrap(); - - vs_raw_1.update_with(&vs_raw_2).unwrap(); - - // Expect state 1 (for vi_1) no change, state 2 (for vi_2) updated to "Active" - assert_eq!(vs_raw_1.store.get(&vi_1).unwrap().state, vs_1.state); - assert_eq!(vs_raw_1.store.get(&vi_2).unwrap().state, vs_2_2.state); - } - - #[test] - fn test_merge_with_invalid() { - // Test updating a versioning store with another invalid: - // 1- overlapping time range - // 2- overlapping versioning component - - let vi_1 = MipInfo { - name: "MIP-0002".to_string(), - version: 2, - components: HashMap::from([(MipComponent::Address, 1)]), - start: MassaTime::from(0), - timeout: MassaTime::from(5), - activation_delay: MassaTime::from(2), - }; - let vs_1 = advance_state_until(ComponentState::active(), &vi_1); - assert_eq!(vs_1, ComponentState::active()); - - let vi_2 = MipInfo { - name: "MIP-0003".to_string(), - version: 3, - components: HashMap::from([(MipComponent::Address, 2)]), - start: MassaTime::from(17), - timeout: MassaTime::from(27), - activation_delay: MassaTime::from(2), - }; - let vs_2 = advance_state_until(ComponentState::defined(), &vi_2); - assert_eq!(vs_2, ComponentState::defined()); - - let mip_stats_cfg = MipStatsConfig { - block_count_considered: 10, - counters_max: 5, - }; - - let mut vs_raw_1 = MipStoreRaw::try_from(( - [(vi_1.clone(), vs_1.clone()), (vi_2.clone(), vs_2.clone())], - mip_stats_cfg.clone(), - )) - .unwrap(); - - let mut vi_2_2 = vi_2.clone(); - // Make versioning info invalid (because start == vi_1.timeout) - vi_2_2.start = vi_1.timeout; - let vs_2_2 = advance_state_until(ComponentState::defined(), &vi_2_2); - let vs_raw_2 = MipStoreRaw { - store: BTreeMap::from([ - (vi_1.clone(), vs_1.clone()), - (vi_2_2.clone(), vs_2_2.clone()), - ]), - stats: MipStoreStats::new(mip_stats_cfg.clone()), - }; - - // This fails because try_from use update_with - let _vs_raw_2_ = MipStoreRaw::try_from(( - [ - (vi_1.clone(), vs_1.clone()), - (vi_2_2.clone(), vs_2_2.clone()), - ], - mip_stats_cfg.clone(), - )); - assert_eq!(_vs_raw_2_.is_err(), true); - - assert_eq!(vs_raw_1.update_with(&vs_raw_2), Err(())); - assert_eq!(vs_raw_1.store.get(&vi_1).unwrap().state, vs_1.state); - assert_eq!(vs_raw_1.store.get(&vi_2).unwrap().state, vs_2.state); - - // 2- overlapping component version - let mut vs_raw_1 = MipStoreRaw::try_from(( - [(vi_1.clone(), vs_1.clone()), (vi_2.clone(), vs_2.clone())], - mip_stats_cfg.clone(), - )) - .unwrap(); - - let mut vi_2_2 = vi_2.clone(); - vi_2_2.components = vi_1.components.clone(); - - let vs_2_2 = advance_state_until(ComponentState::defined(), &vi_2_2); - let vs_raw_2 = MipStoreRaw { - store: BTreeMap::from([ - (vi_1.clone(), vs_1.clone()), - (vi_2_2.clone(), 
vs_2_2.clone()), - ]), - stats: MipStoreStats::new(mip_stats_cfg.clone()), - }; - - assert_eq!(vs_raw_1.update_with(&vs_raw_2), Err(())); - } - - #[test] - fn test_empty_mip_store() { - // Test if we can init an empty MipStore - - let mip_stats_config = MipStatsConfig { - block_count_considered: MIP_STORE_STATS_BLOCK_CONSIDERED, - counters_max: MIP_STORE_STATS_COUNTERS_MAX, - }; - - let mip_store = MipStore::try_from(([], mip_stats_config)); - assert_eq!(mip_store.is_ok(), true); - } -} diff --git a/massa-versioning-worker/Cargo.toml b/massa-versioning/Cargo.toml similarity index 65% rename from massa-versioning-worker/Cargo.toml rename to massa-versioning/Cargo.toml index 99f22bace2b..5df511772b0 100644 --- a/massa-versioning-worker/Cargo.toml +++ b/massa-versioning/Cargo.toml @@ -1,26 +1,30 @@ [package] -name = "massa_versioning_worker" -version = "0.1.0" +name = "massa_versioning" +version = "0.23.0" authors = ["Massa Labs "] edition = "2021" [dependencies] # This is from: https://github.com/rust-bakery/machine/pull/22 -machine = { git = "https://github.com/antifuchs/machine", branch="fix-workspace-build" } +machine = { git = "https://github.com/antifuchs/machine", branch = "fix-workspace-build" } parking_lot = "0.12" thiserror = "1.0" num_enum = "0.5" -nom = "7.1" +nom = "=7.1" tracing = "0.1" # custom module massa_time = { path = "../massa-time" } massa_models = { path = "../massa-models" } massa_serialization = { path = "../massa-serialization" } +massa_hash = { path = "../massa-hash" } +massa_signature = { path = "../massa-signature" } +massa_proto = { path = "../massa-proto" } +massa_db = { path = "../massa-db" } [dev-dependencies] -chrono = "0.4" more-asserts = "0.3" +tempfile = "3.5" [features] testing = [] diff --git a/massa-versioning/src/address_factory.rs b/massa-versioning/src/address_factory.rs new file mode 100644 index 00000000000..dac2adde237 --- /dev/null +++ b/massa-versioning/src/address_factory.rs @@ -0,0 +1,58 @@ +use crate::{ + versioning::{MipComponent, MipStore}, + versioning_factory::{FactoryError, FactoryStrategy, VersioningFactory}, +}; +use massa_hash::Hash; +use massa_models::address::{ + Address, SCAddress, SCAddressV0, SCAddressV1, UserAddress, UserAddressV0, UserAddressV1, +}; + +#[derive(Clone)] +pub struct AddressFactory { + pub mip_store: MipStore, +} + +pub enum AddressArgs { + User { hash: Hash }, + SC { hash: Hash }, +} + +impl VersioningFactory for AddressFactory { + type Output = Address; + type Error = FactoryError; + type Arguments = AddressArgs; + + fn get_component() -> MipComponent { + MipComponent::Address + } + + fn get_versioning_store(&self) -> MipStore { + self.mip_store.clone() + } + + fn create( + &self, + args: &Self::Arguments, + strategy: FactoryStrategy, + ) -> Result { + let version = self.get_component_version_with_strategy(strategy)?; + + let output: Address = match version { + 0 => match args { + AddressArgs::User { hash } => { + Address::User(UserAddress::UserAddressV0(UserAddressV0(*hash))) + } + AddressArgs::SC { hash } => Address::SC(SCAddress::SCAddressV0(SCAddressV0(*hash))), + }, + 1 => match args { + AddressArgs::User { hash } => { + Address::User(UserAddress::UserAddressV1(UserAddressV1(*hash))) + } + AddressArgs::SC { hash } => Address::SC(SCAddress::SCAddressV1(SCAddressV1(*hash))), + }, + v => return Err(FactoryError::UnimplementedVersion(v)), + }; + + Ok(output) + } +} diff --git a/massa-versioning/src/grpc_mapping.rs b/massa-versioning/src/grpc_mapping.rs new file mode 100644 index 00000000000..a1966a02cb5 --- 
/dev/null +++ b/massa-versioning/src/grpc_mapping.rs @@ -0,0 +1,47 @@ +// Copyright (c) 2023 MASSA LABS +use crate::versioning::{ComponentStateTypeId, MipComponent, MipInfo}; +use massa_proto::massa::api::v1 as grpc; + +impl From<&ComponentStateTypeId> for grpc::ComponentStateId { + fn from(value: &ComponentStateTypeId) -> Self { + match value { + ComponentStateTypeId::Error => grpc::ComponentStateId::Error, + ComponentStateTypeId::Defined => grpc::ComponentStateId::Defined, + ComponentStateTypeId::Started => grpc::ComponentStateId::Started, + ComponentStateTypeId::LockedIn => grpc::ComponentStateId::Lockedin, + ComponentStateTypeId::Active => grpc::ComponentStateId::Active, + ComponentStateTypeId::Failed => grpc::ComponentStateId::Failed, + } + } +} + +impl From<&MipComponent> for grpc::MipComponent { + fn from(value: &MipComponent) -> Self { + match value { + MipComponent::KeyPair => grpc::MipComponent::Keypair, + MipComponent::Address => grpc::MipComponent::Address, + _ => grpc::MipComponent::Unspecified, + } + } +} + +impl From<&MipInfo> for grpc::MipInfo { + fn from(value: &MipInfo) -> Self { + let components = value + .components + .iter() + .map(|(mip_component, version)| grpc::MipComponentEntry { + kind: grpc::MipComponent::from(mip_component).into(), + version: *version, + }) + .collect(); + Self { + name: value.name.clone(), + version: value.version, + start: value.start.to_millis(), + timeout: value.timeout.to_millis(), + activation_delay: value.activation_delay.to_millis(), + components, + } + } +} diff --git a/massa-versioning/src/keypair_factory.rs b/massa-versioning/src/keypair_factory.rs new file mode 100644 index 00000000000..d146b37e4c0 --- /dev/null +++ b/massa-versioning/src/keypair_factory.rs @@ -0,0 +1,38 @@ +use massa_signature::KeyPair; + +use crate::{ + versioning::{MipComponent, MipStore}, + versioning_factory::{FactoryError, FactoryStrategy, VersioningFactory}, +}; + +#[derive(Clone)] +pub struct KeyPairFactory { + pub mip_store: MipStore, +} + +impl VersioningFactory for KeyPairFactory { + type Output = KeyPair; + type Error = FactoryError; + type Arguments = (); + + fn get_component() -> MipComponent { + MipComponent::KeyPair + } + + fn get_versioning_store(&self) -> MipStore { + self.mip_store.clone() + } + + fn create( + &self, + _args: &Self::Arguments, + strategy: FactoryStrategy, + ) -> Result<Self::Output, Self::Error> { + let version = self.get_component_version_with_strategy(strategy)?; + + let output = KeyPair::generate(version.into()) + .map_err(|_| FactoryError::UnimplementedVersion(version))?; + + Ok(output) + } +} diff --git a/massa-versioning-worker/src/lib.rs b/massa-versioning/src/lib.rs similarity index 69% rename from massa-versioning-worker/src/lib.rs rename to massa-versioning/src/lib.rs index 8f691fcc434..0af2efe14f1 100644 --- a/massa-versioning-worker/src/lib.rs +++ b/massa-versioning/src/lib.rs @@ -1,4 +1,5 @@ #![feature(variant_count)] +#![feature(assert_matches)] // Copyright (c) 2022 MASSA LABS //! # General description @@ -8,13 +9,23 @@ //! MIPState -> Deployment state of a MIPInfo //! MIPStore -> A map of MIPInfo -> MipState //! -//! # Note on MipInfo versions //! -//! There is 2 different versions: +//! # Notes on MipInfo versions +//! +//! There are 2 different 'versions': //! * version == Network version -> This is the network version to announce and thus to store in block header -//! * component_version -> This is the version for the associated component and is used in VersioningFactory +//!
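//!
//! Illustrative sketch (assumes a `FactoryStrategy::At(MassaTime)` variant in
//! versioning_factory.rs, which is not shown in this diff): a factory resolves its
//! component version from the MIP store before building an object:
//! ```ignore
//! use massa_versioning::keypair_factory::KeyPairFactory;
//! use massa_versioning::versioning_factory::{FactoryStrategy, VersioningFactory};
//!
//! // KeyPair whose version follows the MipComponent::KeyPair entry of the store
//! let factory = KeyPairFactory { mip_store: mip_store.clone() };
//! let keypair = factory.create(&(), FactoryStrategy::At(now))?;
//! ```
//!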
* component_version -> This is the version for the associated component and is used in VersioningFactory (e.g. KeyPair, Address, VM) +//! +//! # Notes on MipInfo timings and stats +//! +//! In the execution module, after a block becomes final, we update the MipStore stats (in a blocking way). +//! By updating the stats, we mean sending: (Slot timestamp, Option<(current: u32, advertising: Option<u32>)>). +//! Using the slot timestamp ensures that the trigger (and the trigger timeout) is agreed on by all nodes +//! (This means that the trigger and the trigger timeout are not timer based). +//! In order to have all nodes in sync (despite various node delays), the state is set to active +//! after an activation delay (duration is required to be > 1 cycle). //! -//! # Note on MipState +//! # Notes on MipState //! //! MipState has: //! * A state machine (stores the current state of deployment for a MipInfo) @@ -31,12 +42,7 @@ //! * + When we init MipStore (at startup), this ensures that we have a time ranges & versions coherent list of MipInfo //! * For instance, this can avoid to have 2 MipInfo with the same name //! -//! # Advancing MipState -//! -//! The Versioning middleware is designed to process all finalized blocks and update the corresponding state -//! **It is not yet implemented** -//! -//! # VersioningFactory +//! # Versioning Factory //! //! A Factory trait is there to ease the development of factory for Versioned component (e.g. address, block) //! @@ -45,6 +51,10 @@ //! //! Unit tests in versioning_factory.rs shows a basic but realistic implementation of a AddressFactory (impl the Factory trait) +pub mod address_factory; +pub mod grpc_mapping; +pub mod keypair_factory; +pub mod mips; pub mod versioning; pub mod versioning_factory; pub mod versioning_ser_der; diff --git a/massa-versioning/src/mips.rs b/massa-versioning/src/mips.rs new file mode 100644 index 00000000000..2230c7573fa --- /dev/null +++ b/massa-versioning/src/mips.rs @@ -0,0 +1,20 @@ +#[allow(unused_imports)] +use crate::versioning::{MipComponent, MipInfo, MipState}; + +pub const MIP_LIST: [(MipInfo, MipState); 0] = [ + // MIP placeholder +/* ( + MipInfo { + name: "MIP-0000".to_string(), + version: 0, + components: BTreeMap::from([ + (MipComponent::Address, 0), + (MipComponent::KeyPair, 0), + ]), + start: MassaTime::from_millis(0), + timeout: MassaTime::from_millis(0), + activation_delay: MassaTime::from_millis(0), + }, + MipState::new(MassaTime::from_millis(0)), +) */ +]; diff --git a/massa-versioning-worker/src/test_helpers/mod.rs b/massa-versioning/src/test_helpers/mod.rs similarity index 100% rename from massa-versioning-worker/src/test_helpers/mod.rs rename to massa-versioning/src/test_helpers/mod.rs diff --git a/massa-versioning-worker/src/test_helpers/versioning_helpers.rs b/massa-versioning/src/test_helpers/versioning_helpers.rs similarity index 82% rename from massa-versioning-worker/src/test_helpers/versioning_helpers.rs rename to massa-versioning/src/test_helpers/versioning_helpers.rs index 51e39c2874a..a14b1caa669 100644 --- a/massa-versioning-worker/src/test_helpers/versioning_helpers.rs +++ b/massa-versioning/src/test_helpers/versioning_helpers.rs @@ -13,10 +13,13 @@ pub fn advance_state_until(at_state: ComponentState, versioning_info: &MipInfo) let timeout = versioning_info.timeout; if matches!(at_state, ComponentState::Error) { - todo!() + return MipState { + state: ComponentState::error(), + history: Default::default(), + }; } - let mut state =
MipState::new(start.saturating_sub(MassaTime::from(1))); + let mut state = MipState::new(start.saturating_sub(MassaTime::from_millis(1))); if matches!(at_state, ComponentState::Defined(_)) { return state; @@ -26,7 +29,7 @@ pub fn advance_state_until(at_state: ComponentState, versioning_info: &MipInfo) start_timestamp: start, timeout, threshold: Default::default(), - now: start.saturating_add(MassaTime::from(1)), + now: start.saturating_add(MassaTime::from_millis(1)), activation_delay: versioning_info.activation_delay, }; state.on_advance(&advance_msg); @@ -36,12 +39,12 @@ pub fn advance_state_until(at_state: ComponentState, versioning_info: &MipInfo) } if matches!(at_state, ComponentState::Failed(_)) { - advance_msg.now = timeout.saturating_add(MassaTime::from(1)); + advance_msg.now = timeout.saturating_add(MassaTime::from_millis(1)); state.on_advance(&advance_msg); return state; } - advance_msg.now = start.saturating_add(MassaTime::from(2)); + advance_msg.now = start.saturating_add(MassaTime::from_millis(2)); advance_msg.threshold = VERSIONING_THRESHOLD_TRANSITION_ACCEPTED; state.on_advance(&advance_msg); @@ -52,7 +55,7 @@ pub fn advance_state_until(at_state: ComponentState, versioning_info: &MipInfo) advance_msg.now = advance_msg .now .saturating_add(versioning_info.activation_delay) - .saturating_add(MassaTime::from(1)); + .saturating_add(MassaTime::from_millis(1)); state.on_advance(&advance_msg); // Active diff --git a/massa-versioning/src/versioning.rs b/massa-versioning/src/versioning.rs new file mode 100644 index 00000000000..04b2cfe5431 --- /dev/null +++ b/massa-versioning/src/versioning.rs @@ -0,0 +1,2217 @@ +use std::cmp::Ordering; +use std::collections::{BTreeMap, BTreeSet, HashMap, VecDeque}; +use std::ops::Deref; +use std::sync::Arc; + +use machine::{machine, transitions}; +use num_enum::{FromPrimitive, IntoPrimitive, TryFromPrimitive}; +use parking_lot::RwLock; +use thiserror::Error; +use tracing::{debug, warn}; + +use massa_db::{ + DBBatch, MassaDB, MIP_STORE_PREFIX, MIP_STORE_STATS_PREFIX, STATE_CF, VERSIONING_CF, +}; +use massa_models::config::{MIP_STORE_STATS_BLOCK_CONSIDERED, MIP_STORE_STATS_COUNTERS_MAX}; +use massa_models::error::ModelsError; +use massa_models::slot::Slot; +use massa_models::timeslots::get_block_slot_timestamp; +use massa_models::{amount::Amount, config::VERSIONING_THRESHOLD_TRANSITION_ACCEPTED}; +use massa_serialization::{DeserializeError, Deserializer, SerializeError, Serializer}; +use massa_time::MassaTime; + +use crate::versioning_ser_der::{ + MipInfoDeserializer, MipInfoSerializer, MipStateDeserializer, MipStateSerializer, + MipStoreStatsDeserializer, MipStoreStatsSerializer, +}; + +/// Versioning component enum +#[allow(missing_docs)] +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, FromPrimitive, IntoPrimitive)] +#[repr(u32)] +pub enum MipComponent { + // Address and KeyPair versions are directly related + Address, + KeyPair, + Block, + VM, + #[doc(hidden)] + #[num_enum(default)] + __Nonexhaustive, +} + +/// MIP info (name & versions & time range for a MIP) +#[derive(Clone, Debug)] +pub struct MipInfo { + /// MIP name or descriptive name + pub name: String, + /// Network (or global) version (to be included in block header) + pub version: u32, + /// Components concerned by this versioning (e.g. a new Block version), and the associated component_version + pub components: BTreeMap, + /// a timestamp at which the version gains its meaning (e.g. 
announced in block header) + pub start: MassaTime, + /// a timestamp at which the deployment is considered failed + pub timeout: MassaTime, + /// Once deployment has been locked, wait for this duration before deployment is considered active + pub activation_delay: MassaTime, +} + +// Need Ord / PartialOrd so it is properly sorted in BTreeMap + +impl Ord for MipInfo { + fn cmp(&self, other: &Self) -> Ordering { + (self.start, &self.timeout).cmp(&(other.start, &other.timeout)) + } +} + +impl PartialOrd for MipInfo { + fn partial_cmp(&self, other: &Self) -> Option<Ordering> { + Some(self.cmp(other)) + } +} + +impl PartialEq for MipInfo { + fn eq(&self, other: &Self) -> bool { + self.name == other.name + && self.version == other.version + && self.components == other.components + && self.start == other.start + && self.timeout == other.timeout + && self.activation_delay == other.activation_delay + } +} + +impl Eq for MipInfo {} + +machine!( + /// State machine for a Versioning component that tracks the deployment state + #[derive(Clone, Copy, Debug, PartialEq)] + pub(crate) enum ComponentState { + /// Initial state + Defined, + /// Past start, can only go to LockedIn after the threshold is above a given value + Started { pub(crate) threshold: Amount }, + /// Locked but wait for some time before going to active (to give users time to upgrade) + LockedIn { pub(crate) at: MassaTime }, + /// After LockedIn, deployment is considered successful (after activation delay) + Active { pub(crate) at: MassaTime }, + /// Past the timeout, if LockedIn is not reached + Failed, + } +); + +impl Default for ComponentState { + fn default() -> Self { + Self::Defined(Defined {}) + } +} + +#[allow(missing_docs)] +#[derive(IntoPrimitive, Debug, Clone, Eq, PartialEq, TryFromPrimitive, PartialOrd, Ord)] +#[repr(u32)] +pub enum ComponentStateTypeId { + Error = 0, + Defined = 1, + Started = 2, + LockedIn = 3, + Active = 4, + Failed = 5, +} + +impl From<&ComponentState> for ComponentStateTypeId { + fn from(value: &ComponentState) -> Self { + match value { + ComponentState::Error => ComponentStateTypeId::Error, + ComponentState::Defined(_) => ComponentStateTypeId::Defined, + ComponentState::Started(_) => ComponentStateTypeId::Started, + ComponentState::LockedIn(_) => ComponentStateTypeId::LockedIn, + ComponentState::Active(_) => ComponentStateTypeId::Active, + ComponentState::Failed(_) => ComponentStateTypeId::Failed, + } + } +} + +/// A message to update the `ComponentState` +#[derive(Clone, Debug)] +pub struct Advance { + /// from MipInfo.start + pub start_timestamp: MassaTime, + /// from MipInfo.timeout + pub timeout: MassaTime, + /// from MipInfo.activation_delay + pub activation_delay: MassaTime, + + /// % of past blocks with this version + pub threshold: Amount, + /// Current time (timestamp) + pub now: MassaTime, +} + +// Need Ord / PartialOrd so it is properly sorted in BTreeMap + +impl Ord for Advance { + fn cmp(&self, other: &Self) -> Ordering { + (self.now).cmp(&other.now) + } +} + +impl PartialOrd for Advance { + fn partial_cmp(&self, other: &Self) -> Option<Ordering> { + Some(self.cmp(other)) + } +} + +impl PartialEq for Advance { + fn eq(&self, other: &Self) -> bool { + self.start_timestamp == other.start_timestamp + && self.timeout == other.timeout + && self.threshold == other.threshold + && self.now == other.now + } +} + +impl Eq for Advance {} + +transitions!(ComponentState, + [ + (Defined, Advance) => [Defined, Started, Failed], + (Started, Advance) => [Started, LockedIn, Failed], + (LockedIn, Advance) => [LockedIn,
Active], + (Active, Advance) => Active, + (Failed, Advance) => Failed + ] +); + +impl Defined { + /// Update state from state Defined + pub fn on_advance(self, input: Advance) -> ComponentState { + match input.now { + n if n >= input.timeout => ComponentState::failed(), + n if n >= input.start_timestamp => ComponentState::started(Amount::zero()), + _ => ComponentState::Defined(Defined {}), + } + } +} + +impl Started { + /// Update state from state Started + pub fn on_advance(self, input: Advance) -> ComponentState { + if input.now > input.timeout { + return ComponentState::failed(); + } + + if input.threshold >= VERSIONING_THRESHOLD_TRANSITION_ACCEPTED { + debug!("(VERSIONING LOG) transition accepted, locking in"); + ComponentState::locked_in(input.now) + } else { + ComponentState::started(input.threshold) + } + } +} + +impl LockedIn { + /// Update state from state LockedIn ... + pub fn on_advance(self, input: Advance) -> ComponentState { + if input.now > self.at.saturating_add(input.activation_delay) { + debug!("(VERSIONING LOG) locked version has become active"); + ComponentState::active(input.now) + } else { + ComponentState::locked_in(self.at) + } + } +} + +impl Active { + /// Update state (will always stay in state Active) + pub fn on_advance(self, _input: Advance) -> Active { + Active { at: self.at } + } +} + +impl Failed { + /// Update state (will always stay in state Failed) + pub fn on_advance(self, _input: Advance) -> Failed { + Failed {} + } +} + +/// Error returned by `MipState::is_coherent_with` +#[derive(Error, Debug, PartialEq)] +pub enum IsCoherentError { + // State is not coherent with associated MipInfo, ex: State is active but MipInfo.start was not reach yet + #[error("MipState history is empty")] + EmptyHistory, + #[error("MipState is at state Error")] + AtError, + #[error("History must start at state 'Defined' and not {0:?}")] + InvalidHistory(ComponentStateTypeId), + #[error("Non coherent state: {0:?} versus rebuilt state: {1:?}")] + NonCoherent(ComponentState, ComponentState), +} + +/// Wrapper of ComponentState (in order to keep state history) +#[derive(Debug, Clone, PartialEq)] +pub struct MipState { + pub(crate) state: ComponentState, + pub(crate) history: BTreeMap, +} + +impl MipState { + /// Create + pub fn new(defined: MassaTime) -> Self { + let state: ComponentState = Default::default(); // Default is Defined + let state_id = ComponentStateTypeId::from(&state); + // Build a 'dummy' advance msg for state Defined, this is to avoid using an + // Option in MipStateHistory::history + let advance = Advance { + start_timestamp: MassaTime::from_millis(0), + timeout: MassaTime::from_millis(0), + threshold: Default::default(), + now: defined, + activation_delay: MassaTime::from_millis(0), + }; + + let history = BTreeMap::from([(advance, state_id)]); + Self { state, history } + } + + /// Create a new state from an existing state - resulting state will be at state "Defined" + pub fn reset_from(&self) -> Option { + match self.history.first_key_value() { + Some((advance, state_id)) if *state_id == ComponentStateTypeId::Defined => { + Some(MipState::new(advance.now)) + } + _ => None, + } + } + + /// Advance the state + /// Can be called as multiple times as it will only store what changes the state in history + pub fn on_advance(&mut self, input: &Advance) { + let now = input.now; + // Check that input.now is after last item in history + // We don't want to go backward + let is_forward = self + .history + .last_key_value() + .map(|(adv, _)| adv.now < now) + 
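// Illustrative walkthrough of the transition table above (hypothetical millisecond
// timestamps: start = 2, timeout = 5, activation_delay = 2):
//
//     let mut state = ComponentState::default(); // Defined
//     let mut msg = Advance {
//         start_timestamp: MassaTime::from_millis(2),
//         timeout: MassaTime::from_millis(5),
//         threshold: Amount::zero(),
//         now: MassaTime::from_millis(3), // past start, before timeout
//         activation_delay: MassaTime::from_millis(2),
//     };
//     state = state.on_advance(msg.clone()); // -> Started { threshold: 0 }
//     msg.threshold = VERSIONING_THRESHOLD_TRANSITION_ACCEPTED;
//     msg.now = MassaTime::from_millis(4);
//     state = state.on_advance(msg.clone()); // -> LockedIn { at: 4 }
//     msg.now = MassaTime::from_millis(7); // > at + activation_delay
//     state = state.on_advance(msg); // -> Active { at: 7 }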
.unwrap_or(false); + + if is_forward { + // machines crate (for state machine) does not support passing ref :-/ + let state = self.state.on_advance(input.clone()); + // Update history as well + if state != self.state { + let state_id = ComponentStateTypeId::from(&state); + + // Avoid storing too many things in history + // Here we avoid storing for every threshold update + if !(matches!(state, ComponentState::Started(Started { .. })) + && matches!(self.state, ComponentState::Started(Started { .. }))) + { + self.history.insert(input.clone(), state_id); + } + self.state = state; + } + } + } + + /// Given a corresponding MipInfo, check if the state is coherent: + /// it is coherent if the state can be at this position (e.g. can it be at state "Started" according to the given time range) + /// and if the history is coherent with the current state + /// Return an `IsCoherentError` for state == ComponentState::Error + pub fn is_coherent_with(&self, mip_info: &MipInfo) -> Result<(), IsCoherentError> { + // Always return an error for state Error or if history is empty + if matches!(&self.state, &ComponentState::Error) { + return Err(IsCoherentError::AtError); + } + + if self.history.is_empty() { + return Err(IsCoherentError::EmptyHistory); + } + + // safe to unwrap (already tested if empty or not) + let (initial_ts, initial_state_id) = self.history.first_key_value().unwrap(); + if *initial_state_id != ComponentStateTypeId::Defined { + // self.history does not start with Defined -> (always) an error + return Err(IsCoherentError::InvalidHistory(initial_state_id.clone())); + } + + // Build a new MipStateHistory from initial state, replaying the whole history + // but with given versioning info then compare + let mut vsh = MipState::new(initial_ts.now); + let mut advance_msg = Advance { + start_timestamp: mip_info.start, + timeout: mip_info.timeout, + threshold: Amount::zero(), + now: initial_ts.now, + activation_delay: mip_info.activation_delay, + }; + + for (adv, _state) in self.history.iter().skip(1) { + advance_msg.now = adv.now; + advance_msg.threshold = adv.threshold; + vsh.on_advance(&advance_msg); + } + + // Advance state if both are at 'Started' (to have the same threshold) + // Note: because in history we do not add entries for every threshold update + if let ( + ComponentState::Started(Started { threshold }), + ComponentState::Started(Started { + threshold: threshold_2, + }), + ) = (vsh.state, self.state) + { + if threshold_2 != threshold { + advance_msg.threshold = threshold_2; + // Need to advance now timestamp otherwise it will be ignored + advance_msg.now = advance_msg.now.saturating_add(MassaTime::from_millis(1)); + vsh.on_advance(&advance_msg); + } + } + + if vsh == *self { + Ok(()) + } else { + Err(IsCoherentError::NonCoherent(self.state, vsh.state)) + } + } + + /// Query state at given timestamp + /// TODO: add doc for start & timeout parameter? why do we need them?
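// Sketch of what `state_at` (defined below) returns, for a hypothetical history
// where Defined was recorded at t=1 and Started at t=3, with timeout at t=10:
//
//     state_at(t=0, ..)  -> Err(BeforeInitialState)  // before the first history entry
//     state_at(t=2, ..)  -> Ok(Defined)              // lower bound between two entries
//     state_at(t=5, ..)  -> Err(Unpredictable)       // Started, threshold below the
//                                                    // transition value, timeout not reached
//     state_at(t=11, ..) -> Ok(Failed)               // extrapolated past the timeout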
+ pub fn state_at( + &self, + ts: MassaTime, + start: MassaTime, + timeout: MassaTime, + ) -> Result { + if self.history.is_empty() { + return Err(StateAtError::EmptyHistory); + } + + // Optim: this avoids iterating over history (cheap to retrieve first item) + let first = self.history.first_key_value().unwrap(); // safe to unwrap + if ts < first.0.now { + // Before initial state + return Err(StateAtError::BeforeInitialState(first.1.clone(), ts)); + } + + // At this point, we are >= the first state in history + let mut lower_bound = None; + let mut higher_bound = None; + let mut is_after_last = false; + + // Optim: this avoids iterating over history (cheap to retrieve first item) + let last = self.history.last_key_value().unwrap(); // safe to unwrap + if ts > last.0.now { + lower_bound = Some(last); + is_after_last = true; + } + + if !is_after_last { + // We are in between two states in history, find bounds + for (adv, state_id) in self.history.iter() { + if adv.now <= ts { + lower_bound = Some((adv, state_id)); + } + if adv.now >= ts && higher_bound.is_none() { + higher_bound = Some((adv, state_id)); + break; + } + } + } + + match (lower_bound, higher_bound) { + (Some((_adv_1, st_id_1)), Some((_adv_2, _st_id_2))) => { + // Between 2 states (e.g. between Defined && Started) -> return Defined + Ok(st_id_1.clone()) + } + (Some((adv, st_id)), None) => { + // After the last state in history -> need to advance the state and return + let threshold_for_transition = VERSIONING_THRESHOLD_TRANSITION_ACCEPTED; + // Note: Please update this if MipState transitions change as it might not hold true + if *st_id == ComponentStateTypeId::Started + && adv.threshold < threshold_for_transition + && ts < adv.timeout + { + Err(StateAtError::Unpredictable) + } else { + let msg = Advance { + start_timestamp: start, + timeout, + threshold: adv.threshold, + now: ts, + activation_delay: adv.activation_delay, + }; + // Return the resulting state after advance + let state = self.state.on_advance(msg); + Ok(ComponentStateTypeId::from(&state)) + } + } + _ => { + // 1. Before the first state in history: already covered + // 2. 
None, None: already covered - empty history + Err(StateAtError::EmptyHistory) + } + } + } + + /// Return the time when state will go from LockedIn to Active, None if not already LockedIn + pub fn activation_at(&self, mip_info: &MipInfo) -> Option<MassaTime> { + match self.state { + ComponentState::LockedIn(LockedIn { at }) => { + Some(at.saturating_add(mip_info.activation_delay)) + } + _ => None, + } + } +} + +/// Error returned by MipStateHistory::state_at +#[allow(missing_docs)] +#[derive(Error, Debug, PartialEq)] +pub enum StateAtError { + #[error("Initial state ({0:?}) only defined after timestamp: {1}")] + BeforeInitialState(ComponentStateTypeId, MassaTime), + #[error("Empty history, should never happen")] + EmptyHistory, + #[error("Cannot predict in the future (~ threshold not reached yet)")] + Unpredictable, +} + +// Store + +/// Database for all MIP info +#[derive(Debug, Clone)] +pub struct MipStore(pub Arc<RwLock<MipStoreRaw>>); + +impl MipStore { + /// Retrieve the current network version to set in block header + pub fn get_network_version_current(&self) -> u32 { + let lock = self.0.read(); + let store = lock.deref(); + // Current version == last active + store + .store + .iter() + .rev() + .find_map(|(k, v)| (matches!(v.state, ComponentState::Active(_))).then_some(k.version)) + .unwrap_or(0) + } + + /// Retrieve the last active version at the given timestamp + pub fn get_network_version_active_at(&self, ts: MassaTime) -> u32 { + let lock = self.0.read(); + let store = lock.deref(); + store + .store + .iter() + .rev() + .find_map(|(k, v)| match v.state { + ComponentState::Active(Active { at }) if at <= ts => Some(k.version), + _ => None, + }) + .unwrap_or(0) + } + + /// Retrieve the network version number to announce in block header + /// return 0 if there is nothing to announce + pub fn get_network_version_to_announce(&self) -> u32 { + let lock = self.0.read(); + let store = lock.deref(); + // Announce the latest versioning info in Started / LockedIn state + // Defined == Not yet ready to announce + // Active == current version + store + .store + .iter() + .rev() + .find_map(|(k, v)| { + matches!( + &v.state, + &ComponentState::Started(_) | &ComponentState::LockedIn(_) + ) + .then_some(k.version) + }) + .unwrap_or(0) + } + + pub fn update_network_version_stats( + &mut self, + slot_timestamp: MassaTime, + network_versions: Option<(u32, u32)>, + ) { + let mut lock = self.0.write(); + lock.update_network_version_stats(slot_timestamp, network_versions); + } + + #[allow(clippy::result_large_err)] + pub fn update_with( + &mut self, + mip_store: &MipStore, + ) -> Result<(Vec<MipInfo>, BTreeMap<MipInfo, MipState>), UpdateWithError> { + let mut lock = self.0.write(); + let lock_other = mip_store.0.read(); + lock.update_with(lock_other.deref()) + } + + // GRPC + + /// Retrieve a list of MIP info with their corresponding state (as id) - used for grpc API + pub fn get_mip_status(&self) -> BTreeMap<MipInfo, ComponentStateTypeId> { + let guard = self.0.read(); + guard + .store + .iter() + .map(|(mip_info, mip_state)| { + ( + mip_info.clone(), + ComponentStateTypeId::from(&mip_state.state), + ) + }) + .collect() + } + + // Network restart + pub fn is_coherent_with_shutdown_period( + &self, + shutdown_start: Slot, + shutdown_end: Slot, + thread_count: u8, + t0: MassaTime, + genesis_timestamp: MassaTime, + ) -> Result<bool, ModelsError> { + let guard = self.0.read(); + guard.is_coherent_with_shutdown_period( + shutdown_start, + shutdown_end, + thread_count, + t0, + genesis_timestamp, + ) + } + + // DB + pub fn update_batches( + &self, + db_batch: &mut DBBatch, + db_versioning_batch: &mut DBBatch, +
between: (&MassaTime, &MassaTime), + ) -> Result<(), SerializeError> { + let guard = self.0.read(); + guard.update_batches(db_batch, db_versioning_batch, between) + } + + pub fn extend_from_db( + &mut self, + db: Arc>, + ) -> Result<(Vec, BTreeMap), ExtendFromDbError> { + let mut guard = self.0.write(); + guard.extend_from_db(db) + } + + pub fn reset_db(&self, db: Arc>) { + { + let mut guard = db.write(); + guard.delete_prefix(MIP_STORE_PREFIX, STATE_CF, None); + guard.delete_prefix(MIP_STORE_PREFIX, VERSIONING_CF, None); + guard.delete_prefix(MIP_STORE_STATS_PREFIX, VERSIONING_CF, None); + } + } +} + +impl TryFrom<([(MipInfo, MipState); N], MipStatsConfig)> for MipStore { + type Error = UpdateWithError; + + fn try_from( + (value, cfg): ([(MipInfo, MipState); N], MipStatsConfig), + ) -> Result { + MipStoreRaw::try_from((value, cfg)).map(|store_raw| Self(Arc::new(RwLock::new(store_raw)))) + } +} + +/// Statistics in MipStoreRaw +#[derive(Debug, Clone, PartialEq)] +pub struct MipStatsConfig { + pub block_count_considered: usize, + pub counters_max: usize, +} + +/// In order for a MIP to be accepted, we compute statistics about other node 'network' version announcement +#[derive(Debug, Clone, PartialEq)] +pub(crate) struct MipStoreStats { + // config for max counters + block to consider when computing the vote ratio + pub(crate) config: MipStatsConfig, + // used to clean up the counters (pop the oldest then subtract matching counter) + pub(crate) latest_announcements: VecDeque, + // counter per network version + pub(crate) network_version_counters: BTreeMap, +} + +impl MipStoreStats { + pub(crate) fn new(config: MipStatsConfig) -> Self { + Self { + config: config.clone(), + latest_announcements: VecDeque::with_capacity(config.block_count_considered), + network_version_counters: Default::default(), + } + } + + // reset stats - used in `update_for_network_shutdown` function + fn reset(&mut self) { + self.latest_announcements.clear(); + self.network_version_counters.clear(); + } +} + +/// Error returned by `MipStoreRaw::update_with` +#[derive(Error, Debug, PartialEq)] +pub enum UpdateWithError { + // State is not coherent with associated MipInfo, ex: State is active but MipInfo.start was not reach yet + #[error("MipInfo {0:#?} is not coherent with state: {1:#?}, error: {2}")] + NonCoherent(MipInfo, MipState, IsCoherentError), + // ex: State is already started but received state is only defined + #[error("For MipInfo {0:?}, trying to downgrade from state {1:?} to {2:?}")] + Downgrade(MipInfo, ComponentState, ComponentState), + // ex: MipInfo 2 start is before MipInfo 1 timeout (MipInfo timings should only be sequential) + #[error("MipInfo {0:?} has overlapping data of MipInfo {1:?}")] + Overlapping(MipInfo, MipInfo), +} + +/// Error returned by 'extend_from_db` +#[derive(Error, Debug)] +pub enum ExtendFromDbError { + #[error("Unable to get an handle over db column: {0}")] + UnknownDbColumn(String), + #[error("{0}")] + Update(#[from] UpdateWithError), + #[error("{0}")] + Deserialize(String), +} + +/// Store of all versioning info +#[derive(Debug, Clone, PartialEq)] +pub struct MipStoreRaw { + pub(crate) store: BTreeMap, + pub(crate) stats: MipStoreStats, +} + +impl MipStoreRaw { + /// Update our store with another (usually after a bootstrap where we received another store) + /// Return list of updated / added if successful, UpdateWithError otherwise + #[allow(clippy::result_large_err)] + pub fn update_with( + &mut self, + store_raw: &MipStoreRaw, + ) -> Result<(Vec, BTreeMap), UpdateWithError> { + 
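// Hedged sketch of how a caller (e.g. right after bootstrap) can consume this
// result, following the comments in the body below (`local_store` and
// `received_store` are hypothetical names):
//
//     match local_store.update_with(&received_store) {
//         Ok((updated, to_add)) => {
//             // `updated` entries were applied in place; a non-empty `to_add` means
//             // the remote knows MIPs this node does not ship -> software is outdated
//             if !to_add.is_empty() {
//                 warn!("Please update the Massa node software; unknown MIPs: {:?}", to_add.keys());
//             }
//         }
//         Err(e) => warn!("Rejected received MIP store: {}", e),
//     }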
// iter over items in given store: + // -> 2 cases: + // * MipInfo is already in self store -> add to 'to_update' list + // * MipInfo is not in self.store -> We received a new MipInfo so we are running an out dated version + // of the software + // We then return the list of new MipInfo so we can warn and ask + // to update the software + + let mut component_versions: HashMap = self + .store + .iter() + .flat_map(|c| { + c.0.components + .iter() + .map(|(mip_component, component_version)| { + (mip_component.clone(), *component_version) + }) + }) + .collect(); + let mut names: BTreeSet = self.store.iter().map(|i| i.0.name.clone()).collect(); + let mut to_update: BTreeMap = Default::default(); + let mut to_add: BTreeMap = Default::default(); + let mut has_error: Option = None; + + for (v_info, v_state) in store_raw.store.iter() { + if let Err(e) = v_state.is_coherent_with(v_info) { + // As soon as we found one non coherent state we abort the merge + has_error = Some(UpdateWithError::NonCoherent( + v_info.clone(), + v_state.clone(), + e, + )); + break; + } + + if let Some(v_state_orig) = self.store.get(v_info) { + // Versioning info (from right) is already in self (left) + // Need to check if we add this to 'to_update' list + let v_state_id: u32 = ComponentStateTypeId::from(&v_state.state).into(); + let v_state_orig_id: u32 = ComponentStateTypeId::from(&v_state_orig.state).into(); + + // Note: we do not check for state: active OR failed OR error as they cannot change + if matches!( + v_state_orig.state, + ComponentState::Defined(_) + | ComponentState::Started(_) + | ComponentState::LockedIn(_) + ) { + // Only accept 'higher' state + // (e.g. 'started' if 'defined', 'locked in' if 'started'...) + if v_state_id >= v_state_orig_id { + to_update.insert(v_info.clone(), v_state.clone()); + } else { + // Trying to downgrade state' (e.g. trying to go from 'active' -> 'defined') + has_error = Some(UpdateWithError::Downgrade( + v_info.clone(), + v_state_orig.state, + v_state.state, + )); + break; + } + } + } else { + // Versioning info (from right) is not in self.0 (left) + // Need to check if we add this to 'to_add' list + + let last_v_info_ = to_add + .last_key_value() + .map(|i| i.0) + .or(self.store.last_key_value().map(|i| i.0)); + + if let Some(last_v_info) = last_v_info_ { + // check for versions of all components in v_info + let mut component_version_compatible = true; + for component in v_info.components.iter() { + if component.1 <= component_versions.get(component.0).unwrap_or(&0) { + component_version_compatible = false; + break; + } + } + + if v_info.start > last_v_info.timeout + && v_info.timeout > v_info.start + && v_info.version > last_v_info.version + && !names.contains(&v_info.name) + && component_version_compatible + { + // Time range is ok / version is ok / name is unique, let's add it + to_add.insert(v_info.clone(), v_state.clone()); + names.insert(v_info.name.clone()); + for component in v_info.components.iter() { + component_versions.insert(component.0.clone(), *component.1); + } + } else { + // Something is wrong (time range not ok? / version not incr? / names? + // or component version not incr?) 
+ has_error = Some(UpdateWithError::Overlapping( + v_info.clone(), + last_v_info.clone(), + )); + break; + } + } else { + // to_add is empty && self.0 is empty + to_add.insert(v_info.clone(), v_state.clone()); + names.insert(v_info.name.clone()); + } + } + } + + match has_error { + None => { + let updated: Vec = to_update.keys().cloned().collect(); + + // Note: we only update the store with to_update collection + // having something in the to_add collection means that we need to update + // the Massa node software + self.store.append(&mut to_update); + Ok((updated, to_add)) + } + Some(e) => Err(e), + } + } + + fn update_network_version_stats( + &mut self, + slot_timestamp: MassaTime, + network_versions: Option<(u32, u32)>, + ) { + if let Some((_current_network_version, announced_network_version)) = network_versions { + let removed_version_ = match self.stats.latest_announcements.len() { + n if n >= self.stats.config.block_count_considered => { + self.stats.latest_announcements.pop_front() + } + _ => None, + }; + self.stats + .latest_announcements + .push_back(announced_network_version); + + // We update the count of the received version + let entry_value = self + .stats + .network_version_counters + .entry(announced_network_version) + .or_default(); + *entry_value = entry_value.saturating_add(1); + + if let Some(removed_version) = removed_version_ { + let entry_value = self + .stats + .network_version_counters + .entry(removed_version) + .or_insert(1); + *entry_value = entry_value.saturating_sub(1); + } + + // Cleanup the counters + if self.stats.network_version_counters.len() > self.stats.config.counters_max { + if let Some((version, count)) = self.stats.network_version_counters.pop_first() { + // TODO: return version / count for unit tests? + warn!( + "MipStoreStats removed counter for version {}, count was: {}", + version, count + ) + } + } + + self.advance_states_on_updated_stats(slot_timestamp); + } + } + + /// Used internally by `update_network_version_stats` + fn advance_states_on_updated_stats(&mut self, slot_timestamp: MassaTime) { + for (mi, state) in self.store.iter_mut() { + let network_version_count = *self + .stats + .network_version_counters + .get(&mi.version) + .unwrap_or(&0) as f32; + let block_count_considered = self.stats.config.block_count_considered as f32; + + let vote_ratio_ = 100.0 * network_version_count / block_count_considered; + + let vote_ratio = Amount::from_mantissa_scale(vote_ratio_.round() as u64, 0); + + debug!("(VERSIONING LOG) vote_ratio = {} (from version counter = {} and blocks considered = {})", vote_ratio, network_version_count, block_count_considered); + + let advance_msg = Advance { + start_timestamp: mi.start, + timeout: mi.timeout, + threshold: vote_ratio, + now: slot_timestamp, + activation_delay: mi.activation_delay, + }; + + // TODO / OPTIM: filter the store to avoid advancing on failed and active versions + state.on_advance(&advance_msg.clone()); + } + } + + /// Check if store is coherent with given last network shutdown + /// On a network shutdown, the MIP infos will be edited but we still need to check if this is coherent + fn is_coherent_with_shutdown_period( + &self, + shutdown_start: Slot, + shutdown_end: Slot, + thread_count: u8, + t0: MassaTime, + genesis_timestamp: MassaTime, + ) -> Result { + let mut is_coherent = true; + + let shutdown_start_ts = + get_block_slot_timestamp(thread_count, t0, genesis_timestamp, shutdown_start)?; + let shutdown_end_ts = + get_block_slot_timestamp(thread_count, t0, genesis_timestamp, shutdown_end)?; + 
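// Coherence rule checked below: a MIP still in 'Defined' must not start or time
// out inside the shutdown window, and no MIP may be left in 'Started' (it is
// expected to have been reset); 'LockedIn', 'Active', 'Failed' and 'Error' are
// unaffected by a shutdown.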
let shutdown_range = shutdown_start_ts..=shutdown_end_ts; + + for (mip_info, mip_state) in &self.store { + match mip_state.state { + ComponentState::Defined(..) => { + // all good if it does not start / timeout during shutdown period + if shutdown_range.contains(&mip_info.start) + || shutdown_range.contains(&mip_info.timeout) + { + is_coherent = false; + break; + } + } + ComponentState::Started(..) => { + // assume this should have been reset + is_coherent = false; + break; + } + _ => { + // active / failed, error, nothing to do + // locked in, nothing to do (might go from 'locked in' to 'active' during shutdown) + } + } + } + + Ok(is_coherent) + } + + #[allow(dead_code)] + fn update_for_network_shutdown( + &mut self, + shutdown_start: Slot, + shutdown_end: Slot, + thread_count: u8, + t0: MassaTime, + genesis_timestamp: MassaTime, + ) -> Result<(), ModelsError> { + let shutdown_start_ts = + get_block_slot_timestamp(thread_count, t0, genesis_timestamp, shutdown_start)?; + let shutdown_end_ts = + get_block_slot_timestamp(thread_count, t0, genesis_timestamp, shutdown_end)?; + let shutdown_range = shutdown_start_ts..=shutdown_end_ts; + + let mut new_store: BTreeMap = Default::default(); + let mut new_stats = self.stats.clone(); + new_stats.reset(); + + let next_valid_start_ = shutdown_end.get_next_slot(thread_count)?; + let next_valid_start = + get_block_slot_timestamp(thread_count, t0, genesis_timestamp, next_valid_start_)?; + + let mut offset: Option = None; + + for (mip_info, mip_state) in &self.store { + match mip_state.state { + ComponentState::Defined(..) => { + // Defined: offset start & timeout + + let mut new_mip_info = mip_info.clone(); + + if shutdown_range.contains(&new_mip_info.start) { + let offset_ts = match offset { + Some(offset_ts) => offset_ts, + None => { + let offset_ts = next_valid_start.saturating_sub(mip_info.start); + offset = Some(offset_ts); + offset_ts + } + }; + + new_mip_info.start = new_mip_info.start.saturating_add(offset_ts); + new_mip_info.timeout = new_mip_info + .start + .saturating_add(mip_info.timeout.saturating_sub(mip_info.start)); + } + new_store.insert(new_mip_info, mip_state.clone()); + } + ComponentState::Started(..) | ComponentState::LockedIn(..) 
=> { + // Started or LockedIn -> Reset to Defined, offset start & timeout + + let mut new_mip_info = mip_info.clone(); + + let offset_ts = match offset { + Some(offset_ts) => offset_ts, + None => { + let offset_ts = next_valid_start.saturating_sub(mip_info.start); + offset = Some(offset_ts); + offset_ts + } + }; + + new_mip_info.start = new_mip_info.start.saturating_add(offset_ts); + new_mip_info.timeout = new_mip_info + .start + .saturating_add(mip_info.timeout.saturating_sub(mip_info.start)); + + // Need to reset state to 'Defined' + let new_mip_state = MipState::reset_from(mip_state) + .ok_or(ModelsError::from("Unable to reset state"))?; + // Note: statistics are already reset + new_store.insert(new_mip_info, new_mip_state.clone()); + } + _ => { + // active / failed, error, nothing to do + new_store.insert(mip_info.clone(), mip_state.clone()); + } + } + } + + self.store = new_store; + self.stats = new_stats; + Ok(()) + } + + // DB methods + + /// Get MIP store changes between 2 timestamps - used by the db to update the disk + fn update_batches( + &self, + batch: &mut DBBatch, + versioning_batch: &mut DBBatch, + between: (&MassaTime, &MassaTime), + ) -> Result<(), SerializeError> { + let mip_info_ser = MipInfoSerializer::new(); + let mip_state_ser = MipStateSerializer::new(); + + let bounds = (*between.0)..=(*between.1); + let mut key = Vec::new(); + let mut value = Vec::new(); + + for (mip_info, mip_state) in self.store.iter() { + if let Some((advance, state_id)) = mip_state.history.last_key_value() { + if bounds.contains(&advance.now) { + key.extend(MIP_STORE_PREFIX.as_bytes().to_vec()); + mip_info_ser.serialize(mip_info, &mut key)?; + mip_state_ser.serialize(mip_state, &mut value)?; + match state_id { + ComponentStateTypeId::Active => { + batch.insert(key.clone(), Some(value.clone())); + } + _ => { + versioning_batch.insert(key.clone(), Some(value.clone())); + } + } + key.clear(); + value.clear(); + } + } + } + + key.clear(); + value.clear(); + key.extend(MIP_STORE_STATS_PREFIX.as_bytes().to_vec()); + let mip_stats_ser = MipStoreStatsSerializer::new(); + mip_stats_ser.serialize(&self.stats, &mut value)?; + versioning_batch.insert(key.clone(), Some(value.clone())); + + Ok(()) + } + + /// Extend MIP store with what is written on the disk + fn extend_from_db( + &mut self, + db: Arc>, + ) -> Result<(Vec, BTreeMap), ExtendFromDbError> { + let mip_info_deser = MipInfoDeserializer::new(); + let mip_state_deser = MipStateDeserializer::new(); + let mip_store_stats_deser = MipStoreStatsDeserializer::new( + MIP_STORE_STATS_BLOCK_CONSIDERED, + MIP_STORE_STATS_COUNTERS_MAX, + ); + + let db = db.read(); + let handle = db + .db + .cf_handle(STATE_CF) + .ok_or(ExtendFromDbError::UnknownDbColumn(STATE_CF.to_string()))?; + + // Get data from state cf handle + let mut update_data: BTreeMap = Default::default(); + for (ser_mip_info, ser_mip_state) in + db.db.prefix_iterator_cf(handle, MIP_STORE_PREFIX).flatten() + { + if !ser_mip_info.starts_with(MIP_STORE_PREFIX.as_bytes()) { + break; + } + + // deser + let (_, mip_info) = mip_info_deser + .deserialize::(&ser_mip_info[MIP_STORE_PREFIX.len()..]) + .map_err(|e| ExtendFromDbError::Deserialize(e.to_string()))?; + + let (_, mip_state) = mip_state_deser + .deserialize::(&ser_mip_state) + .map_err(|e| ExtendFromDbError::Deserialize(e.to_string()))?; + + update_data.insert(mip_info, mip_state); + } + + let store_raw_ = MipStoreRaw { + store: update_data, + stats: MipStoreStats { + config: MipStatsConfig { + block_count_considered: 
MIP_STORE_STATS_BLOCK_CONSIDERED, + counters_max: MIP_STORE_STATS_COUNTERS_MAX, + }, + latest_announcements: Default::default(), + network_version_counters: Default::default(), + }, + }; + let (mut updated, mut added) = self.update_with(&store_raw_)?; + + let mut update_data: BTreeMap = Default::default(); + let versioning_handle = + db.db + .cf_handle(VERSIONING_CF) + .ok_or(ExtendFromDbError::UnknownDbColumn( + VERSIONING_CF.to_string(), + ))?; + + // Get data from state cf handle + for (ser_mip_info, ser_mip_state) in db + .db + .prefix_iterator_cf(versioning_handle, MIP_STORE_PREFIX) + .flatten() + { + // deser + + match ser_mip_info.as_ref() { + key if key.starts_with(MIP_STORE_PREFIX.as_bytes()) => { + let (_, mip_info) = mip_info_deser + .deserialize::(&ser_mip_info[MIP_STORE_PREFIX.len()..]) + .map_err(|e| ExtendFromDbError::Deserialize(e.to_string()))?; + + let (_, mip_state) = mip_state_deser + .deserialize::(&ser_mip_state) + .map_err(|e| ExtendFromDbError::Deserialize(e.to_string()))?; + + update_data.insert(mip_info, mip_state); + } + key if key.starts_with(MIP_STORE_STATS_PREFIX.as_bytes()) => { + let (_, mip_store_stats) = mip_store_stats_deser + .deserialize::(&ser_mip_state) + .map_err(|e| ExtendFromDbError::Deserialize(e.to_string()))?; + + self.stats = mip_store_stats; + } + _ => { + break; + } + } + } + + let store_raw_ = MipStoreRaw { + store: update_data, + stats: MipStoreStats { + config: MipStatsConfig { + block_count_considered: MIP_STORE_STATS_BLOCK_CONSIDERED, + counters_max: MIP_STORE_STATS_COUNTERS_MAX, + }, + latest_announcements: Default::default(), + network_version_counters: Default::default(), + }, + }; + let (updated_2, added_2) = self.update_with(&store_raw_)?; + updated.extend(updated_2); + added.extend(added_2); + + Ok((updated, added)) + } +} + +impl TryFrom<([(MipInfo, MipState); N], MipStatsConfig)> for MipStoreRaw { + type Error = UpdateWithError; + + fn try_from( + (value, cfg): ([(MipInfo, MipState); N], MipStatsConfig), + ) -> Result { + // Build an empty store + let mut store = Self { + store: Default::default(), + stats: MipStoreStats::new(cfg.clone()), + }; + + // Build another one with given value + let other_store = Self { + store: BTreeMap::from(value), + stats: MipStoreStats::new(cfg), + }; + + // Use update_with ensuring that we have no overlapping time range, unique names & ... 
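// Construction sketch (hypothetical `mip_info_1` / `mip_state_1`; entries must
// have sequential time ranges, strictly increasing versions and unique names
// for try_from to succeed):
//
//     let cfg = MipStatsConfig { block_count_considered: 10, counters_max: 5 };
//     let store = MipStoreRaw::try_from(([(mip_info_1, mip_state_1)], cfg))?;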
+ match store.update_with(&other_store) { + Ok((_updated, mut added)) => { + store.store.append(&mut added); + Ok(store) + } + Err(e) => Err(e), + } + } +} + +// End Store + +#[cfg(test)] +mod test { + use super::*; + + use std::assert_matches::assert_matches; + use std::str::FromStr; + + use massa_db::MassaDBConfig; + use more_asserts::assert_le; + use tempfile::tempdir; + + use crate::test_helpers::versioning_helpers::advance_state_until; + + use massa_models::config::{ + MIP_STORE_STATS_BLOCK_CONSIDERED, MIP_STORE_STATS_COUNTERS_MAX, T0, THREAD_COUNT, + }; + use massa_models::timeslots::get_closest_slot_to_timestamp; + + // Only for unit tests + impl PartialEq for MipState { + fn eq(&self, other: &ComponentState) -> bool { + self.state == *other + } + } + + // helper + impl From<(&MipInfo, &Amount, &MassaTime)> for Advance { + fn from((mip_info, threshold, now): (&MipInfo, &Amount, &MassaTime)) -> Self { + Self { + start_timestamp: mip_info.start, + timeout: mip_info.timeout, + threshold: *threshold, + now: *now, + activation_delay: mip_info.activation_delay, + } + } + } + + fn get_a_version_info() -> (MassaTime, MassaTime, MipInfo) { + // A helper function to provide a default MipInfo + + // Models a Massa Improvements Proposal (MIP-0002), transitioning component address to v2 + + let start = MassaTime::from_utc_ymd_hms(2017, 11, 1, 7, 33, 44).unwrap(); + let timeout = MassaTime::from_utc_ymd_hms(2017, 11, 11, 7, 33, 44).unwrap(); + + return ( + start, + timeout, + MipInfo { + name: "MIP-0002".to_string(), + version: 2, + components: BTreeMap::from([(MipComponent::Address, 1)]), + start, + timeout, + activation_delay: MassaTime::from_millis(20), + }, + ); + } + + #[test] + fn test_state_advance_from_defined() { + // Test Versioning state transition (from state: Defined) + let (_, _, mi) = get_a_version_info(); + let mut state: ComponentState = Default::default(); + assert_eq!(state, ComponentState::defined()); + + let now = mi.start.saturating_sub(MassaTime::from_millis(1)); + let mut advance_msg = Advance::from((&mi, &Amount::zero(), &now)); + + state = state.on_advance(advance_msg.clone()); + assert_eq!(state, ComponentState::defined()); + + let now = mi.start.saturating_add(MassaTime::from_millis(5)); + advance_msg.now = now; + state = state.on_advance(advance_msg); + + // println!("state: {:?}", state); + assert_eq!( + state, + ComponentState::Started(Started { + threshold: Amount::zero() + }) + ); + } + + #[test] + fn test_state_advance_from_started() { + // Test Versioning state transition (from state: Started) + let (_, _, mi) = get_a_version_info(); + let mut state: ComponentState = ComponentState::started(Default::default()); + + let now = mi.start; + let threshold_too_low = VERSIONING_THRESHOLD_TRANSITION_ACCEPTED + .saturating_sub(Amount::from_str("0.1").unwrap()); + let threshold_ok = VERSIONING_THRESHOLD_TRANSITION_ACCEPTED + .saturating_add(Amount::from_str("5.42").unwrap()); + assert_le!(threshold_ok, Amount::from_str("100.0").unwrap()); + let mut advance_msg = Advance::from((&mi, &threshold_too_low, &now)); + + state = state.on_advance(advance_msg.clone()); + assert_eq!(state, ComponentState::started(threshold_too_low)); + advance_msg.threshold = threshold_ok; + state = state.on_advance(advance_msg); + assert_eq!(state, ComponentState::locked_in(now)); + } + + #[test] + fn test_state_advance_from_locked_in() { + // Test Versioning state transition (from state: LockedIn) + let (_, _, mi) = get_a_version_info(); + + let locked_in_at = 
mi.start.saturating_add(MassaTime::from_millis(1)); + let mut state: ComponentState = ComponentState::locked_in(locked_in_at); + + let now = mi.start; + let mut advance_msg = Advance::from((&mi, &Amount::zero(), &now)); + + state = state.on_advance(advance_msg.clone()); + assert_eq!(state, ComponentState::locked_in(locked_in_at)); + + advance_msg.now = advance_msg + .timeout + .saturating_add(MassaTime::from_millis(1)); + state = state.on_advance(advance_msg); + assert!(matches!(state, ComponentState::Active(_))); + } + + #[test] + fn test_state_advance_from_active() { + // Test Versioning state transition (from state: Active) + let (start, _, mi) = get_a_version_info(); + let mut state = ComponentState::active(start); + let now = mi.start; + let advance = Advance::from((&mi, &Amount::zero(), &now)); + + state = state.on_advance(advance); + assert!(matches!(state, ComponentState::Active(_))); + } + + #[test] + fn test_state_advance_from_failed() { + // Test Versioning state transition (from state: Failed) + let (_, _, mi) = get_a_version_info(); + let mut state = ComponentState::failed(); + let now = mi.start; + let advance = Advance::from((&mi, &Amount::zero(), &now)); + state = state.on_advance(advance); + assert_eq!(state, ComponentState::failed()); + } + + #[test] + fn test_state_advance_to_failed() { + // Test Versioning state transition (to state: Failed) + let (_, _, mi) = get_a_version_info(); + let now = mi.timeout.saturating_add(MassaTime::from_millis(1)); + let advance_msg = Advance::from((&mi, &Amount::zero(), &now)); + + let mut state: ComponentState = Default::default(); // Defined + state = state.on_advance(advance_msg.clone()); + assert_eq!(state, ComponentState::Failed(Failed {})); + + let mut state = ComponentState::started(Default::default()); + state = state.on_advance(advance_msg.clone()); + assert_eq!(state, ComponentState::Failed(Failed {})); + } + + #[test] + fn test_state_with_history() { + // Test MipStateHistory::state_at() function + + let (start, _, mi) = get_a_version_info(); + let now_0 = start; + let mut state = MipState::new(now_0); + + assert_eq!(state, ComponentState::defined()); + + let now = mi.start.saturating_add(MassaTime::from_millis(15)); + let mut advance_msg = Advance::from((&mi, &Amount::zero(), &now)); + + // Move from Defined -> Started + state.on_advance(&advance_msg); + assert_eq!(state, ComponentState::started(Amount::zero())); + + // Check history + assert_eq!(state.history.len(), 2); + assert!(matches!( + state.history.first_key_value(), + Some((&Advance { .. }, &ComponentStateTypeId::Defined)) + )); + assert!(matches!( + state.history.last_key_value(), + Some((&Advance { .. 
}, &ComponentStateTypeId::Started)) + )); + + // Query with timestamp + + // Before Defined + let state_id_ = state.state_at( + mi.start.saturating_sub(MassaTime::from_millis(5)), + mi.start, + mi.timeout, + ); + assert!(matches!( + state_id_, + Err(StateAtError::BeforeInitialState(_, _)) + )); + // After Defined timestamp + let state_id = state.state_at(mi.start, mi.start, mi.timeout).unwrap(); + assert_eq!(state_id, ComponentStateTypeId::Defined); + // At Started timestamp + let state_id = state.state_at(now, mi.start, mi.timeout).unwrap(); + assert_eq!(state_id, ComponentStateTypeId::Started); + + // After Started timestamp but before timeout timestamp + let after_started_ts = now.saturating_add(MassaTime::from_millis(15)); + let state_id_ = state.state_at(after_started_ts, mi.start, mi.timeout); + assert_eq!(state_id_, Err(StateAtError::Unpredictable)); + + // After Started timestamp and after timeout timestamp + let after_timeout_ts = mi.timeout.saturating_add(MassaTime::from_millis(15)); + let state_id = state + .state_at(after_timeout_ts, mi.start, mi.timeout) + .unwrap(); + assert_eq!(state_id, ComponentStateTypeId::Failed); + + // Move from Started to LockedIn + let threshold = VERSIONING_THRESHOLD_TRANSITION_ACCEPTED; + advance_msg.threshold = threshold.saturating_add(Amount::from_str("1.0").unwrap()); + advance_msg.now = now.saturating_add(MassaTime::from_millis(1)); + state.on_advance(&advance_msg); + assert_eq!(state, ComponentState::locked_in(advance_msg.now)); + + // Query with timestamp + // After LockedIn timestamp and before timeout timestamp + let after_locked_in_ts = now.saturating_add(MassaTime::from_millis(10)); + let state_id = state + .state_at(after_locked_in_ts, mi.start, mi.timeout) + .unwrap(); + assert_eq!(state_id, ComponentStateTypeId::LockedIn); + // After LockedIn timestamp and after timeout timestamp + let state_id = state + .state_at(after_timeout_ts, mi.start, mi.timeout) + .unwrap(); + assert_eq!(state_id, ComponentStateTypeId::Active); + } + + #[test] + fn test_versioning_store_announce_current() { + // Test VersioningInfo::get_version_to_announce() & ::get_version_current() + + let (start, timeout, mi) = get_a_version_info(); + + let mut mi_2 = mi.clone(); + mi_2.version += 1; + mi_2.start = timeout + .checked_add(MassaTime::from_millis(1000 * 60 * 60 * 24 * 2)) + .unwrap(); // Add 2 days + mi_2.timeout = timeout + .checked_add(MassaTime::from_millis(1000 * 60 * 60 * 24 * 5)) + .unwrap(); // Add 5 days + + // Can only build such object in test - history is empty :-/ + let vs_1 = MipState { + state: ComponentState::active(start), + history: Default::default(), + }; + let vs_2 = MipState { + state: ComponentState::started(Amount::zero()), + history: Default::default(), + }; + + // TODO: Have VersioningStore::from ? 
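// Expected behaviour: get_network_version_current() reports the version of the
// last 'Active' MIP (here mi.version), while get_network_version_to_announce()
// reports the version of the last 'Started' / 'LockedIn' MIP (here mi_2.version);
// an empty store reports 0 for both.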
+ let mip_stats_cfg = MipStatsConfig { + block_count_considered: 10, + counters_max: 5, + }; + let vs_raw = MipStoreRaw { + store: BTreeMap::from([(mi.clone(), vs_1), (mi_2.clone(), vs_2)]), + stats: MipStoreStats::new(mip_stats_cfg.clone()), + }; + // let vs_raw = MipStoreRaw::try_from([(vi.clone(), vs_1), (vi_2.clone(), vs_2)]).unwrap(); + let vs = MipStore(Arc::new(RwLock::new(vs_raw))); + + assert_eq!(vs.get_network_version_current(), mi.version); + assert_eq!(vs.get_network_version_to_announce(), mi_2.version); + + // Test also an empty versioning store + let vs_raw = MipStoreRaw { + store: Default::default(), + stats: MipStoreStats::new(mip_stats_cfg), + }; + let vs = MipStore(Arc::new(RwLock::new(vs_raw))); + assert_eq!(vs.get_network_version_current(), 0); + assert_eq!(vs.get_network_version_to_announce(), 0); + } + + #[test] + fn test_is_coherent_with() { + // Test MipStateHistory::is_coherent_with (coherence of MIP state against its MIP info) + + // Given the following MIP info, we expect state + // Defined @ time <= 2 + // Started @ time > 2 && <= 5 + // LockedIn @ time > time(Started) && <= 5 + // Active @time > 5 + let vi_1 = MipInfo { + name: "MIP-0002".to_string(), + version: 2, + components: BTreeMap::from([(MipComponent::Address, 1)]), + start: MassaTime::from_millis(2), + timeout: MassaTime::from_millis(5), + activation_delay: MassaTime::from_millis(2), + }; + // Another versioning info (from an attacker) for testing + let vi_2 = MipInfo { + name: "MIP-0002".to_string(), + version: 2, + components: BTreeMap::from([(MipComponent::Address, 1)]), + start: MassaTime::from_millis(7), + timeout: MassaTime::from_millis(10), + activation_delay: MassaTime::from_millis(2), + }; + + let vsh = MipState { + state: ComponentState::Error, + history: Default::default(), + }; + // At state Error -> (always) false + assert_eq!(vsh.is_coherent_with(&vi_1), Err(IsCoherentError::AtError)); + + let vsh = MipState { + state: ComponentState::defined(), + history: Default::default(), + }; + // At state Defined but no history -> false + assert_eq!(vsh.is_coherent_with(&vi_1).is_ok(), false); + + let mut vsh = MipState::new(MassaTime::from_millis(1)); + // At state Defined at time 1 -> true, given vi_1 @ time 1 + assert_eq!(vsh.is_coherent_with(&vi_1).is_ok(), true); + // At state Defined at time 1 -> false given vi_1 @ time 3 (state should be Started) + // assert_eq!(vsh.is_coherent_with(&vi_1, MassaTime::from_millis(3)), false); + + // Advance to Started + let now = MassaTime::from_millis(3); + let adv = Advance::from((&vi_1, &Amount::zero(), &now)); + vsh.on_advance(&adv); + let now = MassaTime::from_millis(4); + let adv = Advance::from((&vi_1, &Amount::from_str("14.42").unwrap(), &now)); + vsh.on_advance(&adv); + + // At state Started at time now -> true + assert_eq!( + vsh.state, + ComponentState::started(Amount::from_str("14.42").unwrap()) + ); + assert_eq!(vsh.is_coherent_with(&vi_1).is_ok(), true); + // Now with another versioning info + assert_eq!(vsh.is_coherent_with(&vi_2).is_ok(), false); + + // Advance to LockedIn + let now = MassaTime::from_millis(4); + let adv = Advance::from((&vi_1, &VERSIONING_THRESHOLD_TRANSITION_ACCEPTED, &now)); + vsh.on_advance(&adv); + + // At state LockedIn at time now -> true + assert_eq!(vsh.state, ComponentState::locked_in(now)); + assert_eq!(vsh.is_coherent_with(&vi_1).is_ok(), true); + + // edge cases + // TODO: history all good but does not start with Defined, start with Started + } + + #[test] + fn test_update_with() { + // Test MipStoreRaw.update_with 
method (e.g. update a store from another, used in bootstrap) + + let vi_1 = MipInfo { + name: "MIP-0002".to_string(), + version: 2, + components: BTreeMap::from([(MipComponent::Address, 1)]), + start: MassaTime::from_millis(2), + timeout: MassaTime::from_millis(5), + activation_delay: MassaTime::from_millis(2), + }; + + let _time = MassaTime::now().unwrap(); + let vs_1 = advance_state_until(ComponentState::active(_time), &vi_1); + assert!(matches!(vs_1.state, ComponentState::Active(_))); + + let vi_2 = MipInfo { + name: "MIP-0003".to_string(), + version: 3, + components: BTreeMap::from([(MipComponent::Address, 2)]), + start: MassaTime::from_millis(17), + timeout: MassaTime::from_millis(27), + activation_delay: MassaTime::from_millis(2), + }; + let vs_2 = advance_state_until(ComponentState::defined(), &vi_2); + + let mip_stats_cfg = MipStatsConfig { + block_count_considered: 10, + counters_max: 5, + }; + let mut vs_raw_1 = MipStoreRaw::try_from(( + [(vi_1.clone(), vs_1.clone()), (vi_2.clone(), vs_2.clone())], + mip_stats_cfg.clone(), + )) + .unwrap(); + + let vs_2_2 = advance_state_until(ComponentState::active(_time), &vi_2); + assert!(matches!(vs_2_2.state, ComponentState::Active(_))); + + let vs_raw_2 = MipStoreRaw::try_from(( + [(vi_1.clone(), vs_1.clone()), (vi_2.clone(), vs_2_2.clone())], + mip_stats_cfg, + )) + .unwrap(); + + println!("update with:"); + let (updated, added) = vs_raw_1.update_with(&vs_raw_2).unwrap(); + + // Check update_with result + assert!(added.is_empty()); + assert_eq!(updated, vec![vi_2.clone()]); + + // Expect state 1 (for vi_1) no change, state 2 (for vi_2) updated to "Active" + assert_eq!(vs_raw_1.store.get(&vi_1).unwrap().state, vs_1.state); + assert_eq!(vs_raw_1.store.get(&vi_2).unwrap().state, vs_2_2.state); + } + + #[test] + fn test_update_with_invalid() { + // Test updating a MIP store with another invalid one: + // case 1: overlapping time range + // case 2: overlapping versioning component + + // part 0 - defines data for the test + let vi_1 = MipInfo { + name: "MIP-0002".to_string(), + version: 2, + components: BTreeMap::from([(MipComponent::Address, 1)]), + start: MassaTime::from_millis(0), + timeout: MassaTime::from_millis(5), + activation_delay: MassaTime::from_millis(2), + }; + let _time = MassaTime::now().unwrap(); + let vs_1 = advance_state_until(ComponentState::active(_time), &vi_1); + assert!(matches!(vs_1.state, ComponentState::Active(_))); + + let vi_2 = MipInfo { + name: "MIP-0003".to_string(), + version: 3, + components: BTreeMap::from([(MipComponent::Address, 2)]), + start: MassaTime::from_millis(17), + timeout: MassaTime::from_millis(27), + activation_delay: MassaTime::from_millis(2), + }; + let vs_2 = advance_state_until(ComponentState::defined(), &vi_2); + assert_eq!(vs_2, ComponentState::defined()); + + let mip_stats_cfg = MipStatsConfig { + block_count_considered: 10, + counters_max: 5, + }; + + // case 1 + { + let mut vs_raw_1 = MipStoreRaw::try_from(( + [(vi_1.clone(), vs_1.clone()), (vi_2.clone(), vs_2.clone())], + mip_stats_cfg.clone(), + )) + .unwrap(); + + let mut vi_2_2 = vi_2.clone(); + // Make mip info invalid (because start == vi_1.timeout) + vi_2_2.start = vi_1.timeout; + let vs_2_2 = advance_state_until(ComponentState::defined(), &vi_2_2); + let vs_raw_2 = MipStoreRaw { + store: BTreeMap::from([ + (vi_1.clone(), vs_1.clone()), + (vi_2_2.clone(), vs_2_2.clone()), + ]), + stats: MipStoreStats::new(mip_stats_cfg.clone()), + }; + + assert_matches!( + vs_raw_1.update_with(&vs_raw_2), + Err(UpdateWithError::Overlapping(..)) + ); 
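// The rule exercised here: update_with only accepts a new MipInfo whose start is
// strictly greater than the previous MipInfo's timeout, so vi_2_2.start ==
// vi_1.timeout is rejected as UpdateWithError::Overlapping.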
+ assert_eq!(vs_raw_1.store.get(&vi_1).unwrap().state, vs_1.state); + assert_eq!(vs_raw_1.store.get(&vi_2).unwrap().state, vs_2.state); + + // Check that try_from fails too (because it uses update_with internally) + { + let _vs_raw_2_ = MipStoreRaw::try_from(( + [ + (vi_1.clone(), vs_1.clone()), + (vi_2_2.clone(), vs_2_2.clone()), + ], + mip_stats_cfg.clone(), + )); + assert_eq!(_vs_raw_2_.is_err(), true); + } + } + + // case 2 + { + let mut vs_raw_1 = MipStoreRaw::try_from(( + [(vi_1.clone(), vs_1.clone()), (vi_2.clone(), vs_2.clone())], + mip_stats_cfg.clone(), + )) + .unwrap(); + + let mut vi_2_2 = vi_2.clone(); + vi_2_2.components = vi_1.components.clone(); + + let vs_2_2 = advance_state_until(ComponentState::defined(), &vi_2_2); + let vs_raw_2 = MipStoreRaw { + store: BTreeMap::from([ + (vi_1.clone(), vs_1.clone()), + (vi_2_2.clone(), vs_2_2.clone()), + ]), + stats: MipStoreStats::new(mip_stats_cfg.clone()), + }; + + // Component states being equal should produce an Ok result + // We also have vi.1.components == vi_2_2.components ~ overlapping versions + // TODO: clarify how this is supposed to behave + assert_matches!(vs_raw_1.update_with(&vs_raw_2), Ok(_)); + } + } + + #[test] + fn test_empty_mip_store() { + // Test if we can init an empty MipStore + + let mip_stats_config = MipStatsConfig { + block_count_considered: MIP_STORE_STATS_BLOCK_CONSIDERED, + counters_max: MIP_STORE_STATS_COUNTERS_MAX, + }; + + let mip_store = MipStore::try_from(([], mip_stats_config)); + assert_eq!(mip_store.is_ok(), true); + } + + #[test] + fn test_update_with_unknown() { + // Test update_with with unknown MipComponent (can happen if a node software is outdated) + + // data + let mip_stats_config = MipStatsConfig { + block_count_considered: MIP_STORE_STATS_BLOCK_CONSIDERED, + counters_max: MIP_STORE_STATS_COUNTERS_MAX, + }; + + let mut mip_store_raw_1 = MipStoreRaw::try_from(([], mip_stats_config.clone())).unwrap(); + + let mi_1 = MipInfo { + name: "MIP-0002".to_string(), + version: 2, + components: BTreeMap::from([(MipComponent::__Nonexhaustive, 1)]), + start: MassaTime::from_millis(0), + timeout: MassaTime::from_millis(5), + activation_delay: MassaTime::from_millis(2), + }; + let ms_1 = advance_state_until(ComponentState::defined(), &mi_1); + assert_eq!(ms_1, ComponentState::defined()); + let mip_store_raw_2 = MipStoreRaw { + store: BTreeMap::from([(mi_1.clone(), ms_1.clone())]), + stats: MipStoreStats::new(mip_stats_config.clone()), + }; + + let (updated, added) = mip_store_raw_1.update_with(&mip_store_raw_2).unwrap(); + + assert_eq!(updated.len(), 0); + assert_eq!(added.len(), 1); + assert_eq!(added.get(&mi_1).unwrap().state, ComponentState::defined()); + } + + #[test] + fn test_mip_store_network_restart() { + // Test if we can get a coherent MipStore after a network shutdown + + let genesis_timestamp = MassaTime::from_millis(0); + + let shutdown_start = Slot::new(2, 0); + let shutdown_end = Slot::new(8, 0); + + // helper functions so the test code is easy to read + let get_slot_ts = + |slot| get_block_slot_timestamp(THREAD_COUNT, T0, genesis_timestamp, slot).unwrap(); + let is_coherent = |store: &MipStoreRaw, shutdown_start, shutdown_end| { + store + .is_coherent_with_shutdown_period( + shutdown_start, + shutdown_end, + THREAD_COUNT, + T0, + genesis_timestamp, + ) + .unwrap() + }; + let update_store = |store: &mut MipStoreRaw, shutdown_start, shutdown_end| { + store + .update_for_network_shutdown( + shutdown_start, + shutdown_end, + THREAD_COUNT, + T0, + genesis_timestamp, + ) + .unwrap() + }; + let 
_dump_store = |store: &MipStoreRaw| { + println!("Dump store:"); + for (mip_info, mip_state) in store.store.iter() { + println!( + "mip_info {} {} - start: {} - timeout: {}: state: {:?}", + mip_info.name, + mip_info.version, + get_closest_slot_to_timestamp( + THREAD_COUNT, + T0, + genesis_timestamp, + mip_info.start + ), + get_closest_slot_to_timestamp( + THREAD_COUNT, + T0, + genesis_timestamp, + mip_info.timeout + ), + mip_state.state + ); + } + }; + // end helpers + + let mip_stats_cfg = MipStatsConfig { + block_count_considered: 10, + counters_max: 5, + }; + let mut mi_1 = MipInfo { + name: "MIP-0002".to_string(), + version: 2, + components: BTreeMap::from([(MipComponent::Address, 1)]), + start: MassaTime::from_millis(2), + timeout: MassaTime::from_millis(5), + activation_delay: MassaTime::from_millis(100), + }; + let mut mi_2 = MipInfo { + name: "MIP-0003".to_string(), + version: 3, + components: BTreeMap::from([(MipComponent::Address, 2)]), + start: MassaTime::from_millis(7), + timeout: MassaTime::from_millis(11), + activation_delay: MassaTime::from_millis(100), + }; + + // MipInfo 1 @ state 'Defined' should start during shutdown + { + mi_1.start = get_slot_ts(Slot::new(3, 7)); + mi_1.timeout = get_slot_ts(Slot::new(5, 7)); + mi_2.start = get_slot_ts(Slot::new(7, 7)); + mi_2.timeout = get_slot_ts(Slot::new(10, 7)); + + let ms_1 = advance_state_until(ComponentState::defined(), &mi_1); + let ms_2 = advance_state_until(ComponentState::defined(), &mi_2); + let mut store = MipStoreRaw::try_from(( + [(mi_1.clone(), ms_1), (mi_2.clone(), ms_2)], + mip_stats_cfg.clone(), + )) + .unwrap(); + + assert_eq!(is_coherent(&store, shutdown_start, shutdown_end), false); + update_store(&mut store, shutdown_start, shutdown_end); + assert_eq!(is_coherent(&store, shutdown_start, shutdown_end), true); + // _dump_store(&store); + } + + // MipInfo 1 @ state 'Defined' will start AFTER shutdown + { + mi_1.start = get_slot_ts(Slot::new(9, 7)); + mi_1.timeout = get_slot_ts(Slot::new(11, 7)); + mi_2.start = get_slot_ts(Slot::new(12, 7)); + mi_2.timeout = get_slot_ts(Slot::new(19, 7)); + + let ms_1 = advance_state_until(ComponentState::defined(), &mi_1); + let ms_2 = advance_state_until(ComponentState::defined(), &mi_2); + let mut store = MipStoreRaw::try_from(( + [(mi_1.clone(), ms_1), (mi_2.clone(), ms_2)], + mip_stats_cfg.clone(), + )) + .unwrap(); + let store_orig = store.clone(); + + // Already ok even with a shutdown but let's check it + assert_eq!(is_coherent(&store, shutdown_start, shutdown_end), true); + // _dump_store(&store); + update_store(&mut store, shutdown_start, shutdown_end); + assert_eq!(is_coherent(&store, shutdown_start, shutdown_end), true); + // _dump_store(&store); + + // Check that nothing has changed + assert_eq!(store_orig, store); + } + + // MipInfo 1 @ state 'Started' before shutdown + { + mi_1.start = get_slot_ts(Slot::new(1, 7)); + mi_1.timeout = get_slot_ts(Slot::new(5, 7)); + mi_2.start = get_slot_ts(Slot::new(7, 7)); + mi_2.timeout = get_slot_ts(Slot::new(10, 7)); + + let ms_1 = advance_state_until(ComponentState::started(Amount::zero()), &mi_1); + let ms_2 = advance_state_until(ComponentState::defined(), &mi_2); + let mut store = MipStoreRaw::try_from(( + [(mi_1.clone(), ms_1), (mi_2.clone(), ms_2)], + mip_stats_cfg.clone(), + )) + .unwrap(); + + assert_eq!(is_coherent(&store, shutdown_start, shutdown_end), false); + update_store(&mut store, shutdown_start, shutdown_end); + assert_eq!(is_coherent(&store, shutdown_start, shutdown_end), true); + // _dump_store(&store); + } + + // 
MipInfo 1 @ state 'LockedIn' with transition during shutdown + { + let shutdown_range = shutdown_start..=shutdown_end; + + mi_1.start = get_slot_ts(Slot::new(1, 7)); + mi_1.timeout = get_slot_ts(Slot::new(5, 7)); + + // Just before shutdown + let locked_in_at = Slot::new(1, 9); + assert!(locked_in_at < shutdown_start); + let activate_at = Slot::new(4, 0); + assert!(shutdown_range.contains(&activate_at)); + // MIP 1 in state 'LockedIn', should transition to 'Active' during shutdown period + mi_1.activation_delay = + get_slot_ts(activate_at).saturating_sub(get_slot_ts(locked_in_at)); + let ms_1 = advance_state_until( + ComponentState::locked_in(get_slot_ts(Slot::new(1, 9))), + &mi_1, + ); + + // MIP 2 in state 'Defined' + mi_2.start = get_slot_ts(Slot::new(7, 7)); + mi_2.timeout = get_slot_ts(Slot::new(10, 7)); + let ms_2 = advance_state_until(ComponentState::defined(), &mi_2); + let mut store = MipStoreRaw::try_from(( + [(mi_1.clone(), ms_1), (mi_2.clone(), ms_2)], + mip_stats_cfg.clone(), + )) + .unwrap(); + + assert_eq!(is_coherent(&store, shutdown_start, shutdown_end), false); + // _dump_store(&store); + update_store(&mut store, shutdown_start, shutdown_end); + assert_eq!(is_coherent(&store, shutdown_start, shutdown_end), true); + // _dump_store(&store); + + // Update stats - so should force transitions if any + store.update_network_version_stats( + get_slot_ts(shutdown_end.get_next_slot(THREAD_COUNT).unwrap()), + Some((1, 0)), + ); + + let (first_mi_info, first_mi_state) = store.store.first_key_value().unwrap(); + assert_eq!(*first_mi_info.name, mi_1.name); + // State was 'LockedIn' -> reset, start ts now defined right after network restart + assert_eq!( + ComponentStateTypeId::from(&first_mi_state.state), + ComponentStateTypeId::Started + ); + let (last_mi_info, last_mi_state) = store.store.last_key_value().unwrap(); + assert_eq!(*last_mi_info.name, mi_2.name); + // State was 'Defined' -> start is set up after MIP 1 start & timeout + assert_eq!( + ComponentStateTypeId::from(&last_mi_state.state), + ComponentStateTypeId::Defined + ); + } + } + + #[test] + fn test_mip_store_db() { + // Test interaction of MIP store with MassaDB + // 1- init from db (empty disk) + // 2- update state + // 3- write changes to db + // 4- init a new mip store from disk and compare + + let genesis_timestamp = MassaTime::from_millis(0); + // helpers + let get_slot_ts = + |slot| get_block_slot_timestamp(THREAD_COUNT, T0, genesis_timestamp, slot).unwrap(); + + // Db init + + let temp_dir = tempdir().expect("Unable to create a temp folder"); + // println!("Using temp dir: {:?}", temp_dir.path()); + + let db_config = MassaDBConfig { + path: temp_dir.path().to_path_buf(), + max_history_length: 100, + max_new_elements: 100, + thread_count: THREAD_COUNT, + }; + let db = Arc::new(RwLock::new(MassaDB::new(db_config))); + + // MIP info / store init + + let mi_1 = MipInfo { + name: "MIP-0002".to_string(), + version: 2, + components: BTreeMap::from([(MipComponent::Address, 1)]), + start: get_slot_ts(Slot::new(2, 0)), + timeout: get_slot_ts(Slot::new(3, 0)), + activation_delay: MassaTime::from_millis(10), + }; + let ms_1 = advance_state_until(ComponentState::defined(), &mi_1); + + let mi_2 = MipInfo { + name: "MIP-0003".to_string(), + version: 3, + components: BTreeMap::from([(MipComponent::Address, 2)]), + start: get_slot_ts(Slot::new(4, 2)), + timeout: get_slot_ts(Slot::new(7, 2)), + activation_delay: MassaTime::from_millis(10), + }; + let ms_2 = advance_state_until(ComponentState::defined(), &mi_2); + + let 
mip_stats_config = MipStatsConfig { + block_count_considered: MIP_STORE_STATS_BLOCK_CONSIDERED, + counters_max: MIP_STORE_STATS_COUNTERS_MAX, + }; + let mut mip_store = MipStore::try_from(( + [(mi_1.clone(), ms_1.clone()), (mi_2.clone(), ms_2.clone())], + mip_stats_config.clone(), + )) + .expect("Cannot create an empty MIP store"); + + // Step 1 + + mip_store.extend_from_db(db.clone()).unwrap(); + // Check that we extend from an empty folder + assert_eq!(mip_store.0.read().store.len(), 2); + assert_eq!( + mip_store.0.read().store.first_key_value(), + Some((&mi_1, &ms_1)) + ); + assert_eq!( + mip_store.0.read().store.last_key_value(), + Some((&mi_2, &ms_2)) + ); + + // Step 2 + let active_at = get_slot_ts(Slot::new(2, 5)); + let ms_1_ = advance_state_until(ComponentState::active(active_at), &mi_1); + + let mip_store_ = + MipStore::try_from(([(mi_1.clone(), ms_1_.clone())], mip_stats_config.clone())) + .expect("Cannot create an empty MIP store"); + + let (updated, added) = mip_store.update_with(&mip_store_).unwrap(); + + // Check update_with result - only 1 state should be updated + assert_eq!(updated.len(), 1); + assert_eq!(added.len(), 0); + assert_eq!(mip_store.0.read().store.len(), 2); + assert_eq!( + mip_store.0.read().store.first_key_value(), + Some((&mi_1, &ms_1_)) + ); + assert_eq!( + mip_store.0.read().store.last_key_value(), + Some((&mi_2, &ms_2)) + ); + + // Step 3 + + let mut db_batch = DBBatch::new(); + let mut db_versioning_batch = DBBatch::new(); + + // FIXME: get slot right after active at - no hardcode + let slot_bounds_ = (&Slot::new(1, 0), &Slot::new(4, 2)); + let between = (&get_slot_ts(*slot_bounds_.0), &get_slot_ts(*slot_bounds_.1)); + + mip_store + .update_batches(&mut db_batch, &mut db_versioning_batch, between) + .unwrap(); + + assert_eq!(db_batch.len(), 1); + assert_eq!(db_versioning_batch.len(), 2); // + stats + + let mut guard_db = db.write(); + // FIXME / TODO: no slot hardcoding? 
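// Per update_batches above, db_batch holds the entry for the 'Active' MIP
// (state column family) while db_versioning_batch holds the remaining entries
// plus the serialized stats (versioning column family).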
+ guard_db.write_batch(db_batch, db_versioning_batch, Some(Slot::new(3, 0))); + drop(guard_db); + + // Step 4 + let mut mip_store_2 = MipStore::try_from(( + [(mi_1.clone(), ms_1.clone()), (mi_2.clone(), ms_2.clone())], + mip_stats_config.clone(), + )) + .expect("Cannot create an empty MIP store"); + // assert_eq!(mip_store_2.0.read().store.len(), 0); + + mip_store_2.extend_from_db(db.clone()).unwrap(); + + let guard_1 = mip_store.0.read(); + let guard_2 = mip_store_2.0.read(); + let st1_raw = guard_1.deref(); + let st2_raw = guard_2.deref(); + + // println!("st1_raw: {:?}", st1_raw); + // println!("st2_raw: {:?}", st2_raw); + assert_eq!(st1_raw, st2_raw); + } + + #[test] + fn test_mip_store_stats() { + // Test MipStoreRaw stats + + // helper functions so the test code is easy to read + let genesis_timestamp = MassaTime::from_millis(0); + let get_slot_ts = + |slot| get_block_slot_timestamp(THREAD_COUNT, T0, genesis_timestamp, slot).unwrap(); + + let mip_stats_config = MipStatsConfig { + block_count_considered: 2, + counters_max: 1, + }; + let activation_delay = MassaTime::from_millis(100); + let timeout = MassaTime::now() + .unwrap() + .saturating_add(MassaTime::from_millis(50_000)); // + 50 seconds + let mi_1 = MipInfo { + name: "MIP-0001".to_string(), + version: 1, + components: BTreeMap::from([(MipComponent::Address, 1)]), + start: MassaTime::from_millis(2), + timeout, + activation_delay, + }; + let ms_1 = advance_state_until(ComponentState::started(Amount::zero()), &mi_1); + + let mut mip_store = + MipStoreRaw::try_from(([(mi_1.clone(), ms_1)], mip_stats_config)).unwrap(); + + // + // mip_store.update_network_version_stats(get_slot_ts(Slot::new(1, 0)), Some((0, 0))); + // TODO: should not add a counter for version 0 ? + // assert_eq!(mip_store.stats.network_version_counters.len(), 0); + + // Current network version is 0, next one is 1 + mip_store.update_network_version_stats(get_slot_ts(Slot::new(1, 0)), Some((0, 1))); + assert_eq!(mip_store.stats.network_version_counters.len(), 1); + assert_eq!(mip_store.stats.network_version_counters.get(&1), Some(&1)); + + mip_store.update_network_version_stats(get_slot_ts(Slot::new(1, 0)), Some((0, 1))); + assert_eq!(mip_store.stats.network_version_counters.len(), 1); + assert_eq!(mip_store.stats.network_version_counters.get(&1), Some(&2)); + + // Check that MipInfo is now + let (mi_, ms_) = mip_store.store.last_key_value().unwrap(); + assert_eq!(*mi_, mi_1); + assert_matches!(ms_.state, ComponentState::LockedIn(..)); + + let mut at = MassaTime::now().unwrap(); + at = at.saturating_add(activation_delay); + assert_eq!( + ms_.state_at(at, mi_1.start, mi_1.timeout), + Ok(ComponentStateTypeId::Active) + ); + + // Now network version is 1, next one is 2 + mip_store.update_network_version_stats(get_slot_ts(Slot::new(1, 0)), Some((1, 2))); + // Config is set to allow only 1 counter + assert_eq!(mip_store.stats.network_version_counters.len(), 1); + assert_eq!(mip_store.stats.network_version_counters.get(&2), Some(&1)); + } +} diff --git a/massa-versioning-worker/src/versioning_factory.rs b/massa-versioning/src/versioning_factory.rs similarity index 67% rename from massa-versioning-worker/src/versioning_factory.rs rename to massa-versioning/src/versioning_factory.rs index e15aad9fd74..ccc4f1f1a08 100644 --- a/massa-versioning-worker/src/versioning_factory.rs +++ b/massa-versioning/src/versioning_factory.rs @@ -7,7 +7,7 @@ use crate::versioning::{ComponentState, ComponentStateTypeId, MipComponent, MipS /// Factory error #[allow(missing_docs)] -#[derive(Error, 
Debug)] +#[derive(Error, Debug, Clone)] pub enum FactoryError { #[error("Unknown version, cannot build obj with version: {0}")] UnknownVersion(u32), @@ -23,7 +23,7 @@ pub enum FactoryError { /// Strategy to use when creating a new object from a factory pub enum FactoryStrategy { /// use get_latest_version (see Factory trait) - Latest, + // Latest, /// Require to create an object with this specific version Exact(u32), /// Create an object given a timestamp (e.g slot) @@ -54,19 +54,19 @@ pub trait VersioningFactory { /// Access to the MipStore fn get_versioning_store(&self) -> MipStore; + /* /// Get latest component version (aka last active for the factory component) fn get_latest_component_version(&self) -> u32 { let component = Self::get_component(); let vi_store_ = self.get_versioning_store(); let vi_store = vi_store_.0.read(); - let state_active = ComponentState::active(); vi_store .store .iter() .rev() .find_map(|(vi, vsh)| { - if vsh.state == state_active { + if matches!(vsh.state, ComponentState::Active(_)) { vi.components.get(&component).copied() } else { None @@ -74,13 +74,13 @@ pub trait VersioningFactory { }) .unwrap_or(0) } + */ /// Get latest version at given timestamp (e.g. slot) fn get_latest_component_version_at(&self, ts: MassaTime) -> Result<u32, FactoryError> { let component = Self::get_component(); let vi_store_ = self.get_versioning_store(); let vi_store = vi_store_.0.read(); - let state_active = ComponentState::active(); // Iter backward, filter component + state active, let version = vi_store @@ -88,7 +88,8 @@ pub trait VersioningFactory { .iter() .rev() .filter(|(vi, vsh)| { - vi.components.get(&component).is_some() && vsh.state == state_active + vi.components.get(&component).is_some() + && matches!(vsh.state, ComponentState::Active(_)) }) .find_map(|(vi, vsh)| { let res = vsh.state_at(ts, vi.start, vi.timeout); @@ -108,9 +109,8 @@ pub trait VersioningFactory { let vi_store_ = self.get_versioning_store(); let vi_store = vi_store_.0.read(); - let state_active = ComponentState::active(); let versions_iter = vi_store.store.iter().filter_map(|(vi, vsh)| { - if vsh.state == state_active { + if matches!(vsh.state, ComponentState::Active(_)) { vi.components.get(&component).copied() } else { None @@ -139,11 +139,29 @@ pub trait VersioningFactory { .collect() } + /// Get the version of the current component with the given strategy + fn get_component_version_with_strategy( + &self, + strategy: FactoryStrategy, + ) -> Result<u32, FactoryError> { + match strategy { + FactoryStrategy::Exact(v) => match self.get_all_component_versions().get(&v) { + Some(s) if *s == ComponentStateTypeId::Active => Ok(v), + Some(s) if *s != ComponentStateTypeId::Active => { + Err(FactoryError::OnStateNotReady(v)) + } + _ => Err(FactoryError::UnknownVersion(v)), + }, + FactoryStrategy::At(ts) => self.get_latest_component_version_at(ts), + // None | Some(FactoryStrategy::Latest) => Ok(self.get_latest_component_version()), + } + } + /// Create an object of type Self::Output fn create( &self, args: &Self::Arguments, - strategy: Option<FactoryStrategy>, + strategy: FactoryStrategy, ) -> Result<Self::Output, Self::Error>; } @@ -151,7 +169,7 @@ pub trait VersioningFactory { mod test { use super::*; - use std::collections::{BTreeMap, HashMap}; + use std::collections::BTreeMap; use crate::test_helpers::versioning_helpers::advance_state_until; use crate::versioning::{MipInfo, MipState, MipStatsConfig}; @@ -161,12 +179,12 @@ mod test { // Define a struct Address with 2 versions AddressV0 & AddressV1 #[allow(dead_code)] #[derive(Debug)] - struct AddressV0 { + struct TestAddressV0 { hash: String,
} #[allow(dead_code)] - impl AddressV0 { + impl TestAddressV0 { fn new(hash: String) -> Self { Self { hash } } @@ -174,14 +192,14 @@ mod test { #[allow(dead_code)] #[derive(Debug)] - struct AddressV1 { + struct TestAddressV1 { slot: String, creator: String, index: u32, } #[allow(dead_code)] - impl AddressV1 { + impl TestAddressV1 { fn new(slot: String, creator: String, index: u32) -> Self { Self { slot, @@ -192,13 +210,12 @@ mod test { } #[derive(Debug)] - enum Address { - V0(AddressV0), - V1(AddressV1), + enum TestAddress { + V0(TestAddressV0), + V1(TestAddressV1), } - // - struct AddressArgs { + struct TestAddressArgs { // V0 hash: Option, // V1 @@ -210,14 +227,14 @@ mod test { // Now we define an Address factory #[derive(Debug)] - struct AddressFactory { + struct TestAddressFactory { versioning_store: MipStore, } - impl VersioningFactory for AddressFactory { - type Output = Address; + impl VersioningFactory for TestAddressFactory { + type Output = TestAddress; type Error = FactoryError; - type Arguments = AddressArgs; + type Arguments = TestAddressArgs; fn get_component() -> MipComponent { MipComponent::Address @@ -230,31 +247,18 @@ mod test { fn create( &self, args: &Self::Arguments, - strategy: Option, + strategy: FactoryStrategy, ) -> Result { - let version = match strategy { - Some(FactoryStrategy::Exact(v)) => { - // This is not optimal - can use get_versions and return a less descriptive error - match self.get_all_component_versions().get(&v) { - Some(s) if *s == ComponentStateTypeId::Active => Ok(v), - Some(s) if *s != ComponentStateTypeId::Active => { - Err(FactoryError::OnStateNotReady(v)) - } - _ => Err(FactoryError::UnknownVersion(v)), - } - } - Some(FactoryStrategy::At(ts)) => self.get_latest_component_version_at(ts), - None | Some(FactoryStrategy::Latest) => Ok(self.get_latest_component_version()), - }; + let version = self.get_component_version_with_strategy(strategy); match version { - Ok(0) => Ok(Address::V0(AddressV0 { + Ok(0) => Ok(TestAddress::V0(TestAddressV0 { hash: args.hash.clone().ok_or(FactoryError::OnCreate( stringify!(Self::Output).to_string(), "Please provide hash in args".to_string(), ))?, })), - Ok(1) => Ok(Address::V1(AddressV1 { + Ok(1) => Ok(TestAddress::V1(TestAddressV1 { slot: args.slot.clone().ok_or(FactoryError::OnCreate( stringify!(Self::Output).to_string(), "Please provide 'slot' in args".to_string(), @@ -273,22 +277,22 @@ mod test { let vi_1 = MipInfo { name: "MIP-0002".to_string(), version: 1, - components: HashMap::from([(MipComponent::Address, 1)]), - start: MassaTime::from(12), - timeout: MassaTime::from(15), - activation_delay: MassaTime::from(2), + components: BTreeMap::from([(MipComponent::Address, 1)]), + start: MassaTime::from_millis(12), + timeout: MassaTime::from_millis(15), + activation_delay: MassaTime::from_millis(2), }; - let vs_1 = MipState::new(MassaTime::from(10)); + let vs_1 = MipState::new(MassaTime::from_millis(10)); let vi_2 = MipInfo { name: "MIP-0003".to_string(), version: 2, - components: HashMap::from([(MipComponent::Address, 2)]), - start: MassaTime::from(25), - timeout: MassaTime::from(28), - activation_delay: MassaTime::from(2), + components: BTreeMap::from([(MipComponent::Address, 2)]), + start: MassaTime::from_millis(25), + timeout: MassaTime::from_millis(28), + activation_delay: MassaTime::from_millis(2), }; - let vs_2 = MipState::new(MassaTime::from(18)); + let vs_2 = MipState::new(MassaTime::from_millis(18)); let mip_stats_cfg = MipStatsConfig { block_count_considered: 10, @@ -300,17 +304,17 @@ mod test { 
mip_stats_cfg, )) .unwrap(); - let fa = AddressFactory { + let fa = TestAddressFactory { versioning_store: vs.clone(), }; - let args = AddressArgs { + let args = TestAddressArgs { hash: Some("sdofjsklfhskfjl".into()), slot: Some("slot_4_2".to_string()), creator: Some("me_pubk".to_string()), index: Some(3), }; - let args_no_v1 = AddressArgs { + let args_no_v1 = TestAddressArgs { hash: Some("sdofjsklfhskfjl".into()), slot: None, creator: Some("me_pubk".to_string()), @@ -318,17 +322,17 @@ mod test { }; assert_eq!(fa.get_all_active_component_versions(), vec![0]); - assert_eq!(fa.get_latest_component_version(), 0); - let addr_a = fa.create(&args, None); - assert!(matches!(addr_a, Ok(Address::V0(_)))); + let addr_a = fa.create(&args, 0.into()); + assert!(matches!(addr_a, Ok(TestAddress::V0(_)))); // // Version 2 is unknown - let addr_ = fa.create(&args, Some(2.into())); + let addr_ = fa.create(&args, 2.into()); assert!(matches!(addr_, Err(FactoryError::OnStateNotReady(2)))); // Advance state 1 to Active - let vs_1_new = advance_state_until(ComponentState::active(), &vi_1); + let _time = MassaTime::now().unwrap(); + let vs_1_new = advance_state_until(ComponentState::active(_time), &vi_1); // Create a new factory let info = BTreeMap::from([(vi_1.clone(), vs_1_new.clone()), (vi_2.clone(), vs_2)]); // Update versioning store @@ -342,43 +346,44 @@ mod test { .collect::>(), vec![0, 1, 2] ); - assert_eq!(fa.get_latest_component_version(), 1); - let addr_b = fa.create(&args, None); - assert!(matches!(addr_b, Ok(Address::V1(_)))); + // assert_eq!(fa.get_latest_component_version(), 1); + let addr_b = fa.create(&args, FactoryStrategy::At(MassaTime::now().unwrap())); + assert!(matches!(addr_b, Ok(TestAddress::V1(_)))); // Error if not enough args - let addr_ = fa.create(&args_no_v1, Some(1.into())); + let addr_ = fa.create(&args_no_v1, 1.into()); assert!(matches!(addr_, Err(FactoryError::OnCreate(_, _)))); // Can still create AddressV0 - let addr_c = fa.create(&args, Some(0.into())); + let addr_c = fa.create(&args, 0.into()); println!("addr_c: {:?}", addr_c); - assert!(matches!(addr_c, Ok(Address::V0(_)))); + assert!(matches!(addr_c, Ok(TestAddress::V0(_)))); } #[test] fn test_factory_strategy_at() { // Test factory & FactoryStrategy::At(...) 
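// Strategy recap (sketch; `some_ts` is a placeholder): Exact(v) succeeds only
// if version v is 'Active' (OnStateNotReady / UnknownVersion otherwise), while
// At(ts) resolves to the newest component version whose MIP was 'Active' at ts:
//
//     let addr = fa.create(&args, FactoryStrategy::At(some_ts))?;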
+ let _time = MassaTime::now().unwrap(); let vi_1 = MipInfo { name: "MIP-0002".to_string(), version: 1, - components: HashMap::from([(MipComponent::Address, 1)]), - start: MassaTime::from(12), - timeout: MassaTime::from(15), - activation_delay: MassaTime::from(2), + components: BTreeMap::from([(MipComponent::Address, 1)]), + start: MassaTime::from_millis(12), + timeout: MassaTime::from_millis(15), + activation_delay: MassaTime::from_millis(2), }; - let vs_1 = advance_state_until(ComponentState::active(), &vi_1); + let vs_1 = advance_state_until(ComponentState::active(_time), &vi_1); let vi_2 = MipInfo { name: "MIP-0003".to_string(), version: 2, - components: HashMap::from([(MipComponent::Address, 2)]), - start: MassaTime::from(25), - timeout: MassaTime::from(28), - activation_delay: MassaTime::from(2), + components: BTreeMap::from([(MipComponent::Address, 2)]), + start: MassaTime::from_millis(25), + timeout: MassaTime::from_millis(28), + activation_delay: MassaTime::from_millis(2), }; - let vs_2 = MipState::new(MassaTime::from(18)); + let vs_2 = MipState::new(MassaTime::from_millis(18)); let mip_stats_cfg = MipStatsConfig { block_count_considered: 10, @@ -391,11 +396,11 @@ mod test { )) .unwrap(); - let fa = AddressFactory { + let fa = TestAddressFactory { versioning_store: vs.clone(), }; - let args = AddressArgs { + let args = TestAddressArgs { hash: Some("sdofjsklfhskfjl".into()), slot: Some("slot_4_2".to_string()), creator: Some("me_pubk".to_string()), @@ -403,33 +408,33 @@ mod test { }; // - let st_1 = FactoryStrategy::At(MassaTime::from(8)); // vi_1 not yet defined - let ts_1_2 = MassaTime::from(13); + let st_1 = FactoryStrategy::At(MassaTime::from_millis(8)); // vi_1 not yet defined + let ts_1_2 = MassaTime::from_millis(13); let st_1_2 = FactoryStrategy::At(ts_1_2); // vi_1 is started (after vi_1.start) - let st_2 = FactoryStrategy::At(MassaTime::from(18)); // vi_1 is active (after start + activation delay) - let st_3 = FactoryStrategy::At(MassaTime::from(27)); // vi_2 is started or locked_in - let st_4 = FactoryStrategy::At(MassaTime::from(30)); // vi_2 is active (after vi_2.timeout) - - let addr_st_1 = fa.create(&args, Some(st_1)); - let addr_st_1_2 = fa.create(&args, Some(st_1_2.clone())); - let addr_st_2 = fa.create(&args, Some(st_2)); - let addr_st_3 = fa.create(&args, Some(st_3)); - let addr_st_4 = fa.create(&args, Some(st_4.clone())); - - assert!(matches!(addr_st_1, Ok(Address::V0(_)))); - assert!(matches!(addr_st_1_2, Ok(Address::V0(_)))); - assert!(matches!(addr_st_2, Ok(Address::V1(_)))); - assert!(matches!(addr_st_3, Ok(Address::V1(_)))); - assert!(matches!(addr_st_4, Ok(Address::V1(_)))); // for now, vs_2 is not active yet + let st_2 = FactoryStrategy::At(MassaTime::from_millis(18)); // vi_1 is active (after start + activation delay) + let st_3 = FactoryStrategy::At(MassaTime::from_millis(27)); // vi_2 is started or locked_in + let st_4 = FactoryStrategy::At(MassaTime::from_millis(30)); // vi_2 is active (after vi_2.timeout) + + let addr_st_1 = fa.create(&args, st_1); + let addr_st_1_2 = fa.create(&args, st_1_2.clone()); + let addr_st_2 = fa.create(&args, st_2); + let addr_st_3 = fa.create(&args, st_3); + let addr_st_4 = fa.create(&args, st_4.clone()); + + assert!(matches!(addr_st_1, Ok(TestAddress::V0(_)))); + assert!(matches!(addr_st_1_2, Ok(TestAddress::V0(_)))); + assert!(matches!(addr_st_2, Ok(TestAddress::V1(_)))); + assert!(matches!(addr_st_3, Ok(TestAddress::V1(_)))); + assert!(matches!(addr_st_4, Ok(TestAddress::V1(_)))); // for now, vs_2 is not active yet // 
Advance state 2 to Active - let vs_2_new = advance_state_until(ComponentState::active(), &vi_2); + let vs_2_new = advance_state_until(ComponentState::active(_time), &vi_2); let info = BTreeMap::from([(vi_1.clone(), vs_1), (vi_2.clone(), vs_2_new)]); // Update versioning store vs.0.write().store = info; assert_eq!(fa.get_all_active_component_versions(), vec![0, 1, 2]); - let addr_st_4 = fa.create(&args, Some(st_4)); + let addr_st_4 = fa.create(&args, st_4); // Version 2 is selected but this is not implemented in factory yet assert!(matches!( addr_st_4, diff --git a/massa-versioning-worker/src/versioning_ser_der.rs b/massa-versioning/src/versioning_ser_der.rs similarity index 89% rename from massa-versioning-worker/src/versioning_ser_der.rs rename to massa-versioning/src/versioning_ser_der.rs index 7d9017c2f77..04e28b5c07f 100644 --- a/massa-versioning-worker/src/versioning_ser_der.rs +++ b/massa-versioning/src/versioning_ser_der.rs @@ -11,8 +11,8 @@ use nom::{ }; use crate::versioning::{ - Advance, ComponentState, ComponentStateTypeId, LockedIn, MipComponent, MipInfo, MipState, - MipStatsConfig, MipStoreRaw, MipStoreStats, Started, + Active, Advance, ComponentState, ComponentStateTypeId, LockedIn, MipComponent, MipInfo, + MipState, MipStatsConfig, MipStoreRaw, MipStoreStats, Started, }; use massa_models::amount::{Amount, AmountDeserializer, AmountSerializer}; @@ -21,7 +21,7 @@ use massa_serialization::{ Deserializer, SerializeError, Serializer, U32VarIntDeserializer, U32VarIntSerializer, U64VarIntDeserializer, U64VarIntSerializer, }; -use massa_time::{MassaTimeDeserializer, MassaTimeSerializer}; +use massa_time::{MassaTime, MassaTimeDeserializer, MassaTimeSerializer}; /// Ser / Der @@ -129,8 +129,8 @@ impl MipInfoDeserializer { Excluded(MIP_INFO_NAME_MAX_LEN), ), time_deserializer: MassaTimeDeserializer::new(( - Included(0.into()), - Included(u64::MAX.into()), + Included(MassaTime::from_millis(0)), + Included(MassaTime::from_millis(u64::MAX)), )), } } @@ -251,21 +251,19 @@ impl Serializer for ComponentStateSerializer { value: &ComponentState, buffer: &mut Vec, ) -> Result<(), SerializeError> { + let state_id = u32::from(ComponentStateTypeId::from(value)); + self.u32_serializer.serialize(&state_id, buffer)?; match value { ComponentState::Started(Started { threshold }) => { - let state_id = u32::from(ComponentStateTypeId::from(value)); - self.u32_serializer.serialize(&state_id, buffer)?; self.amount_serializer.serialize(threshold, buffer)?; } ComponentState::LockedIn(LockedIn { at }) => { - let state_id = u32::from(ComponentStateTypeId::from(value)); - self.u32_serializer.serialize(&state_id, buffer)?; self.time_serializer.serialize(at, buffer)?; } - _ => { - let state_id = u32::from(ComponentStateTypeId::from(value)); - self.u32_serializer.serialize(&state_id, buffer)?; + ComponentState::Active(Active { at }) => { + self.time_serializer.serialize(at, buffer)?; } + _ => {} } Ok(()) } @@ -291,8 +289,8 @@ impl ComponentStateDeserializer { Included(Amount::MAX), ), time_deserializer: MassaTimeDeserializer::new(( - Included(0.into()), - Included(u64::MAX.into()), + Included(MassaTime::from_millis(0)), + Included(MassaTime::from_millis(u64::MAX)), )), } } @@ -336,7 +334,13 @@ impl Deserializer for ComponentStateDeserializer { .parse(rem)?; (rem2, ComponentState::locked_in(at)) } - ComponentStateTypeId::Active => (rem, ComponentState::active()), + ComponentStateTypeId::Active => { + let (rem2, at) = context("Failed at value der", |input| { + self.time_deserializer.deserialize(input) + }) + 
@@ -336,7 +334,13 @@ impl Deserializer<ComponentState> for ComponentStateDeserializer {
                     .parse(rem)?;
                 (rem2, ComponentState::locked_in(at))
             }
-            ComponentStateTypeId::Active => (rem, ComponentState::active()),
+            ComponentStateTypeId::Active => {
+                let (rem2, at) = context("Failed at value der", |input| {
+                    self.time_deserializer.deserialize(input)
+                })
+                .parse(rem)?;
+                (rem2, ComponentState::active(at))
+            }
             ComponentStateTypeId::Failed => (rem, ComponentState::failed()),
             _ => (rem, ComponentState::Error),
         };
@@ -404,8 +408,8 @@ impl AdvanceDeserializer {
                 Included(Amount::MAX),
             ),
             time_deserializer: MassaTimeDeserializer::new((
-                Included(0.into()),
-                Included(u64::MAX.into()),
+                Included(MassaTime::from_millis(0)),
+                Included(MassaTime::from_millis(u64::MAX)),
             )),
         }
     }
@@ -472,7 +476,7 @@ impl MipStateSerializer {
         Self {
             state_serializer: Default::default(),
             advance_serializer: Default::default(),
-            u32_serializer: U32VarIntSerializer::default(),
+            u32_serializer: U32VarIntSerializer,
         }
     }
 }
@@ -556,6 +560,7 @@ impl Deserializer<MipState> for MipStateDeserializer {
                     self.advance_deserializer.deserialize(input)
                 }),
                 context("Failed state id deserialization", |input| {
+                    // TEST NOTE: deser fails here for last two tests
                     let (res, state_id_) = self.state_id_deserializer.deserialize(input)?;
                     let state_id =
@@ -640,8 +645,8 @@ impl Serializer<MipStoreStats> for MipStoreStatsSerializer {
 
         if entry_count > entry_count_max {
             return Err(SerializeError::GeneralError(format!(
-                "Too many entries in MipStoreStats latest announcements, max: {}",
-                MIP_STORE_STATS_BLOCK_CONSIDERED
+                "Too many entries in MipStoreStats latest announcements, max: {}, received: {}",
+                entry_count_max, entry_count
             )));
         }
         self.u32_serializer.serialize(&entry_count, buffer)?;
@@ -661,8 +666,8 @@
 
         if entry_count_2 > entry_count_2_max {
             return Err(SerializeError::GeneralError(format!(
-                "Too many entries in MipStoreStats version counters, max: {}",
-                MIP_STORE_STATS_COUNTERS_MAX
+                "Too many entries in MipStoreStats version counters, max: {}, received: {}",
+                entry_count_2_max, entry_count_2
             )));
         }
         self.u32_serializer.serialize(&entry_count_2, buffer)?;
@@ -830,8 +835,8 @@
         })?;
         if entry_count > MIP_STORE_MAX_ENTRIES {
             return Err(SerializeError::GeneralError(format!(
-                "Too many entries in VersioningStoreRaw, max: {}",
-                MIP_STORE_MAX_ENTRIES
+                "Too many entries in VersioningStoreRaw, max: {}, received: {}",
+                MIP_STORE_MAX_ENTRIES, entry_count
             )));
         }
         self.u32_serializer.serialize(&entry_count, buffer)?;
@@ -916,11 +921,10 @@ impl Deserializer<MipStoreRaw> for MipStoreRawDeserializer {
 mod test {
     use super::*;
 
-    use std::collections::HashMap;
+    use std::assert_matches::assert_matches;
     use std::mem::{size_of, size_of_val};
     use std::str::FromStr;
 
-    use chrono::{NaiveDate, NaiveDateTime};
     use more_asserts::assert_lt;
 
     use crate::test_helpers::versioning_helpers::advance_state_until;
@@ -928,15 +932,44 @@ mod test {
     use massa_serialization::DeserializeError;
     use massa_time::MassaTime;
 
+    #[test]
+    fn test_mip_component_non_exhaustive() {
+        let last_variant__ = std::mem::variant_count::<MipComponent>() - 2; // -1 for Nonexhaustive, -1 for index start at 0
+        let last_variant_ = u32::try_from(last_variant__).unwrap();
+        let last_variant = MipComponent::from(last_variant_);
+
+        match last_variant {
+            MipComponent::__Nonexhaustive => {
+                panic!("Should be a known enum value")
+            }
+            _ => {
+                // all good
+                println!("last variant of MipComponent is: {:?}", last_variant);
+            }
+        }
+
+        {
+            let variant__ = std::mem::variant_count::<MipComponent>() - 1;
+            let variant_ = u32::try_from(variant__).unwrap();
+            assert_matches!(MipComponent::from(variant_), MipComponent::__Nonexhaustive);
+        }
+
+        {
+            let variant__ = std::mem::variant_count::<MipComponent>();
+            let variant_ = u32::try_from(variant__).unwrap();
+            assert_matches!(MipComponent::from(variant_), MipComponent::__Nonexhaustive);
+        }
+    }
+
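Note on the new test above: it leans on the nightly `variant_count` and `assert_matches` features (consistent with the nightly toolchain pinned at the end of this diff) and checks that `From<u32>` folds any out-of-range discriminant into `MipComponent::__Nonexhaustive`. A condensed sketch of that contract (assumption: every discriminant at or past the sentinel's index maps to the sentinel):

    // requires #![feature(variant_count, assert_matches)] at crate root
    use std::assert_matches::assert_matches;

    let past_end = u32::try_from(std::mem::variant_count::<MipComponent>()).unwrap();
    assert_matches!(MipComponent::from(past_end), MipComponent::__Nonexhaustive);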
     #[test]
     fn test_mip_info_ser_der() {
         let vi_1 = MipInfo {
             name: "MIP-0002".to_string(),
             version: 2,
-            components: HashMap::from([(MipComponent::Address, 1)]),
-            start: MassaTime::from(2),
-            timeout: MassaTime::from(5),
-            activation_delay: MassaTime::from(2),
+            components: BTreeMap::from([(MipComponent::Address, 1)]),
+            start: MassaTime::from_millis(2),
+            timeout: MassaTime::from_millis(5),
+            activation_delay: MassaTime::from_millis(2),
         };
 
         let mut buf = Vec::new();
@@ -979,27 +1012,16 @@ mod test {
 
     #[test]
     fn test_advance_ser_der() {
-        let start: NaiveDateTime = NaiveDate::from_ymd_opt(2017, 11, 01)
-            .unwrap()
-            .and_hms_opt(7, 33, 44)
-            .unwrap();
-
-        let timeout: NaiveDateTime = NaiveDate::from_ymd_opt(2017, 11, 11)
-            .unwrap()
-            .and_hms_opt(7, 33, 44)
-            .unwrap();
-
-        let now: NaiveDateTime = NaiveDate::from_ymd_opt(2017, 05, 11)
-            .unwrap()
-            .and_hms_opt(11, 33, 44)
-            .unwrap();
+        let start = MassaTime::from_utc_ymd_hms(2017, 11, 01, 7, 33, 44).unwrap();
+        let timeout = MassaTime::from_utc_ymd_hms(2017, 11, 11, 7, 33, 44).unwrap();
+        let now = MassaTime::from_utc_ymd_hms(2017, 05, 11, 11, 33, 44).unwrap();
 
         let adv = Advance {
-            start_timestamp: MassaTime::from(start.timestamp() as u64),
-            timeout: MassaTime::from(timeout.timestamp() as u64),
+            start_timestamp: start,
+            timeout,
             threshold: Default::default(),
-            now: MassaTime::from(now.timestamp() as u64),
-            activation_delay: MassaTime::from(20),
+            now,
+            activation_delay: MassaTime::from_millis(20),
         };
 
         let mut buf = Vec::new();
@@ -1016,7 +1038,7 @@ mod test {
 
     #[test]
     fn test_mip_state_ser_der() {
-        let state_1 = MipState::new(MassaTime::from(100));
+        let state_1 = MipState::new(MassaTime::from_millis(100));
 
         let mut buf = Vec::new();
         let state_ser = MipStateSerializer::new();
@@ -1033,13 +1055,14 @@ mod test {
         let mi_1 = MipInfo {
             name: "MIP-0002".to_string(),
             version: 2,
-            components: HashMap::from([(MipComponent::Address, 1)]),
-            start: MassaTime::from(2),
-            timeout: MassaTime::from(5),
-            activation_delay: MassaTime::from(2),
+            components: BTreeMap::from([(MipComponent::Address, 1)]),
+            start: MassaTime::from_millis(2),
+            timeout: MassaTime::from_millis(5),
+            activation_delay: MassaTime::from_millis(2),
         };
 
-        let state_2 = advance_state_until(ComponentState::locked_in(MassaTime::from(3)), &mi_1);
+        let state_2 =
+            advance_state_until(ComponentState::locked_in(MassaTime::from_millis(3)), &mi_1);
 
         state_ser.serialize(&state_2, &mut buf).unwrap();
         let (rem2, state_der_res) = state_der.deserialize::<DeserializeError>(&buf).unwrap();
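Note on the `_time` bindings introduced across these tests: `ComponentState::active` now takes the activation timestamp, so the `advance_state_until` helper must be driven toward a concrete `Active(at)` state. A minimal sketch of the new call shape (assumptions: `MipState` exposes its `state` field within the crate, as `size_of_val(&state.state)` below suggests, and `advance_state_until` reaches the requested state):

    let at = MassaTime::now().unwrap();
    let state = advance_state_until(ComponentState::active(at), &mi_2);
    assert_matches!(state.state, ComponentState::Active(_));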
@@ -1086,22 +1109,23 @@ mod test {
         let mi_2 = MipInfo {
             name: "MIP-0002".to_string(),
             version: 2,
-            components: HashMap::from([(MipComponent::Address, 1)]),
-            start: MassaTime::from(2),
-            timeout: MassaTime::from(5),
-            activation_delay: MassaTime::from(2),
+            components: BTreeMap::from([(MipComponent::Address, 1)]),
+            start: MassaTime::from_millis(2),
+            timeout: MassaTime::from_millis(5),
+            activation_delay: MassaTime::from_millis(2),
         };
 
         let mi_3 = MipInfo {
             name: "MIP-0003".to_string(),
             version: 3,
-            components: HashMap::from([(MipComponent::Block, 1)]),
-            start: MassaTime::from(12),
-            timeout: MassaTime::from(17),
-            activation_delay: MassaTime::from(2),
+            components: BTreeMap::from([(MipComponent::Block, 1)]),
+            start: MassaTime::from_millis(12),
+            timeout: MassaTime::from_millis(17),
+            activation_delay: MassaTime::from_millis(2),
         };
 
-        let state_2 = advance_state_until(ComponentState::active(), &mi_2);
+        let _time = MassaTime::now().unwrap();
+        let state_2 = advance_state_until(ComponentState::active(_time), &mi_2);
         let state_3 = advance_state_until(
             ComponentState::started(Amount::from_str("42.4242").unwrap()),
             &mi_3,
@@ -1126,10 +1150,10 @@ mod test {
         let mut mi_base = MipInfo {
             name: "A".repeat(254),
             version: 0,
-            components: HashMap::from([(MipComponent::Address, 0)]),
-            start: MassaTime::from(0),
-            timeout: MassaTime::from(2),
-            activation_delay: MassaTime::from(2),
+            components: BTreeMap::from([(MipComponent::Address, 0)]),
+            start: MassaTime::from_millis(0),
+            timeout: MassaTime::from_millis(2),
+            activation_delay: MassaTime::from_millis(2),
         };
 
         // Note: we did not add the name ptr and hashmap ptr, only the data inside
@@ -1141,6 +1165,7 @@ mod test {
 
         let mut all_state_size = 0;
 
+        let _time = MassaTime::now().unwrap();
         let store_raw_: Vec<(MipInfo, MipState)> = (0..MIP_STORE_MAX_ENTRIES)
             .map(|_i| {
                 mi_base.version += 1;
@@ -1148,10 +1173,10 @@ mod test {
                     .components
                     .entry(MipComponent::Address)
                     .and_modify(|e| *e += 1);
-                mi_base.start = mi_base.timeout.saturating_add(MassaTime::from(1));
-                mi_base.timeout = mi_base.start.saturating_add(MassaTime::from(2));
+                mi_base.start = mi_base.timeout.saturating_add(MassaTime::from_millis(1));
+                mi_base.timeout = mi_base.start.saturating_add(MassaTime::from_millis(2));
 
-                let state = advance_state_until(ComponentState::active(), &mi_base);
+                let state = advance_state_until(ComponentState::active(_time), &mi_base);
 
                 all_state_size += size_of_val(&state.state);
                 all_state_size +=
                     state.history.len() * (size_of::<Advance>() + size_of::<ComponentStateTypeId>());
diff --git a/massa-wallet/Cargo.toml b/massa-wallet/Cargo.toml
index 29179505bde..3c4f4b359b1 100644
--- a/massa-wallet/Cargo.toml
+++ b/massa-wallet/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "massa_wallet"
-version = "0.1.0"
+version = "0.23.0"
 edition = "2021"
 
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
diff --git a/massa-versioning-exports/Cargo.toml b/massa-xtask/Cargo.toml
similarity index 52%
rename from massa-versioning-exports/Cargo.toml
rename to massa-xtask/Cargo.toml
index bcfbaabb427..df0e34998f5 100644
--- a/massa-versioning-exports/Cargo.toml
+++ b/massa-xtask/Cargo.toml
@@ -1,13 +1,11 @@
 [package]
-name = "massa_versioning_exports"
-version = "0.1.0"
-authors = ["Massa Labs <info@massa.net>"]
+name = "massa_xtask"
+version = "0.23.0"
 edition = "2021"
 
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
-
-[dev-dependencies]
-
-[features]
+massa_models = { path = "../massa-models" }
+toml_edit = "0.19.8"
+walkdir = "2.3.3"
diff --git a/massa-xtask/src/main.rs b/massa-xtask/src/main.rs
new file mode 100644
index 00000000000..a962ef40764
--- /dev/null
+++ b/massa-xtask/src/main.rs
@@ -0,0 +1,15 @@
+mod update_package_versions;
+use crate::update_package_versions::update_package_versions;
+use std::env;
+
+/// To run a task: cargo xtask <task_name>
+/// example: cargo xtask update_package_versions to update package versions
+fn main() {
+    let task = env::args().nth(1);
+
+    match task.as_deref() {
+        // We can add more tasks here
+        Some("update_package_versions") => update_package_versions(),
+        _ => panic!("Unknown task"),
+    }
+}
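Note on massa-xtask/src/main.rs above: tasks dispatch on the first CLI argument, so adding a task is one new match arm. A self-contained sketch of such an extension (`my_new_task` and its body are purely illustrative; `update_package_versions` is stubbed here but provided by the module below):

    fn update_package_versions() { /* provided by update_package_versions.rs below */ }

    // hypothetical extra task, shown only to illustrate the dispatch pattern
    fn my_new_task() {
        println!("running my_new_task");
    }

    fn main() {
        match std::env::args().nth(1).as_deref() {
            Some("update_package_versions") => update_package_versions(),
            Some("my_new_task") => my_new_task(), // one match arm per task
            _ => panic!("Unknown task"),
        }
    }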
diff --git a/massa-xtask/src/update_package_versions.rs b/massa-xtask/src/update_package_versions.rs
new file mode 100644
index 00000000000..74b1917e271
--- /dev/null
+++ b/massa-xtask/src/update_package_versions.rs
@@ -0,0 +1,88 @@
+use massa_models::config::constants::VERSION;
+use std::fs;
+use std::path::Path;
+use toml_edit::{Document, Formatted, Item, Value};
+use walkdir::WalkDir;
+
+/// Check the version of the packages in the workspace
+fn check_workspace_packages_version(
+    new_version: String,
+    workspace_path: &Path,
+) -> Result<usize, Box<dyn std::error::Error>> {
+    let mut nb_files_updated = 0;
+    // search for Cargo.toml files in the workspace
+    for entry in WalkDir::new(workspace_path)
+        .into_iter()
+        .filter_map(|e| e.ok())
+    {
+        if entry.file_name() == "Cargo.toml" {
+            // if a Cargo.toml file is found, check the version and update it if necessary
+            if check_package_version(new_version.clone(), entry.path())? {
+                // the version has been updated
+                nb_files_updated += 1;
+            }
+        }
+    }
+
+    Ok(nb_files_updated)
+}
+
+/// Check the version of the package in the Cargo.toml file.
+///
+/// Update it if the version differs from the new version and the package is a massa package.
+///
+/// Return Ok(true) if the version has been updated.
+fn check_package_version(
+    new_version: String,
+    cargo_toml_path: &Path,
+) -> Result<bool, Box<dyn std::error::Error>> {
+    let cargo_toml_content = fs::read_to_string(cargo_toml_path)?;
+    let mut doc = cargo_toml_content.parse::<Document>()?;
+
+    let mut update = false;
+
+    if let Some(package) = doc["package"].as_table_mut() {
+        if package["name"].to_string().contains("massa") {
+            if let Some(version) = package.get_mut("version") {
+                let to_string = version.to_string().replace('\"', "");
+                let actual_version = to_string.trim();
+                if new_version.ne(actual_version) {
+                    *version = Item::Value(Value::String(Formatted::new(new_version.clone())));
+                    println!(
+                        "Updating version of package {} from {} to {}",
+                        package["name"], actual_version, new_version
+                    );
+                    update = true;
+                }
+            }
+        }
+    }
+
+    if update {
+        let updated_cargo_toml_content = doc.to_string();
+        fs::write(cargo_toml_path, updated_cargo_toml_content)?;
+    }
+
+    Ok(update)
+}
+
+pub(crate) fn update_package_versions() {
+    println!("Updating package versions");
+    let mut to_string = VERSION.to_string();
+
+    if to_string.contains("TEST") || to_string.contains("SAND") {
+        // TestNet and Sandbox versions < 1.0.0
+        to_string.replace_range(..4, "0");
+    } else {
+        // Main net version >= 1.0.0
+        // to_string.replace_range(..4, "1");
+        panic!("todo for mainnet");
+    };
+
+    let workspace_path = Path::new("./");
+
+    match check_workspace_packages_version(to_string, workspace_path) {
+        Err(e) => panic!("Error updating workspace packages version: {}", e),
+        Ok(nb_files_updated) => println!("{} files updated", nb_files_updated),
+    }
+}
diff --git a/rust-toolchain.toml b/rust-toolchain.toml
index 9287c75d1b4..904c6cc5fcb 100644
--- a/rust-toolchain.toml
+++ b/rust-toolchain.toml
@@ -1,2 +1,2 @@
 [toolchain]
-channel = "nightly-2023-05-01"
\ No newline at end of file
+channel = "nightly-2023-06-01"
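Note on update_package_versions above: `replace_range(..4, "0")` rewrites the 4-character network prefix of VERSION into a semver-style major version. Assuming a testnet-style version string such as "TEST.23.0" (consistent with the 0.23.0 package versions set elsewhere in this diff), the transformation works out as follows:

    let mut v = String::from("TEST.23.0"); // hypothetical rendering of VERSION
    v.replace_range(..4, "0"); // replaces the "TEST" prefix with "0"
    assert_eq!(v, "0.23.0"); // matches the package versions written by the xtask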