diff --git a/Cargo.lock b/Cargo.lock index 98ad5c1..8e3de64 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -152,21 +152,24 @@ dependencies = [ [[package]] name = "bindgen" -version = "0.59.2" +version = "0.63.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bd2a9a458e8f4304c52c43ebb0cfbd520289f8379a52e329a38afda99bf8eb8" +checksum = "36d860121800b2a9a94f9b5604b332d5cffb234ce17609ea479d723dbc9d3885" dependencies = [ "bitflags", "cexpr", "clang-sys", "lazy_static", "lazycell", + "log", "peeking_take_while", "proc-macro2", "quote", "regex", "rustc-hash", "shlex", + "syn", + "which", ] [[package]] @@ -355,9 +358,9 @@ dependencies = [ [[package]] name = "ckb-app-config" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7414c0abc78929c2da3cf90b4683abc5f64f11b8e28ee850558413f647abd83b" +checksum = "64a8f88c3b4c865b9e707bc4361646983d624f61d9126243ca18c9714e70d9f8" dependencies = [ "ckb-build-info", "ckb-chain-spec", @@ -367,9 +370,9 @@ dependencies = [ "ckb-metrics-config", "ckb-pow", "ckb-resource", + "ckb-systemtime", "ckb-types", - "clap 3.2.21", - "faketime", + "clap 4.0.26", "path-clean", "rand 0.7.3", "serde", @@ -384,9 +387,9 @@ dependencies = [ [[package]] name = "ckb-async-runtime" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8dece33da401969c572ebc5524b2eead27f402d49a8f99377c6b15ea66c1735e" +checksum = "4991b674543905a31bb6c4fa54986c72636bad42719680efb9d56ca6f1fe76da" dependencies = [ "ckb-logger", "ckb-spawn", @@ -396,9 +399,9 @@ dependencies = [ [[package]] name = "ckb-block-filter" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62d7b36fb98833c54117dfd9dc7d43930130289227b5f5d9423851c2aed3421b" +checksum = "ad1ead73941d5b78bc5621d39350f7732fbc63261286d08b8453fab312364f3d" dependencies = [ "ckb-async-runtime", 
"ckb-logger", @@ -411,15 +414,15 @@ dependencies = [ [[package]] name = "ckb-build-info" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "875484aae077e761089a7f707590f1ee9ed31e4bb8a330f93c822d2f4f40644b" +checksum = "4b2f1a5f9d25de2a2526b9ee56eb4c3fdf3e2192875b955d2facfbd554b54f9e" [[package]] name = "ckb-chain" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36aa70bc113bd9d33af7d7c4e494ed4188fe7e7eb40c72752bf8d1fae0ee46dd" +checksum = "03bf3d40af5b8e05ccd01eac403ab09aa08ff89ccc0f7416c004195933cfc93e" dependencies = [ "ckb-app-config", "ckb-chain-spec", @@ -434,18 +437,18 @@ dependencies = [ "ckb-shared", "ckb-stop-handler", "ckb-store", + "ckb-systemtime", "ckb-types", "ckb-verification", "ckb-verification-contextual", "ckb-verification-traits", - "faketime", ] [[package]] name = "ckb-chain-spec" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fccca15bb20b37f213bda1ff673fc25af8ee2e97f506ffc70de296d068034d63" +checksum = "e78df45446aaa86b06a77b8b145cffa79950e7ede293cebcd114a62e74c29dbf" dependencies = [ "ckb-constant", "ckb-crypto", @@ -465,24 +468,24 @@ dependencies = [ [[package]] name = "ckb-channel" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e718dfa7098b0bcce95c7fa573d96aad2f4c3ac886b6f35053f40c5e4894156" +checksum = "920f26cc48cadcaf6f7bcc3960fde9f9f355633b6361da8ef31e1e1c00fc8858" dependencies = [ "crossbeam-channel", ] [[package]] name = "ckb-constant" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ddc317ee0521b2a176f7197bd1922ee5e757b1c454150cf392baec49c8f31f4" +checksum = "302566408e5b296663ac5e8245bf71824ca2c7c2ef19a57fcc15939dd66527e9" [[package]] name = "ckb-crypto" -version = "0.106.0" 
+version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "105a3a011f3d29070d137ea1336d8761951281b39cb90db07b5b752c741b1d0f" +checksum = "aac31177b0a8bf3acd563c042775e40494e437b2bbbae96ac2473eec3a4da95d" dependencies = [ "ckb-fixed-hash", "faster-hex", @@ -494,9 +497,9 @@ dependencies = [ [[package]] name = "ckb-dao" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72bcf6f5504b8789f3f2f152e1e75665559782087f5ab90b42014f393522eabb" +checksum = "b70944b9013ead64287b87ac19608a3ca5ab19a9f29b7a76f637ad7831510e88" dependencies = [ "byteorder", "ckb-chain-spec", @@ -507,9 +510,9 @@ dependencies = [ [[package]] name = "ckb-dao-utils" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "970872ecd2bfe4072051b020b78061c97dfc07e2e8797e73e7b17c238891b91e" +checksum = "1929c9627923fe1d22151361d74f5a5aa0dda77016d020307a54486eae11cb3c" dependencies = [ "byteorder", "ckb-error", @@ -518,9 +521,9 @@ dependencies = [ [[package]] name = "ckb-db" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ccd8da9ca694df652fc3055461e71713e5043a73f545b6692949fa492547d66" +checksum = "e661b65cf9cb8fc3492bebcca742245f77950dc9bd036dac760acc9c821d6b4c" dependencies = [ "ckb-app-config", "ckb-db-schema", @@ -532,9 +535,9 @@ dependencies = [ [[package]] name = "ckb-db-migration" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58f7c1f9ef698be873509fa04f0e5477387438bfd2e61f686c6b76473e199dad" +checksum = "5df2f6e23817e037bc9b8507fc49cffdaea9de0f2b074d4f477267d8af880859" dependencies = [ "ckb-db", "ckb-db-schema", @@ -546,15 +549,15 @@ dependencies = [ [[package]] name = "ckb-db-schema" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "38dd9a3e896af70171a8ba8e08ba3f665ca90c5c805dd3eb544db5f4da4ca937" +checksum = "368e2352877063c40d5aadb2b6d97b306a8531ab45d1709c43f13468b5b24b0e" [[package]] name = "ckb-error" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd512b729186e6fa991b588647646e230db7728f71ba16087af21bded12ceb09" +checksum = "446a519d8a847d97f1c8ece739dc1748751a9a2179249c96c45cced0825a7aa5" dependencies = [ "anyhow", "ckb-occupied-capacity", @@ -564,9 +567,9 @@ dependencies = [ [[package]] name = "ckb-fixed-hash" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8eba8f7006a63ad0945412012c89af6ad09d9b2b02962a869d0158a298fa8eca" +checksum = "00cbbc455b23748b32e06d16628a03e30d56ffa057f17093fdf5b42d4fb6c879" dependencies = [ "ckb-fixed-hash-core", "ckb-fixed-hash-macros", @@ -574,9 +577,9 @@ dependencies = [ [[package]] name = "ckb-fixed-hash-core" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44b15b464d37d8deeb66046011b3e01e642103b27d4752db4e74740ded732c73" +checksum = "cf4e644a4e026625b4be5a04cdf6c02043080e79feaf77d9cdbb2f0e6553f751" dependencies = [ "faster-hex", "serde", @@ -585,9 +588,9 @@ dependencies = [ [[package]] name = "ckb-fixed-hash-macros" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61e86358f6eb595a0e6a2a5ef96d54d4c56e0a4bf822934d7b1fe9904b7208e4" +checksum = "e1cfc980ef88c217825172eb46df269f47890f5e78a38214416f13b3bd17a4b4" dependencies = [ "ckb-fixed-hash-core", "proc-macro2", @@ -597,9 +600,9 @@ dependencies = [ [[package]] name = "ckb-freezer" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c92afdf40f6675b44970a7d31e4dd45a514190b8036fdca41909a8b462ece12" +checksum = 
"f6372f89ab69d374b4c892e764a7c432a3a903d83bd4b7b4f0bca7f62fbe71bb" dependencies = [ "ckb-error", "ckb-logger", @@ -614,9 +617,9 @@ dependencies = [ [[package]] name = "ckb-hash" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "038ad6840c4a89f4cd76b50621c4e6d82ca5f0d09fba707b1025016218d4a2d8" +checksum = "53d9b683e89ae4ffdd5aaf4172eab00b6bbe7ea24e2abf77d3eb850ba36e8983" dependencies = [ "blake2b-ref", "blake2b-rs", @@ -624,9 +627,9 @@ dependencies = [ [[package]] name = "ckb-indexer" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3b9a5bcbe71a2d520ca97a86dd5b849333a9e256cc511407b5c2c2489c1f2f3" +checksum = "bdc0dd38ddc1464e4a7f6c5f586c5383661a9bf2555e9ea42ba04164549a7c8e" dependencies = [ "ckb-app-config", "ckb-async-runtime", @@ -646,9 +649,9 @@ dependencies = [ [[package]] name = "ckb-jsonrpc-types" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eca123e0b725e487cd49f202097c38c73c2a4d7e1c80d89549c492f058e84f29" +checksum = "ac087657eaf964e729f40b3c929d3dac74a2cd8bb38d5e588756e2495711f810" dependencies = [ "ckb-types", "faster-hex", @@ -658,9 +661,9 @@ dependencies = [ [[package]] name = "ckb-launcher" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ff69144a1855c402b9b9ae97df0fe152d4bef928abdf427e20f3052393a4f01" +checksum = "218a1f8df259a67067257fa2a6c47b9fd1b8b246b75200866dec8203caf1e8b3" dependencies = [ "ckb-app-config", "ckb-async-runtime", @@ -674,6 +677,7 @@ dependencies = [ "ckb-db-schema", "ckb-error", "ckb-freezer", + "ckb-hash", "ckb-jsonrpc-types", "ckb-light-client-protocol-server", "ckb-logger", @@ -700,9 +704,9 @@ dependencies = [ [[package]] name = "ckb-librocksdb-sys" -version = "7.3.3" +version = "7.7.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d106bb32241da368e67ce9a36545e1cc3c31b7faa9321817199b69bf1e085b4a" +checksum = "eb64bb11f8d53b9abb526b7e9d3c5fb437a8101e4210d540d2bcee0d18055313" dependencies = [ "bindgen", "cc", @@ -723,6 +727,7 @@ dependencies = [ "ckb-chain-spec", "ckb-constant", "ckb-error", + "ckb-hash", "ckb-jsonrpc-types", "ckb-launcher", "ckb-merkle-mountain-range", @@ -732,6 +737,7 @@ dependencies = [ "ckb-script", "ckb-shared", "ckb-store", + "ckb-systemtime", "ckb-traits", "ckb-tx-pool", "ckb-types", @@ -740,7 +746,6 @@ dependencies = [ "ctrlc", "dashmap 5.3.4", "env_logger 0.9.0", - "faketime", "golomb-coded-set", "jsonrpc-core", "jsonrpc-derive", @@ -763,9 +768,9 @@ dependencies = [ [[package]] name = "ckb-light-client-protocol-server" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00fd0a16ea124b8462162a7274fcba79e61890cf2ed60383900e373d2b6bcbdb" +checksum = "0d0b2b1b66d0f2622e3a24e9977d44fce0d620ccf430a43b158b0a075d6ce657" dependencies = [ "ckb-logger", "ckb-merkle-mountain-range", @@ -773,32 +778,33 @@ dependencies = [ "ckb-shared", "ckb-store", "ckb-sync", + "ckb-systemtime", "ckb-types", ] [[package]] name = "ckb-logger" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe1d406dd67f086bb64d17af697a4d42c3d7d85e2a677bfd8c04aea4b48510cf" +checksum = "911c4695ddf82f78da8f514b359092bbe231f58c2669c93b1cfc9a2030b125bb" dependencies = [ "log", ] [[package]] name = "ckb-logger-config" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85b13d7cb022309a2061f14b30744bbc71c4f800e25011025175ca93f4018149" +checksum = "4ef3bee2498ee37642fe8561eb8882954001f3049c5e1a2246281238073cf8b3" dependencies = [ "serde", ] [[package]] name = "ckb-logger-service" -version = "0.106.0" +version = "0.108.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fa42f3ec05543e0bc2be331aad8a6050bc9444acddbfab8167f892b65465095" +checksum = "a4eda4f44e71182cb5de02d34dc5b7ea260962cb43c9603c1ccf665aeb80ca99" dependencies = [ "backtrace", "ckb-channel", @@ -814,9 +820,9 @@ dependencies = [ [[package]] name = "ckb-memory-tracker" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0aad8e11bb02057fe92635f2beb7f630e08a8b6fec36bd24e30c9b9ace102ca4" +checksum = "f1b3fa69353aa8080218faed95aa35f91e9b775b424b1a580c959417c8662faf" dependencies = [ "ckb-db", "ckb-logger", @@ -838,27 +844,27 @@ dependencies = [ [[package]] name = "ckb-metrics" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d3fa34a872934439439e184eee71fcded27cab382b5220b47412a03ccc8fee8" +checksum = "4848f8e03789b648bb9ef41260455a491fb7948db854c2367a90b1592c273100" dependencies = [ "opentelemetry", ] [[package]] name = "ckb-metrics-config" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83850e99e53bbf82b21dbbb8456f598d5e2e65821086a9921178a0d5d9ece59f" +checksum = "0a2bf9c86a97bb63002d8aa70b71caa3fd4f22e277587fe449e018330b289ca2" dependencies = [ "serde", ] [[package]] name = "ckb-migration-template" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cf06bc62e788c8c43672d1ad979819ad88b6fe02786048a5ca7f69d1d57506f" +checksum = "8b598406f2c2067f28990494c630609164eb3929e02c8ab53b67acdadb9cc55d" dependencies = [ "quote", "syn", @@ -866,9 +872,9 @@ dependencies = [ [[package]] name = "ckb-multisig" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c0cad9c7fb0d8b0e3dc8c567ce7c58c5b44c51db0a413a9924704ea7d79e0c1" +checksum = 
"488ddc1e0c1ca602a0d92c81f88697d4f9591396a660b36c01b30247f1e416a4" dependencies = [ "ckb-crypto", "ckb-error", @@ -877,9 +883,9 @@ dependencies = [ [[package]] name = "ckb-network" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbc974ecff310084ba7d549907f10e4672076e414dcc16b6439206dbbdcce7a4" +checksum = "367df91f6888551fa913f635c91a7557133da28fe49a04b7b7448f370a6a324a" dependencies = [ "bitflags", "bloom-filters", @@ -889,9 +895,9 @@ dependencies = [ "ckb-metrics", "ckb-spawn", "ckb-stop-handler", + "ckb-systemtime", "ckb-types", "ckb-util", - "faketime", "futures", "ipnetwork", "rand 0.7.3", @@ -906,9 +912,9 @@ dependencies = [ [[package]] name = "ckb-network-alert" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6043d0fc921be1a24a66076611793973925bfed2e37f2f240c4b958590843e35" +checksum = "52718dddee7cac16974372c5580cbbbae31073b01f8eede4801d092ac0e6931b" dependencies = [ "ckb-app-config", "ckb-error", @@ -917,18 +923,18 @@ dependencies = [ "ckb-multisig", "ckb-network", "ckb-notify", + "ckb-systemtime", "ckb-types", "ckb-util", - "faketime", "lru", "semver", ] [[package]] name = "ckb-notify" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7625e4fe234b419cdb30f13a6095b15484d84450b5637283a0baa16e0aa91dcb" +checksum = "be687ff66c1866b8069943de95b152b038c70290a5244c84abbd9d8443f7de4e" dependencies = [ "ckb-app-config", "ckb-async-runtime", @@ -940,9 +946,9 @@ dependencies = [ [[package]] name = "ckb-occupied-capacity" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94f6c146d51b1b7f65511e6f16ef21b0d852aececc4ae87f78c3099c03e246a9" +checksum = "2d2a1dd0d4ba5dafba1e30d437c1148b20f42edb76b6794323e05bda626754eb" dependencies = [ "ckb-occupied-capacity-core", 
"ckb-occupied-capacity-macros", @@ -950,18 +956,18 @@ dependencies = [ [[package]] name = "ckb-occupied-capacity-core" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "507187824418c845b519c64521b34578570b5851d170ff0101bc477ed0cdee2b" +checksum = "0ebba3d564098a84c83f4740e1dce48a5e2da759becdb47e3c7965f0808e6e92" dependencies = [ "serde", ] [[package]] name = "ckb-occupied-capacity-macros" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17825cb1ec37c5ad2f2c6690aa4cbfeb9a6d2af02463a66b1fa013e4f9e762aa" +checksum = "ce6321bba85cdf9724029d8c906851dd4a90906869b42f9100b16645a1261d4c" dependencies = [ "ckb-occupied-capacity-core", "quote", @@ -970,9 +976,9 @@ dependencies = [ [[package]] name = "ckb-pow" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3ea66fa0ac3dc84cada2d2696903afb28dbea1211d05f04b0ed444874ee8623" +checksum = "a9167b427f42874e68e20e6946d5211709979ff1d86c0061a71c2f6a6aa17659" dependencies = [ "byteorder", "ckb-hash", @@ -984,9 +990,9 @@ dependencies = [ [[package]] name = "ckb-proposal-table" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fac92b19a092dc2fdf3de3dad4ef8327a6b08ac119dca225b4c40bfe7755dd6" +checksum = "b95f16ef8ee19f6b0be5dda32997d43ae392b9dc2e30472d29811489cbe945ff" dependencies = [ "ckb-chain-spec", "ckb-logger", @@ -995,9 +1001,9 @@ dependencies = [ [[package]] name = "ckb-rational" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa5edf5377138c9457015a450b1a263996d100a5b6e21566157f410e1a5b95b3" +checksum = "a2519249f8d47fa758d3fb3cf3049327c69ce0f2acd79d61427482c8661d3dbd" dependencies = [ "numext-fixed-uint", "serde", @@ -1005,9 +1011,9 @@ dependencies = [ [[package]] name = 
"ckb-resource" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06cd89bd6c5bafeadc363cf0a479516355f16bd5758af4d7dace464d8756892b" +checksum = "a3abddc968d7f1e70584ab04180c347380a44acbe0b60e26cc96208ec8885279" dependencies = [ "ckb-system-scripts", "ckb-types", @@ -1020,9 +1026,9 @@ dependencies = [ [[package]] name = "ckb-reward-calculator" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18c82ccf7498515759e37d93683b449f8c4c99ed600c8a53a0759a31cfa47a5d" +checksum = "3315b5c96d33f6e9fc7d8181f161a1b333744013de1b3b7276f1a5f1fa88dff9" dependencies = [ "ckb-chain-spec", "ckb-dao", @@ -1034,9 +1040,9 @@ dependencies = [ [[package]] name = "ckb-rocksdb" -version = "0.18.3" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87b89828fd7e60e4d857f6f1a2f40343114eece1ca20859d4ca8371d4e00ce28" +checksum = "8e9d412caf8a7fe9080bf2c66209e1b6d9aab336c417b336adde47e184c78c41" dependencies = [ "ckb-librocksdb-sys", "libc", @@ -1045,9 +1051,9 @@ dependencies = [ [[package]] name = "ckb-rpc" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f353f1157014d3d4da2b0e3574fd9439fe39da0508f509405fd8f1ccc04931cc" +checksum = "a027fe3a6c1436ebf9390cf802cbd2fd4f4a9e01d8635aa81f6a398906fdb99e" dependencies = [ "ckb-app-config", "ckb-chain", @@ -1068,13 +1074,13 @@ dependencies = [ "ckb-shared", "ckb-store", "ckb-sync", + "ckb-systemtime", "ckb-traits", "ckb-tx-pool", "ckb-types", "ckb-util", "ckb-verification", "ckb-verification-traits", - "faketime", "itertools", "jsonrpc-core", "jsonrpc-derive", @@ -1089,18 +1095,18 @@ dependencies = [ [[package]] name = "ckb-rust-unstable-port" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"02799203d17cce9b8b3c37cc4485bccde0b1b514e50ebb00fa141fa04ac78193" +checksum = "fa284ac7e6a9d4e854483ad8ecaeed92ef729e09b815a64dcde46dd91a5bb381" dependencies = [ "is_sorted", ] [[package]] name = "ckb-script" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9da7feb35e8997b29f1c987d441a89fb13ba8e429b64e09e1d35dff7b9290a8" +checksum = "12b4754a2f0ccea5ea1934822bd18a3a66c46344d8c3872cb20ffdcf0851fab9" dependencies = [ "byteorder", "ckb-chain-spec", @@ -1116,9 +1122,9 @@ dependencies = [ [[package]] name = "ckb-shared" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c95a74fe31aed065fd4b4f9d0f620043a6dcb8537f1b6f2a0b8c3511482f8da" +checksum = "ebb6b80ecd0f3e195645804e48beaa6c6e3b1a9f76807d35f775dd8650b10b9f" dependencies = [ "arc-swap", "ckb-async-runtime", @@ -1134,17 +1140,17 @@ dependencies = [ "ckb-snapshot", "ckb-stop-handler", "ckb-store", + "ckb-systemtime", "ckb-tx-pool", "ckb-types", "ckb-verification", - "faketime", ] [[package]] name = "ckb-snapshot" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae9aea472940ffb9cd06d91289b1d3833e91ce9636173957daa35afaa22084e4" +checksum = "732883c8989efcf004d9d7aef4e049528404db3d1c32e7efe690932d3a42fc99" dependencies = [ "arc-swap", "ckb-chain-spec", @@ -1160,15 +1166,15 @@ dependencies = [ [[package]] name = "ckb-spawn" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dc0afc2cf33ceb4182e51444d825d114777ae8b5c10fa30617ad9b2e520269c" +checksum = "79a04f7b522b9f374c43b407c935a7e6104a3427193a56998c3580db43d1cd8a" [[package]] name = "ckb-stop-handler" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2b45c3bf269a77a8749a90bda73f906a54900e7335ae1ce5770b68aa19ddf83" +checksum 
= "59dd2b8aa19b07ebf3274e3b9562d81ab55943c6217e144796e3f553519680a2" dependencies = [ "ckb-channel", "ckb-logger", @@ -1178,9 +1184,9 @@ dependencies = [ [[package]] name = "ckb-store" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0ad2af8612abe39fffe043422fea009f1340edbf3552d8014dadf26a06ad90d" +checksum = "75290cc9daff0785596492899e760c7e6f4dfa86450f04f1a41221faf0cecd39" dependencies = [ "ckb-app-config", "ckb-chain-spec", @@ -1188,6 +1194,7 @@ dependencies = [ "ckb-db-schema", "ckb-error", "ckb-freezer", + "ckb-hash", "ckb-merkle-mountain-range", "ckb-traits", "ckb-types", @@ -1197,9 +1204,9 @@ dependencies = [ [[package]] name = "ckb-sync" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70cd64206a926d5f803747cc837a1d877a2b2bc3f25b922ba961e01efa690e5d" +checksum = "d0b6f690c5459b9f5c55b1872ad3ff4cb01934e4ad3dad85a50d3c046a5399c7" dependencies = [ "bitflags", "ckb-app-config", @@ -1209,13 +1216,13 @@ dependencies = [ "ckb-channel", "ckb-constant", "ckb-error", - "ckb-hash", "ckb-logger", "ckb-metrics", "ckb-network", "ckb-shared", "ckb-stop-handler", "ckb-store", + "ckb-systemtime", "ckb-traits", "ckb-tx-pool", "ckb-types", @@ -1223,7 +1230,6 @@ dependencies = [ "ckb-verification", "ckb-verification-traits", "dashmap 4.0.2", - "faketime", "futures", "governor", "keyed_priority_queue", @@ -1246,20 +1252,26 @@ dependencies = [ "phf", ] +[[package]] +name = "ckb-systemtime" +version = "0.108.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "243197680f69d6bb6cb1caf16199ce4a8162a258c757d5af8f727af0d8aabe9e" + [[package]] name = "ckb-traits" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4697c0acbf62b2994f1c26868cfc005e0ec4fc506849acfb9dc52f10af2a6df2" +checksum = 
"7e9d5827f20a396dfb785398db484fe50de93d76c02e1e32287832604a9dda91" dependencies = [ "ckb-types", ] [[package]] name = "ckb-tx-pool" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74ddfdd4d537baae7644b95ba71714c1f98e8329c276bf0a588bc0ea8cc794e2" +checksum = "64750d8656c173511fa63079d18886c67c4201b7d5f0c48f588567b7724a45fa" dependencies = [ "ckb-app-config", "ckb-async-runtime", @@ -1275,11 +1287,11 @@ dependencies = [ "ckb-snapshot", "ckb-stop-handler", "ckb-store", + "ckb-systemtime", "ckb-traits", "ckb-types", "ckb-util", "ckb-verification", - "faketime", "hyper", "lru", "rand 0.8.5", @@ -1289,9 +1301,9 @@ dependencies = [ [[package]] name = "ckb-types" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f9f918d7f04fed733c528ec98ba8bdee31a885bd082e6ff263ca21d58e01378" +checksum = "9c22b3b1ca8f88a8f48e2f73321c0605281c9c6f1e1c4d651c6138265c22291e" dependencies = [ "bit-vec", "bytes 1.1.0", @@ -1311,9 +1323,9 @@ dependencies = [ [[package]] name = "ckb-util" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d60a55b4ad918828bdbf700f1b9949d74768b710c927e19f15282a2320a7ec71" +checksum = "03d165c6958601dfbfa4cd00c9263ecfb013b4ccb6d9e1d3187bfa62801abc7d" dependencies = [ "linked-hash-map", "once_cell", @@ -1323,9 +1335,9 @@ dependencies = [ [[package]] name = "ckb-verification" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42d75a239fae4353c927039f464b584f1f25f01c302dd4a4a5e170950904d4a8" +checksum = "bbc1745cf02f6d628ac04cf58145b853a359ad4d74fdb418207e99773185ad11" dependencies = [ "ckb-chain-spec", "ckb-dao", @@ -1333,19 +1345,19 @@ dependencies = [ "ckb-error", "ckb-pow", "ckb-script", + "ckb-systemtime", "ckb-traits", "ckb-types", "ckb-verification-traits", "derive_more", - "faketime", 
"lru", ] [[package]] name = "ckb-verification-contextual" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a409da7af01d253d7e8e0009766f816088e1a530e12bcbe7179ad11986ef9f4e" +checksum = "6d7e00b796daa66579dca8aa963125f4b3eaa7db2dea5a4579695a5f8ccf95fb" dependencies = [ "ckb-async-runtime", "ckb-chain-spec", @@ -1356,20 +1368,20 @@ dependencies = [ "ckb-merkle-mountain-range", "ckb-reward-calculator", "ckb-store", + "ckb-systemtime", "ckb-traits", "ckb-types", "ckb-verification", "ckb-verification-traits", - "faketime", "rayon", "tokio", ] [[package]] name = "ckb-verification-traits" -version = "0.106.0" +version = "0.108.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2fd864ed16342a662987c7cd16487779b03763ebcf9d639082adcd31daf105d" +checksum = "88de577410c2e72ccd18e00cb63fc0000d41be50604a895946a1566a02272730" dependencies = [ "bitflags", "ckb-error", @@ -1377,9 +1389,9 @@ dependencies = [ [[package]] name = "ckb-vm" -version = "0.22.1" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8e8f7ba49aa55d08f8a575b69bc535cad65fdba75fea90856cee1fd3822a7a9" +checksum = "1223acc8054ce96f91c5d99d4942898d0bdadd618c3b14f1acd3e67212991d8e" dependencies = [ "byteorder", "bytes 1.1.0", @@ -1395,9 +1407,9 @@ dependencies = [ [[package]] name = "ckb-vm-definitions" -version = "0.22.1" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5445b62604e7ab2bf5abb37bf6cca7ac26b2e4a76fddb27ceb61850f24864d58" +checksum = "4af800ae2b6c54b70efa398dab015a09a52eeac2dd1ac3ad32c9bbe224974225" [[package]] name = "clang-sys" @@ -1420,7 +1432,7 @@ dependencies = [ "atty", "bitflags", "strsim 0.8.0", - "textwrap 0.11.0", + "textwrap", "unicode-width", "vec_map", "yaml-rust", @@ -1428,24 +1440,22 @@ dependencies = [ [[package]] name = "clap" -version = "3.2.21" +version = "4.0.26" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ed5341b2301a26ab80be5cbdced622e80ed808483c52e45e3310a877d3b37d7" +checksum = "2148adefda54e14492fb9bddcc600b4344c5d1a3123bd666dcb939c6f0e0e57e" dependencies = [ "atty", "bitflags", "clap_lex", - "indexmap", "strsim 0.10.0", "termcolor", - "textwrap 0.15.0", ] [[package]] name = "clap_lex" -version = "0.2.4" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5" +checksum = "350b9cf31731f9957399229e9b2adc51eeabdfbe9d71d9a0552275fd12710d09" dependencies = [ "os_str_bytes", ] @@ -1713,16 +1723,6 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" -[[package]] -name = "faketime" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdcfc2bfe63c760bba09679ed6cb3a001d409c5195b4490dced0dc0aa800582b" -dependencies = [ - "js-sys", - "tempfile", -] - [[package]] name = "faster-hex" version = "0.6.1" @@ -3012,9 +3012,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.39" +version = "1.0.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c54b25569025b7fc9651de43004ae593a75ad88543b17178aa5e1b9c4f15f56f" +checksum = "5d727cae5b39d21da60fa540906919ad737832fe0b1c165da3a34d6548c849d6" dependencies = [ "unicode-ident", ] @@ -3607,9 +3607,9 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "syn" -version = "1.0.96" +version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0748dd251e24453cb8717f0354206b91557e4ec8703673a4b30208f2abaf1ebf" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ "proc-macro2", "quote", @@ -3740,12 +3740,6 @@ dependencies = [ "unicode-width", ] 
-[[package]] -name = "textwrap" -version = "0.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1141d4d61095b28419e22cb0bbf02755f5e54e0526f97f1e3d1d160e60885fb" - [[package]] name = "thiserror" version = "1.0.31" @@ -4164,6 +4158,17 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "which" +version = "4.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2441c784c52b289a054b7201fc93253e288f094e2f4be9058343127c4226a269" +dependencies = [ + "either", + "libc", + "once_cell", +] + [[package]] name = "wildmatch" version = "1.1.0" diff --git a/Cargo.toml b/Cargo.toml index d93675c..bc2bd93 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,21 +9,23 @@ homepage = "https://github.com/nervosnetwork/ckb-light-client" repository = "https://github.com/nervosnetwork/ckb-light-client" [dependencies] -ckb-app-config = "0.106.0" -ckb-async-runtime = "0.106.0" -ckb-constant = "0.106.0" -ckb-types = "0.106.0" -ckb-network = "0.106.0" -ckb-jsonrpc-types = "0.106.0" -ckb-error = "0.106.0" -ckb-script = "0.106.0" -ckb-chain-spec = "0.106.0" -ckb-traits = "0.106.0" -ckb-resource = "0.106.0" -ckb-verification = "0.106.0" +ckb-app-config = "0.108.0" +ckb-async-runtime = "0.108.0" +ckb-constant = "0.108.0" +ckb-types = "0.108.0" +ckb-network = "0.108.0" +ckb-jsonrpc-types = "0.108.0" +ckb-error = "0.108.0" +ckb-script = "0.108.0" +ckb-chain-spec = "0.108.0" +ckb-traits = "0.108.0" +ckb-resource = "0.108.0" +ckb-verification = "0.108.0" +ckb-systemtime = "0.108.0" +ckb-hash = "0.108.0" ckb-merkle-mountain-range = "0.5.1" golomb-coded-set = "0.2.0" -rocksdb = { package = "ckb-rocksdb", version ="=0.18.3", features = ["snappy"], default-features = false } +rocksdb = { package = "ckb-rocksdb", version ="=0.19.0", features = ["snappy"], default-features = false } numext-fixed-uint = { version = "0.1", features = ["support_rand", "support_heapsize", "support_serde"] } anyhow = "1.0.56" thiserror = "1.0.30" @@ -37,18 +39,17 
@@ path-clean = "0.1.0" rand = "0.8.5" dashmap = "5.3" linked-hash-map = "0.5.6" -faketime = "0.2.1" jsonrpc-core = "18.0" jsonrpc-derive = "18.0" jsonrpc-http-server = "18.0" jsonrpc-server-utils = "18.0" [dev-dependencies] -ckb-launcher = "0.106.0" -ckb-shared = "0.106.0" -ckb-chain = "0.106.0" -ckb-tx-pool = "0.106.0" -ckb-store = "0.106.0" +ckb-launcher = "0.108.0" +ckb-shared = "0.108.0" +ckb-chain = "0.108.0" +ckb-tx-pool = "0.108.0" +ckb-store = "0.108.0" tempfile = "3.0" rand = "0.6" serde_json = "1.0" diff --git a/src/main.rs b/src/main.rs index 12b467a..62e735d 100644 --- a/src/main.rs +++ b/src/main.rs @@ -14,6 +14,9 @@ mod types; mod utils; mod verify; +// TODO Remove the patches if the code was merged into CKB. +mod patches; + use config::AppConfig; fn main() -> anyhow::Result<()> { diff --git a/src/patches.rs b/src/patches.rs new file mode 100644 index 0000000..e7e17fd --- /dev/null +++ b/src/patches.rs @@ -0,0 +1,73 @@ +use std::io::{Cursor, Write}; + +#[cfg(not(test))] +use ckb_hash::blake2b_256; +use golomb_coded_set::{GCSFilterWriter, SipHasher24Builder, M, P}; + +use ckb_types::{core::TransactionView, packed, prelude::*}; + +/// Provides data for building block filter data. +pub trait FilterDataProvider { + /// Finds the cell through its out point. + fn cell(&self, out_point: &packed::OutPoint) -> Option; +} + +/// Builds filter data for transactions. 
+pub fn build_filter_data( + provider: P, + transactions: &[TransactionView], +) -> (Vec, Vec) { + let mut filter_writer = Cursor::new(Vec::new()); + let mut filter = build_gcs_filter(&mut filter_writer); + let mut missing_out_points = Vec::new(); + for tx in transactions { + if !tx.is_cellbase() { + for out_point in tx.input_pts_iter() { + if let Some(input_cell) = provider.cell(&out_point) { + filter.add_element(input_cell.calc_lock_hash().as_slice()); + if let Some(type_script) = input_cell.type_().to_opt() { + filter.add_element(type_script.calc_script_hash().as_slice()); + } + } else { + missing_out_points.push(out_point); + } + } + } + for output_cell in tx.outputs() { + filter.add_element(output_cell.calc_lock_hash().as_slice()); + if let Some(type_script) = output_cell.type_().to_opt() { + filter.add_element(type_script.calc_script_hash().as_slice()); + } + } + } + filter + .finish() + .expect("flush to memory writer should be OK"); + let filter_data = filter_writer.into_inner(); + (filter_data, missing_out_points) +} + +/// Calculates a block filter hash. +#[cfg(not(test))] +pub fn calc_filter_hash( + parent_block_filter_hash: &packed::Byte32, + filter_data: &packed::Bytes, +) -> [u8; 32] { + blake2b_256( + [ + parent_block_filter_hash.as_slice(), + filter_data.calc_raw_data_hash().as_slice(), + ] + .concat(), + ) +} + +// TODO Use real block filter hashes in unit tests. 
+#[cfg(test)] +pub fn calc_filter_hash(_: &packed::Byte32, _: &packed::Bytes) -> [u8; 32] { + Default::default() +} + +fn build_gcs_filter(out: &mut dyn Write) -> GCSFilterWriter { + GCSFilterWriter::new(out, SipHasher24Builder::new(0, 0), M, P) +} diff --git a/src/protocols/filter/block_filter.rs b/src/protocols/filter/block_filter.rs index 7f179ec..a311fb9 100644 --- a/src/protocols/filter/block_filter.rs +++ b/src/protocols/filter/block_filter.rs @@ -6,13 +6,21 @@ use ckb_constant::sync::INIT_BLOCKS_IN_TRANSIT_PER_PEER; use ckb_network::{async_trait, bytes::Bytes, CKBProtocolContext, CKBProtocolHandler, PeerIndex}; use ckb_types::{core::BlockNumber, packed, prelude::*}; use golomb_coded_set::{GCSFilterReader, SipHasher24Builder, M, P}; -use log::{debug, error, info, trace, warn}; +use log::{debug, error, info, log_enabled, trace, warn, Level}; +use rand::seq::SliceRandom as _; use std::io::Cursor; use std::sync::RwLock; use std::time::Instant; use std::{sync::Arc, time::Duration}; pub(crate) const GET_BLOCK_FILTERS_TOKEN: u64 = 0; +pub(crate) const GET_BLOCK_FILTER_HASHES_TOKEN: u64 = 1; +pub(crate) const GET_BLOCK_FILTER_CHECK_POINTS_TOKEN: u64 = 2; + +pub(crate) const GET_BLOCK_FILTERS_DURATION: Duration = Duration::from_secs(3); +pub(crate) const GET_BLOCK_FILTER_HASHES_DURATION: Duration = Duration::from_secs(10); +pub(crate) const GET_BLOCK_FILTER_CHECK_POINTS_DURATION: Duration = Duration::from_secs(30); + const GET_BLOCK_FILTERS_TIMEOUT: Duration = Duration::from_secs(15); pub struct FilterProtocol { @@ -68,17 +76,145 @@ impl FilterProtocol { .collect() } - fn should_ask(&self) -> bool { + fn should_ask(&self, immediately: bool) -> bool { !self.storage.is_filter_scripts_empty() - && (self.last_ask_time.read().unwrap().is_none() + && (immediately + || self.last_ask_time.read().unwrap().is_none() || self.last_ask_time.read().unwrap().unwrap().elapsed() > GET_BLOCK_FILTERS_TIMEOUT) } pub fn update_min_filtered_block_number(&self, block_number: BlockNumber) { 
self.storage.update_min_filtered_block_number(block_number); + self.peers.update_min_filtered_block_number(block_number); self.last_ask_time.write().unwrap().replace(Instant::now()); } + + pub(crate) fn try_send_get_block_filters( + &self, + nc: Arc, + immediately: bool, + ) { + let start_number = self.storage.get_min_filtered_block_number() + 1; + let (finalized_check_point_index, _) = self.storage.get_last_check_point(); + let could_ask_more = self + .peers + .could_request_more_block_filters(finalized_check_point_index, start_number); + if log_enabled!(Level::Trace) { + let finalized_check_point_number = self + .peers + .calc_check_point_number(finalized_check_point_index); + let (cached_check_point_index, cached_hashes) = + self.peers.get_cached_block_filter_hashes(); + let cached_check_point_number = + self.peers.calc_check_point_number(cached_check_point_index); + let next_cached_check_point_number = self + .peers + .calc_check_point_number(cached_check_point_index + 1); + trace!( + "could request block filters from {} or not: {}, \ + finalized: index {}, number {}; \ + cached: index {}, number {}, length {}; \ + next cached: number {}", + start_number, + could_ask_more, + finalized_check_point_index, + finalized_check_point_number, + cached_check_point_index, + cached_check_point_number, + cached_hashes.len(), + next_cached_check_point_number + ); + } + if let Some((peer, _prove_state)) = self + .peers + .get_all_prove_states() + .iter() + .max_by_key(|(_, prove_state)| prove_state.get_last_header().total_difficulty()) + { + debug!("found best proved peer {}", peer); + + let mut matched_blocks = self.peers.matched_blocks().write().expect("poisoned"); + if let Some((db_start_number, blocks_count, db_blocks)) = + self.storage.get_earliest_matched_blocks() + { + debug!( + "try recover matched blocks from storage, start_number={}, \ + blocks_count={}, matched_count: {}", + db_start_number, + blocks_count, + matched_blocks.len(), + ); + if 
matched_blocks.is_empty() { + // recover matched blocks from storage + self.peers + .add_matched_blocks(&mut matched_blocks, db_blocks); + let tip_header = self.storage.get_tip_header(); + prove_or_download_matched_blocks( + Arc::clone(&self.peers), + &tip_header, + &matched_blocks, + nc.as_ref(), + INIT_BLOCKS_IN_TRANSIT_PER_PEER, + ); + if could_ask_more { + debug!( + "send get block filters to {}, start_number={}", + peer, start_number + ); + self.send_get_block_filters(nc, *peer, start_number); + } + } + } else if self.should_ask(immediately) && could_ask_more { + debug!( + "send get block filters to {}, start_number={}", + peer, start_number + ); + self.send_get_block_filters(nc, *peer, start_number); + } else { + trace!("no block filters is required to download"); + } + } else { + debug!("cannot find peers which are proved"); + } + } + + pub(crate) fn try_send_get_block_filter_hashes(&self, nc: Arc) { + let min_filtered_block_number = self.storage.get_min_filtered_block_number(); + self.peers + .update_min_filtered_block_number(min_filtered_block_number); + let finalized_check_point_index = self.storage.get_max_check_point_index(); + let cached_check_point_index = self.peers.get_cached_block_filter_hashes().0; + if let Some(start_number) = self + .peers + .if_cached_block_filter_hashes_require_update(finalized_check_point_index) + { + let best_peers = self + .peers + .get_all_proved_check_points() + .into_iter() + .filter_map(|(peer_index, (cpindex, _check_points))| { + if cpindex >= finalized_check_point_index { + Some(peer_index) + } else { + None + } + }) + .collect::>(); + if let Some(peer) = best_peers.choose(&mut rand::thread_rng()).cloned() { + self.send_get_block_filter_hashes(Arc::clone(&nc), peer, start_number); + } + } else if cached_check_point_index >= finalized_check_point_index { + let peers = self + .peers + .get_peers_which_require_more_latest_block_filter_hashes( + finalized_check_point_index, + ); + for (peer, start_number) in peers { + 
self.send_get_block_filter_hashes(Arc::clone(&nc), peer, start_number); + } + } + } } impl FilterProtocol { @@ -89,7 +225,12 @@ impl FilterProtocol { message: packed::BlockFilterMessageUnionReader<'_>, ) -> Status { match message { - // TODO: implement check points message processing + packed::BlockFilterMessageUnionReader::BlockFilterCheckPoints(reader) => { + components::BlockFilterCheckPointsProcess::new(reader, self, nc, peer).execute() + } + packed::BlockFilterMessageUnionReader::BlockFilterHashes(reader) => { + components::BlockFilterHashesProcess::new(reader, self, nc, peer).execute() + } packed::BlockFilterMessageUnionReader::BlockFilters(reader) => { components::BlockFiltersProcess::new(reader, self, nc, peer).execute() } @@ -101,8 +242,13 @@ impl FilterProtocol { &self, nc: Arc, peer: PeerIndex, - start_number: u64, + start_number: BlockNumber, ) { + trace!( + "request block filter from peer {}, starts at {}", + peer, + start_number + ); let content = packed::GetBlockFilters::new_builder() .start_number(start_number.pack()) .build(); @@ -110,7 +256,56 @@ impl FilterProtocol { .set(content) .build(); if let Err(err) = nc.send_message_to(peer, message.as_bytes()) { - let error_message = format!("nc.send_message BlockFilterMessage, error: {:?}", err); + let error_message = format!("nc.send_message GetBlockFilters, error: {:?}", err); + error!("{}", error_message); + } + } + + pub(crate) fn send_get_block_filter_hashes( + &self, + nc: Arc, + peer: PeerIndex, + start_number: BlockNumber, + ) { + trace!( + "request block filter hashes from peer {}, starts at {}", + peer, + start_number + ); + let content = packed::GetBlockFilterHashes::new_builder() + .start_number(start_number.pack()) + .build(); + let message = packed::BlockFilterMessage::new_builder() + .set(content) + .build(); + if let Err(err) = nc.send_message_to(peer, message.as_bytes()) { + let error_message = format!("nc.send_message GetBlockFilterHashes, error: {:?}", err); + error!("{}", 
error_message); + } + } + + pub(crate) fn send_get_block_filter_check_points( + &self, + nc: Arc, + peer: PeerIndex, + start_number: BlockNumber, + ) { + trace!( + "request check points from peer {}, starts at {}", + peer, + start_number + ); + let content = packed::GetBlockFilterCheckPoints::new_builder() + .start_number(start_number.pack()) + .build(); + let message = packed::BlockFilterMessage::new_builder() + .set(content) + .build(); + if let Err(err) = nc.send_message_to(peer, message.as_bytes()) { + let error_message = format!( + "nc.send_message GetBlockFilterCheckPoints, error: {:?}", + err + ); error!("{}", error_message); } } @@ -119,9 +314,21 @@ impl FilterProtocol { #[async_trait] impl CKBProtocolHandler for FilterProtocol { async fn init(&mut self, nc: Arc) { - nc.set_notify(Duration::from_secs(3), GET_BLOCK_FILTERS_TOKEN) + nc.set_notify(GET_BLOCK_FILTERS_DURATION, GET_BLOCK_FILTERS_TOKEN) .await .expect("set_notify should be ok"); + nc.set_notify( + GET_BLOCK_FILTER_HASHES_DURATION, + GET_BLOCK_FILTER_HASHES_TOKEN, + ) + .await + .expect("set_notify should be ok"); + nc.set_notify( + GET_BLOCK_FILTER_CHECK_POINTS_DURATION, + GET_BLOCK_FILTER_CHECK_POINTS_TOKEN, + ) + .await + .expect("set_notify should be ok"); } async fn connected( @@ -161,79 +368,21 @@ impl CKBProtocolHandler for FilterProtocol { let item_name = msg.item_name(); let status = self.try_process(Arc::clone(&nc), peer, msg); - trace!( - "FilterProtocol.received peer={}, message={}", - peer, - item_name - ); - if let Some(ban_time) = status.should_ban() { - error!( - "process {} from {}, ban {:?} since result is {}", - item_name, peer, ban_time, status - ); - nc.ban_peer(peer, ban_time, status.to_string()); - } else if status.should_warn() { - warn!("process {} from {}, result is {}", item_name, peer, status); - } else if !status.is_ok() { - debug!("process {} from {}, result is {}", item_name, peer, status); - } + status.process(nc, peer, "BlockFilter", item_name); } async fn 
notify(&mut self, nc: Arc, token: u64) { match token { GET_BLOCK_FILTERS_TOKEN => { - let proved_peers = self.peers.get_peers_which_are_proved(); - if let Some((peer, prove_state)) = proved_peers - .iter() - .max_by_key(|(_, prove_state)| prove_state.get_last_header().total_difficulty()) - { - let start_number = self.storage.get_min_filtered_block_number() + 1; - let prove_state_number = prove_state.get_last_header().header().number(); - debug!( - "found proved peer {}, start_number: {}, prove_state number: {:?}", - peer, - start_number, - prove_state.get_last_header().header().number() - ); - - let mut matched_blocks = self.peers.matched_blocks().write().expect("poisoned"); - if let Some((db_start_number, blocks_count, db_blocks)) = - self.storage.get_earliest_matched_blocks() - { - if matched_blocks.is_empty() { - debug!( - "recover matched blocks from storage, start_number={}, blocks_count={}, matched_count: {}", - db_start_number, blocks_count, - matched_blocks.len(), - ); - // recover matched blocks from storage - self.peers - .add_matched_blocks(&mut matched_blocks, db_blocks); - let tip_header = self.storage.get_tip_header(); - prove_or_download_matched_blocks( - Arc::clone(&self.peers), - &tip_header, - &matched_blocks, - nc.as_ref(), - INIT_BLOCKS_IN_TRANSIT_PER_PEER, - ); - if prove_state_number >= start_number { - debug!( - "send get block filters to {}, start_number={}", - peer, start_number - ); - self.send_get_block_filters(Arc::clone(&nc), *peer, start_number); - } - } - } else if self.should_ask() && prove_state_number >= start_number { - debug!( - "send get block filters to {}, start_number={}", - peer, start_number - ); - self.send_get_block_filters(Arc::clone(&nc), *peer, start_number); - } - } else { - debug!("cannot find peers which are proved"); + self.try_send_get_block_filters(nc, false); + } + GET_BLOCK_FILTER_HASHES_TOKEN => { + self.try_send_get_block_filter_hashes(nc); + } + GET_BLOCK_FILTER_CHECK_POINTS_TOKEN => { + let peers = 
self.peers.get_peers_which_require_more_check_points(); + for (peer, start_number) in peers { + self.send_get_block_filter_check_points(Arc::clone(&nc), peer, start_number); } } _ => unreachable!(), diff --git a/src/protocols/filter/components/block_filter_check_points_process.rs b/src/protocols/filter/components/block_filter_check_points_process.rs new file mode 100644 index 0000000..f7be2ec --- /dev/null +++ b/src/protocols/filter/components/block_filter_check_points_process.rs @@ -0,0 +1,79 @@ +use std::sync::Arc; + +use ckb_network::{CKBProtocolContext, PeerIndex}; +use ckb_types::{core::BlockNumber, packed, prelude::*}; +use log::trace; + +use crate::protocols::{FilterProtocol, Status, StatusCode}; + +pub struct BlockFilterCheckPointsProcess<'a> { + message: packed::BlockFilterCheckPointsReader<'a>, + protocol: &'a FilterProtocol, + nc: Arc, + peer_index: PeerIndex, +} + +impl<'a> BlockFilterCheckPointsProcess<'a> { + pub fn new( + message: packed::BlockFilterCheckPointsReader<'a>, + protocol: &'a FilterProtocol, + nc: Arc, + peer_index: PeerIndex, + ) -> Self { + Self { + message, + nc, + protocol, + peer_index, + } + } + + pub fn execute(self) -> Status { + let peer_state = if let Some(peer_state) = self.protocol.peers.get_state(&self.peer_index) { + peer_state + } else { + let errmsg = "peer is disconnected"; + return StatusCode::Ignore.with_context(errmsg); + }; + + let prove_number = if let Some(prove_state) = peer_state.get_prove_state() { + prove_state.get_last_header().header().number() + } else { + let errmsg = "peer is not proved"; + return StatusCode::Ignore.with_context(errmsg); + }; + + let start_number: BlockNumber = self.message.start_number().unpack(); + let check_points = self + .message + .block_filter_hashes() + .iter() + .map(|item| item.to_entity()) + .collect::>(); + + trace!( + "peer {}: last-state: {}, add check points (start: {}, len: {})", + self.peer_index, + peer_state, + start_number, + check_points.len() + ); + + let 
next_start_number_opt = return_if_failed!(self.protocol.peers.add_check_points( + self.peer_index, + prove_number, + start_number, + &check_points + )); + + if let Some(next_start_number) = next_start_number_opt { + self.protocol.send_get_block_filter_check_points( + self.nc, + self.peer_index, + next_start_number, + ); + } + + Status::ok() + } +} diff --git a/src/protocols/filter/components/block_filter_hashes_process.rs b/src/protocols/filter/components/block_filter_hashes_process.rs new file mode 100644 index 0000000..adb9920 --- /dev/null +++ b/src/protocols/filter/components/block_filter_hashes_process.rs @@ -0,0 +1,248 @@ +use std::sync::Arc; + +use ckb_network::{CKBProtocolContext, PeerIndex}; +use ckb_types::{core::BlockNumber, packed, prelude::*}; +use log::trace; +use rand::seq::SliceRandom as _; + +use crate::protocols::{FilterProtocol, Status, StatusCode}; + +pub struct BlockFilterHashesProcess<'a> { + message: packed::BlockFilterHashesReader<'a>, + protocol: &'a FilterProtocol, + nc: Arc, + peer_index: PeerIndex, +} + +impl<'a> BlockFilterHashesProcess<'a> { + pub fn new( + message: packed::BlockFilterHashesReader<'a>, + protocol: &'a FilterProtocol, + nc: Arc, + peer_index: PeerIndex, + ) -> Self { + Self { + message, + nc, + protocol, + peer_index, + } + } + + pub fn execute(self) -> Status { + let peer_state = if let Some(peer_state) = self.protocol.peers.get_state(&self.peer_index) { + peer_state + } else { + let errmsg = "peer is disconnected"; + return StatusCode::Ignore.with_context(errmsg); + }; + + let prove_number = if let Some(prove_state) = peer_state.get_prove_state() { + prove_state.get_last_header().header().number() + } else { + let errmsg = "peer is not proved"; + return StatusCode::Ignore.with_context(errmsg); + }; + + let start_number: BlockNumber = self.message.start_number().unpack(); + let parent_block_filter_hash = self.message.parent_block_filter_hash().to_entity(); + let block_filter_hashes = self + .message + 
.block_filter_hashes() + .iter() + .map(|item| item.to_entity()) + .collect::>(); + + trace!( + "peer {}: last-state: {}, add block filter hashes (start: {}, len: {}) \ + and parent block filter hash is {:#x}", + self.peer_index, + peer_state, + start_number, + block_filter_hashes.len(), + parent_block_filter_hash + ); + + let (finalized_check_point_index, finalized_check_point) = + self.protocol.storage.get_last_check_point(); + let finalized_check_point_number = self + .protocol + .peers + .calc_check_point_number(finalized_check_point_index); + + let (cached_check_point_index, cached_hashes) = + self.protocol.peers.get_cached_block_filter_hashes(); + let cached_check_point_number = self + .protocol + .peers + .calc_check_point_number(cached_check_point_index); + let next_cached_check_point_number = self + .protocol + .peers + .calc_check_point_number(cached_check_point_index + 1); + + trace!( + "finalized: index {}, number {}; \ + cached: index {}, number {}, length {}; \ + next cached: number {}", + finalized_check_point_index, + finalized_check_point_number, + cached_check_point_index, + cached_check_point_number, + cached_hashes.len(), + next_cached_check_point_number + ); + + if start_number <= finalized_check_point_number + && cached_check_point_number < start_number + && start_number <= next_cached_check_point_number + { + // Check block numbers. + let cached_last_number = cached_check_point_number + cached_hashes.len() as BlockNumber; + if start_number > cached_last_number + 1 { + let errmsg = format!( + "start number ({}) is continuous with cached last number ({})", + start_number, cached_last_number + ); + return StatusCode::Ignore.with_context(errmsg); + } + + // Check cached block filter hashes. 
+ let (cached_check_point, next_cached_check_point) = { + let cached_check_points = self + .protocol + .storage + .get_check_points(cached_check_point_index, 2); + ( + cached_check_points[0].clone(), + cached_check_points[1].clone(), + ) + }; + + if start_number == cached_check_point_number + 1 { + if cached_check_point != parent_block_filter_hash { + let errmsg = format!( + "check point for block {} is {:#x} but parent hash is {:#x}", + start_number, cached_check_point, parent_block_filter_hash + ); + return StatusCode::BlockFilterHashesIsUnexpected.with_context(errmsg); + } + } else { + // This branch must be satisfied `start_number > cached_check_point_number + 1`. + let diff = start_number - cached_check_point_number; + let index = diff as usize - 2; + let cached_hash = &cached_hashes[index]; + if *cached_hash != parent_block_filter_hash { + let errmsg = format!( + "cached hash for block {} is {:#x} but parent hash is {:#x}", + start_number - 1, + cached_hash, + parent_block_filter_hash + ); + return StatusCode::Ignore.with_context(errmsg); + } + }; + let end_number = start_number + block_filter_hashes.len() as BlockNumber - 1; + if end_number > next_cached_check_point_number { + let diff = end_number - next_cached_check_point_number; + let index = block_filter_hashes.len() - (diff as usize) - 1; + let new_hash = &block_filter_hashes[index]; + if next_cached_check_point != *new_hash { + let errmsg = format!( + "check point for block {} is {:#x} but got {:#}", + next_cached_check_point_number, next_cached_check_point, new_hash + ); + return StatusCode::BlockFilterHashesIsUnexpected.with_context(errmsg); + } + } + let index_offset = (start_number - (cached_check_point_number + 1)) as usize; + for (index, (old_hash, new_hash)) in cached_hashes[index_offset..] 
+ .iter() + .zip(block_filter_hashes.iter()) + .enumerate() + { + if old_hash != new_hash { + let number = start_number + (index_offset + index) as BlockNumber; + let errmsg = format!( + "cached hash for block {} is {:#x} but new is {:#}", + number, old_hash, new_hash + ); + return StatusCode::Ignore.with_context(errmsg); + } + } + + // Update cached block filter hashes. + let start_index = cached_hashes[index_offset..].len(); + let mut new_cached_hashes = cached_hashes; + if end_number > next_cached_check_point_number { + let excess_size = (end_number - next_cached_check_point_number) as usize; + let new_size = block_filter_hashes.len() - excess_size; + new_cached_hashes.extend_from_slice(&block_filter_hashes[start_index..new_size]); + } else { + new_cached_hashes.extend_from_slice(&block_filter_hashes[start_index..]); + } + self.protocol + .peers + .update_cached_block_filter_hashes(new_cached_hashes); + + if end_number < next_cached_check_point_number { + let best_peers = self + .protocol + .peers + .get_all_proved_check_points() + .into_iter() + .filter_map(|(peer_index, (cpindex, _check_points))| { + if peer_index == self.peer_index { + None + } else if cpindex >= finalized_check_point_index { + Some(peer_index) + } else { + None + } + }) + .collect::>(); + let best_peer = best_peers + .choose(&mut rand::thread_rng()) + .cloned() + .unwrap_or(self.peer_index); + self.protocol + .send_get_block_filter_hashes(self.nc, best_peer, end_number + 1); + } else { + // if couldn't request more block filter hashes, + // check if could request more block filters. 
+ self.protocol.try_send_get_block_filters(self.nc, true); + } + } else if start_number > finalized_check_point_number { + let next_start_number_opt = + return_if_failed!(self.protocol.peers.update_latest_block_filter_hashes( + self.peer_index, + prove_number, + finalized_check_point_index, + &finalized_check_point, + start_number, + &parent_block_filter_hash, + &block_filter_hashes + )); + + if let Some(next_start_number) = next_start_number_opt { + self.protocol.send_get_block_filter_hashes( + self.nc, + self.peer_index, + next_start_number, + ); + } + } else { + let errmsg = format!( + "unknown start block number: {}, \ + cached in ({},{}], finalized starts at {}", + start_number, + cached_check_point_number, + next_cached_check_point_number, + finalized_check_point_number + ); + return StatusCode::Ignore.with_context(errmsg); + } + + Status::ok() + } +} diff --git a/src/protocols/filter/components/block_filters_process.rs b/src/protocols/filter/components/block_filters_process.rs index 0d99c43..bfe3c9b 100644 --- a/src/protocols/filter/components/block_filters_process.rs +++ b/src/protocols/filter/components/block_filters_process.rs @@ -7,7 +7,9 @@ use ckb_types::core::BlockNumber; use ckb_types::{packed, prelude::*}; use log::{info, trace, warn}; use rand::seq::SliceRandom; -use std::sync::Arc; +use std::{cmp, sync::Arc}; + +use crate::patches::calc_filter_hash; pub struct BlockFiltersProcess<'a> { message: packed::BlockFiltersReader<'a>, @@ -43,11 +45,11 @@ impl<'a> BlockFiltersProcess<'a> { } let peer_state = peer_state_opt.expect("checked Some"); - let (prove_state_block_number, prove_state_block_hash) = if let Some(header) = peer_state + let prove_state_block_hash = if let Some(header) = peer_state .get_prove_state() .map(|prove_state| prove_state.get_last_header().header()) { - (header.number(), header.hash()) + header.hash() } else { warn!("ignoring, peer {} prove state is none", self.peer); return Status::ok(); @@ -69,85 +71,183 @@ impl<'a> 
BlockFiltersProcess<'a> { .storage .update_block_number(min_filtered_block_number); } - } else { - let filters_count = block_filters.filters().len(); - let blocks_count = block_filters.block_hashes().len(); - - if filters_count != blocks_count { - let error_message = format!( - "filters length ({}) not equal to block_hashes length ({})", - filters_count, blocks_count - ); - return StatusCode::MalformedProtocolMessage.with_context(error_message); - } + return Status::ok(); + } - if filters_count == 0 { - info!("no new filters, ignore peer: {}", self.peer); - return Status::ok(); - } + let filters_count = block_filters.filters().len(); + let blocks_count = block_filters.block_hashes().len(); - if prove_state_block_number < start_number { - warn!( - "ignoring, peer {} prove_state_block_number {} is smaller than start_number {}", - self.peer, prove_state_block_number, start_number - ); - return Status::ok(); - } - let limit = (prove_state_block_number - start_number + 1) as usize; - let possible_match_blocks = self.filter.check_filters_data(block_filters, limit); - let possible_match_blocks_len = possible_match_blocks.len(); - trace!( - "peer {}, matched blocks: {}", - self.peer, - possible_match_blocks_len + if filters_count != blocks_count { + let error_message = format!( + "filters length ({}) not equal to block_hashes length ({})", + filters_count, blocks_count ); - let actual_blocks_count = blocks_count.min(limit); - let tip_header = self.filter.storage.get_tip_header(); - let filtered_block_number = start_number - 1 + actual_blocks_count as BlockNumber; + return StatusCode::MalformedProtocolMessage.with_context(error_message); + } - let mut matched_blocks = self - .filter - .peers - .matched_blocks() - .write() - .expect("poisoned"); - if possible_match_blocks_len != 0 { - let blocks = possible_match_blocks - .iter() - .map(|block_hash| (block_hash.clone(), block_hash == &prove_state_block_hash)) - .collect::>(); - self.filter.storage.add_matched_blocks( + if 
filters_count == 0 { + info!("no new filters, ignore peer: {}", self.peer); + return Status::ok(); + } + + let (finalized_check_point_index, finalized_check_point_hash) = + self.filter.storage.get_last_check_point(); + let finalized_check_point_number = self + .filter + .peers + .calc_check_point_number(finalized_check_point_index); + + let (mut parent_block_filter_hash, expected_block_filter_hashes) = + if start_number <= finalized_check_point_number { + // Use cached block filter hashes to check the block filters. + let (cached_check_point_index, mut cached_block_filter_hashes) = + self.filter.peers.get_cached_block_filter_hashes(); + let cached_check_point_number = self + .filter + .peers + .calc_check_point_number(cached_check_point_index); + let next_cached_check_point_number = self + .filter + .peers + .calc_check_point_number(cached_check_point_index + 1); + trace!( + "check block filters (start: {}, len: {}), \ + with cached block filter hashes: ({},{}]", start_number, - actual_blocks_count as u64, - blocks, + filters_count, + cached_check_point_number, + next_cached_check_point_number ); - if matched_blocks.is_empty() { - if let Some((_start_number, _blocks_count, db_blocks)) = - self.filter.storage.get_earliest_matched_blocks() - { - self.filter - .peers - .add_matched_blocks(&mut matched_blocks, db_blocks); - prove_or_download_matched_blocks( - Arc::clone(&self.filter.peers), - &tip_header, - &matched_blocks, - self.nc.as_ref(), - INIT_BLOCKS_IN_TRANSIT_PER_PEER, - ); - } + if start_number <= cached_check_point_number + || start_number > next_cached_check_point_number + { + let errmsg = format!( + "first block filter (number: {}) could not be checked \ + with cached block filter hashes ({},{}]", + start_number, cached_check_point_number, next_cached_check_point_number + ); + return StatusCode::Ignore.with_context(errmsg); } - } else if matched_blocks.is_empty() { - self.filter - .storage - .update_block_number(filtered_block_number) + if 
cached_block_filter_hashes.is_empty() { + let errmsg = "cached block filter hashes is empty"; + return StatusCode::Ignore.with_context(errmsg); + } + if start_number == cached_check_point_number + 1 { + let cached_check_point = self + .filter + .storage + .get_check_points(cached_check_point_index, 1) + .get(0) + .cloned() + .expect("all check points before finalized should be existed"); + (cached_check_point, cached_block_filter_hashes) + } else { + let start_index = (start_number - cached_check_point_number) as usize - 2; + let parent_hash = cached_block_filter_hashes[start_index].clone(); + cached_block_filter_hashes.drain(..=start_index); + (parent_hash, cached_block_filter_hashes) + } + } else { + // Use latest block filter hashes to check the block filters. + let mut latest_block_filter_hashes = self + .filter + .peers + .get_latest_block_filter_hashes(finalized_check_point_index); + if start_number == finalized_check_point_number + 1 { + (finalized_check_point_hash, latest_block_filter_hashes) + } else { + let start_index = (start_number - finalized_check_point_number) as usize - 2; + let parent_hash = latest_block_filter_hashes[start_index].clone(); + latest_block_filter_hashes.drain(..=start_index); + (parent_hash, latest_block_filter_hashes) + } + }; + + let limit = cmp::min(filters_count, expected_block_filter_hashes.len()); + + for (index, (filter, expected_hash)) in block_filters + .filters() + .into_iter() + .take(limit) + .zip(expected_block_filter_hashes.into_iter()) + .enumerate() + { + let current_hash = calc_filter_hash(&parent_block_filter_hash, &filter).pack(); + if current_hash != expected_hash { + let errmsg = format!( + "peer {}: block filter hash for block {} expect {:#x} but got {:#x}", + self.peer, + start_number + index as BlockNumber, + expected_hash, + current_hash, + ); + return StatusCode::BlockFilterDataIsUnexpected.with_context(errmsg); } + parent_block_filter_hash = current_hash; + } + + let possible_match_blocks = 
self.filter.check_filters_data(block_filters, limit); + let possible_match_blocks_len = possible_match_blocks.len(); + trace!( + "peer {}, matched blocks: {}", + self.peer, + possible_match_blocks_len + ); + let actual_blocks_count = blocks_count.min(limit); + let tip_header = self.filter.storage.get_tip_header(); + let filtered_block_number = start_number - 1 + actual_blocks_count as BlockNumber; + let mut matched_blocks = self + .filter + .peers + .matched_blocks() + .write() + .expect("poisoned"); + if possible_match_blocks_len != 0 { + let blocks = possible_match_blocks + .iter() + .map(|block_hash| (block_hash.clone(), block_hash == &prove_state_block_hash)) + .collect::>(); + self.filter.storage.add_matched_blocks( + start_number, + actual_blocks_count as u64, + blocks, + ); + if matched_blocks.is_empty() { + if let Some((_start_number, _blocks_count, db_blocks)) = + self.filter.storage.get_earliest_matched_blocks() + { + self.filter + .peers + .add_matched_blocks(&mut matched_blocks, db_blocks); + prove_or_download_matched_blocks( + Arc::clone(&self.filter.peers), + &tip_header, + &matched_blocks, + self.nc.as_ref(), + INIT_BLOCKS_IN_TRANSIT_PER_PEER, + ); + } + } + } else if matched_blocks.is_empty() { self.filter - .update_min_filtered_block_number(filtered_block_number); + .storage + .update_block_number(filtered_block_number) + } + + self.filter + .update_min_filtered_block_number(filtered_block_number); + + let could_request_more_block_filters = self.filter.peers.could_request_more_block_filters( + finalized_check_point_index, + filtered_block_number + 1, + ); + if could_request_more_block_filters { // send next batch GetBlockFilters message to a random best peer - let best_peers: Vec<_> = self.filter.peers.get_best_proved_peers(&tip_header); - let next_peer = best_peers + let best_peer = self + .filter + .peers + .get_best_proved_peers(&tip_header) .into_iter() .filter(|peer| *peer != self.peer) .collect::>() @@ -155,8 +255,13 @@ impl<'a> 
BlockFiltersProcess<'a> { .cloned() .unwrap_or(self.peer); self.filter - .send_get_block_filters(self.nc, next_peer, filtered_block_number + 1); + .send_get_block_filters(self.nc, best_peer, filtered_block_number + 1); + } else { + // if couldn't request more block filters, + // check if could request more block filter hashes. + self.filter.try_send_get_block_filter_hashes(self.nc); } + Status::ok() } } diff --git a/src/protocols/filter/components/mod.rs b/src/protocols/filter/components/mod.rs index c72c5d9..3f79b0f 100644 --- a/src/protocols/filter/components/mod.rs +++ b/src/protocols/filter/components/mod.rs @@ -1,3 +1,7 @@ +mod block_filter_check_points_process; +mod block_filter_hashes_process; mod block_filters_process; +pub(crate) use block_filter_check_points_process::BlockFilterCheckPointsProcess; +pub(crate) use block_filter_hashes_process::BlockFilterHashesProcess; pub(crate) use block_filters_process::BlockFiltersProcess; diff --git a/src/protocols/light_client/components/send_blocks_proof.rs b/src/protocols/light_client/components/send_blocks_proof.rs index ed6c1a1..102fe96 100644 --- a/src/protocols/light_client/components/send_blocks_proof.rs +++ b/src/protocols/light_client/components/send_blocks_proof.rs @@ -11,7 +11,7 @@ use super::{ pub(crate) struct SendBlocksProofProcess<'a> { message: packed::SendBlocksProofReader<'a>, protocol: &'a mut LightClientProtocol, - peer: PeerIndex, + peer_index: PeerIndex, nc: &'a dyn CKBProtocolContext, } @@ -19,13 +19,13 @@ impl<'a> SendBlocksProofProcess<'a> { pub(crate) fn new( message: packed::SendBlocksProofReader<'a>, protocol: &'a mut LightClientProtocol, - peer: PeerIndex, + peer_index: PeerIndex, nc: &'a dyn CKBProtocolContext, ) -> Self { Self { message, protocol, - peer, + peer_index, nc, } } @@ -34,18 +34,17 @@ impl<'a> SendBlocksProofProcess<'a> { let status = self.execute_internally(); self.protocol .peers() - .update_blocks_proof_request(self.peer, None); + 
.update_blocks_proof_request(self.peer_index, None); status } fn execute_internally(&self) -> Status { - let peer_state = return_if_failed!(self.protocol.get_peer_state(&self.peer)); + let peer = return_if_failed!(self.protocol.get_peer(&self.peer_index)); - let original_request = if let Some(original_request) = peer_state.get_blocks_proof_request() - { + let original_request = if let Some(original_request) = peer.get_blocks_proof_request() { original_request } else { - error!("peer {} isn't waiting for a proof", self.peer); + error!("peer {} isn't waiting for a proof", self.peer_index); return StatusCode::PeerIsNotOnProcess.into(); }; @@ -57,14 +56,19 @@ impl<'a> SendBlocksProofProcess<'a> { && self.message.headers().is_empty() && self.message.missing_block_hashes().is_empty() { - return_if_failed!(self.protocol.process_last_state(self.peer, last_header)); + return_if_failed!(self + .protocol + .process_last_state(self.peer_index, last_header)); self.protocol .peers() - .mark_fetching_headers_timeout(self.peer); + .mark_fetching_headers_timeout(self.peer_index); return Status::ok(); } else { // Since the last state is different, then no data should be contained. 
- error!("peer {} send a proof with different last state", self.peer); + error!( + "peer {} send a proof with different last state", + self.peer_index + ); return StatusCode::UnexpectedResponse.into(); } } @@ -88,7 +92,7 @@ impl<'a> SendBlocksProofProcess<'a> { .into_iter() .collect::>(); if !original_request.check_block_hashes(&received_block_hashes, &missing_block_hashes) { - error!("peer {} send an unknown proof", self.peer); + error!("peer {} send an unknown proof", self.peer_index); return StatusCode::UnexpectedResponse.into(); } @@ -97,7 +101,7 @@ impl<'a> SendBlocksProofProcess<'a> { if !self.message.proof().is_empty() { error!( "peer {} send a proof when all blocks are missing", - self.peer + self.peer_index ); return StatusCode::UnexpectedResponse.into(); } @@ -134,26 +138,26 @@ impl<'a> SendBlocksProofProcess<'a> { .peers .get_best_proved_peers(&last_header.header().data()) .into_iter() - .filter_map(|peer| { + .filter_map(|peer_index| { self.protocol .peers - .get_state(&peer) - .map(|state| (peer, state)) + .get_peer(&peer_index) + .map(|peer| (peer_index, peer)) }) .collect(); - if let Some((peer, _)) = best_peers + if let Some((peer_index, _)) = best_peers .iter() - .filter(|(_peer, peer_state)| peer_state.get_blocks_request().is_none()) + .filter(|(_peer_index, peer)| peer.get_blocks_request().is_none()) .collect::>() .choose(&mut rand::thread_rng()) { self.protocol .peers - .update_blocks_request(*peer, Some(block_hashes.clone())); + .update_blocks_request(*peer_index, Some(block_hashes.clone())); debug!( "send get blocks request to peer: {}, matched_count: {}", - peer, + peer_index, block_hashes.len() ); for hashes in @@ -168,7 +172,7 @@ impl<'a> SendBlocksProofProcess<'a> { .as_bytes(); if let Err(err) = self.nc.send_message( SupportProtocols::Sync.protocol_id(), - *peer, + *peer_index, message, ) { let error_message = diff --git a/src/protocols/light_client/components/send_last_state.rs b/src/protocols/light_client/components/send_last_state.rs 
index dc859df..8f36255 100644 --- a/src/protocols/light_client/components/send_last_state.rs +++ b/src/protocols/light_client/components/send_last_state.rs @@ -1,12 +1,12 @@ use super::super::{LastState, LightClientProtocol, Status}; use ckb_network::{CKBProtocolContext, PeerIndex}; use ckb_types::{packed, prelude::*, utilities::merkle_mountain_range::VerifiableHeader}; -use log::trace; +use log::{debug, trace}; pub(crate) struct SendLastStateProcess<'a> { message: packed::SendLastStateReader<'a>, protocol: &'a mut LightClientProtocol, - peer: PeerIndex, + peer_index: PeerIndex, nc: &'a dyn CKBProtocolContext, } @@ -14,47 +14,59 @@ impl<'a> SendLastStateProcess<'a> { pub(crate) fn new( message: packed::SendLastStateReader<'a>, protocol: &'a mut LightClientProtocol, - peer: PeerIndex, + peer_index: PeerIndex, nc: &'a dyn CKBProtocolContext, ) -> Self { Self { message, protocol, - peer, + peer_index, nc, } } pub(crate) fn execute(self) -> Status { - let peer_state = return_if_failed!(self.protocol.get_peer_state(&self.peer)); + let peer_state = return_if_failed!(self.protocol.get_peer_state(&self.peer_index)); let last_header: VerifiableHeader = self.message.last_header().to_entity().into(); return_if_failed!(self.protocol.check_verifiable_header(&last_header)); let last_state = LastState::new(last_header); - self.protocol + return_if_failed!(self + .protocol .peers() - .update_last_state(self.peer, last_state.clone()); + .update_last_state(self.peer_index, last_state.clone())); if let Some(prev_last_state) = peer_state.get_last_state() { - trace!("peer {}: update last state", self.peer); - if prev_last_state.verifiable_header().total_difficulty() - < last_state.verifiable_header().total_difficulty() - { + trace!( + "peer {}: update last state from {} to {}", + self.peer_index, + prev_last_state, + last_state, + ); + if prev_last_state.total_difficulty() < last_state.total_difficulty() { if let Some(prove_state) = peer_state.get_prove_state() { if 
prove_state.is_parent_of(&last_state) { - trace!("peer {}: new last state could be trusted", self.peer); + trace!("peer {}: new last state could be trusted", self.peer_index); let last_n_blocks = self.protocol.last_n_blocks() as usize; let child_prove_state = prove_state.new_child(last_state, last_n_blocks); - self.protocol - .update_prove_state_to_child(self.peer, child_prove_state); + return_if_failed!(self + .protocol + .update_prove_state_to_child(self.peer_index, child_prove_state)); } } } } else { - trace!("peer {}: initialize last state", self.peer); - self.protocol.get_last_state_proof(self.nc, self.peer); + trace!("peer {}: initialize last state", self.peer_index); + let is_sent = + return_if_failed!(self.protocol.get_last_state_proof(self.nc, self.peer_index)); + if !is_sent { + debug!( + "peer {} skip sending a request for last state proof", + self.peer_index + ); + } } Status::ok() diff --git a/src/protocols/light_client/components/send_last_state_proof.rs b/src/protocols/light_client/components/send_last_state_proof.rs index b811f57..b969bb4 100644 --- a/src/protocols/light_client/components/send_last_state_proof.rs +++ b/src/protocols/light_client/components/send_last_state_proof.rs @@ -22,7 +22,7 @@ use super::super::{ pub(crate) struct SendLastStateProofProcess<'a> { message: packed::SendLastStateProofReader<'a>, protocol: &'a mut LightClientProtocol, - peer: PeerIndex, + peer_index: PeerIndex, nc: &'a dyn CKBProtocolContext, } @@ -30,24 +30,24 @@ impl<'a> SendLastStateProofProcess<'a> { pub(crate) fn new( message: packed::SendLastStateProofReader<'a>, protocol: &'a mut LightClientProtocol, - peer: PeerIndex, + peer_index: PeerIndex, nc: &'a dyn CKBProtocolContext, ) -> Self { Self { message, protocol, - peer, + peer_index, nc, } } pub(crate) fn execute(self) -> Status { - let peer_state = return_if_failed!(self.protocol.get_peer_state(&self.peer)); + let peer_state = return_if_failed!(self.protocol.get_peer_state(&self.peer_index)); let 
original_request = if let Some(original_request) = peer_state.get_prove_request() { original_request } else { - warn!("peer {} isn't waiting for a proof", self.peer); + warn!("peer {} isn't waiting for a proof", self.peer_index); return Status::ok(); }; @@ -56,10 +56,19 @@ impl<'a> SendLastStateProofProcess<'a> { // Update the last state if the response contains a new one. if !original_request.is_same_as(&last_header) { if self.message.proof().is_empty() { - return_if_failed!(self.protocol.process_last_state(self.peer, last_header)); - self.protocol.get_last_state_proof(self.nc, self.peer); + return_if_failed!(self + .protocol + .process_last_state(self.peer_index, last_header)); + let is_sent = + return_if_failed!(self.protocol.get_last_state_proof(self.nc, self.peer_index)); + if !is_sent { + debug!( + "peer {} skip sending a request for last state proof", + self.peer_index + ); + } } else { - warn!("peer {} send an unknown proof", self.peer); + warn!("peer {} send an unknown proof", self.peer_index); } return Status::ok(); } @@ -82,7 +91,7 @@ impl<'a> SendLastStateProofProcess<'a> { )); trace!( "peer {}: headers count: reorg: {}, sampled: {}, last_n: {}", - self.peer, + self.peer_index, reorg_count, sampled_count, last_n_count @@ -101,7 +110,10 @@ impl<'a> SendLastStateProofProcess<'a> { // Check tau with epoch difficulties of samples. 
let failed_to_verify_tau = if original_request.if_skip_check_tau() { - trace!("peer {} skip checking TAU since the flag is set", self.peer); + trace!( + "peer {} skip checking TAU since the flag is set", + self.peer_index + ); false } else if sampled_count != 0 { let start_header = &headers[reorg_count]; @@ -119,7 +131,7 @@ impl<'a> SendLastStateProofProcess<'a> { } else { trace!( "peer {} skip checking TAU since no sampled headers", - self.peer + self.peer_index ); false }; @@ -172,19 +184,20 @@ impl<'a> SendLastStateProofProcess<'a> { let mut prove_request = ProveRequest::new(LastState::new(last_header), content.clone()); prove_request.skip_check_tau(); - self.protocol + return_if_failed!(self + .protocol .peers() - .update_prove_request(self.peer, Some(prove_request)); + .update_prove_request(self.peer_index, prove_request)); let message = packed::LightClientMessage::new_builder() .set(content) .build(); - self.nc.reply(self.peer, &message); + self.nc.reply(self.peer_index, &message); let errmsg = "failed to verify TAU"; return StatusCode::RequireRecheck.with_context(errmsg); } else { - warn!("peer {}, build prove request failed", self.peer); + warn!("peer {}, build prove request failed", self.peer_index); } } else { let reorg_last_headers = headers[..reorg_count] @@ -247,9 +260,9 @@ impl<'a> SendLastStateProofProcess<'a> { panic!("long fork detected"); } - let long_fork_detected = !self + let long_fork_detected = !return_if_failed!(self .protocol - .commit_prove_state(self.peer, prove_state.clone()); + .commit_prove_state(self.peer_index, prove_state.clone())); if long_fork_detected { let last_header = prove_state.get_last_header(); @@ -260,27 +273,28 @@ impl<'a> SendLastStateProofProcess<'a> { let mut prove_request = ProveRequest::new(LastState::new(last_header.clone()), content.clone()); prove_request.long_fork_detected(); - self.protocol + return_if_failed!(self + .protocol .peers() - .update_prove_request(self.peer, Some(prove_request)); + 
.update_prove_request(self.peer_index, prove_request)); let message = packed::LightClientMessage::new_builder() .set(content) .build(); - self.nc.reply(self.peer, &message); + self.nc.reply(self.peer_index, &message); let errmsg = "long fork detected"; return StatusCode::RequireRecheck.with_context(errmsg); } else { warn!( "peer {}, build prove request from genesis failed", - self.peer + self.peer_index ); } } } - debug!("block proof verify passed for peer: {}", self.peer); + debug!("block proof verify passed for peer: {}", self.peer_index); Status::ok() } } diff --git a/src/protocols/light_client/components/send_transactions_proof.rs b/src/protocols/light_client/components/send_transactions_proof.rs index df63b2c..3b4b2ee 100644 --- a/src/protocols/light_client/components/send_transactions_proof.rs +++ b/src/protocols/light_client/components/send_transactions_proof.rs @@ -14,7 +14,7 @@ use super::{ pub(crate) struct SendTransactionsProofProcess<'a> { message: packed::SendTransactionsProofReader<'a>, protocol: &'a mut LightClientProtocol, - peer: PeerIndex, + peer_index: PeerIndex, _nc: &'a dyn CKBProtocolContext, } @@ -22,13 +22,13 @@ impl<'a> SendTransactionsProofProcess<'a> { pub(crate) fn new( message: packed::SendTransactionsProofReader<'a>, protocol: &'a mut LightClientProtocol, - peer: PeerIndex, + peer_index: PeerIndex, nc: &'a dyn CKBProtocolContext, ) -> Self { Self { message, protocol, - peer, + peer_index, _nc: nc, } } @@ -37,17 +37,17 @@ impl<'a> SendTransactionsProofProcess<'a> { let status = self.execute_internally(); self.protocol .peers() - .update_txs_proof_request(self.peer, None); + .update_txs_proof_request(self.peer_index, None); status } fn execute_internally(&self) -> Status { - let peer_state = return_if_failed!(self.protocol.get_peer_state(&self.peer)); + let peer = return_if_failed!(self.protocol.get_peer(&self.peer_index)); - let original_request = if let Some(original_request) = peer_state.get_txs_proof_request() { + let 
original_request = if let Some(original_request) = peer.get_txs_proof_request() { original_request } else { - error!("peer {} isn't waiting for a proof", self.peer); + error!("peer {} isn't waiting for a proof", self.peer_index); return StatusCode::PeerIsNotOnProcess.into(); }; @@ -59,12 +59,19 @@ impl<'a> SendTransactionsProofProcess<'a> { && self.message.filtered_blocks().is_empty() && self.message.missing_tx_hashes().is_empty() { - return_if_failed!(self.protocol.process_last_state(self.peer, last_header)); - self.protocol.peers().mark_fetching_txs_timeout(self.peer); + return_if_failed!(self + .protocol + .process_last_state(self.peer_index, last_header)); + self.protocol + .peers() + .mark_fetching_txs_timeout(self.peer_index); return Status::ok(); } else { // Since the last state is different, then no data should be contained. - error!("peer {} send a proof with different last state", self.peer); + error!( + "peer {} send a proof with different last state", + self.peer_index + ); return StatusCode::UnexpectedResponse.into(); } } @@ -92,7 +99,7 @@ impl<'a> SendTransactionsProofProcess<'a> { .into_iter() .collect::>(); if !original_request.check_tx_hashes(&received_tx_hashes, &missing_tx_hashes) { - error!("peer {} send an unknown proof", self.peer); + error!("peer {} send an unknown proof", self.peer_index); return StatusCode::UnexpectedResponse.into(); } @@ -101,7 +108,7 @@ impl<'a> SendTransactionsProofProcess<'a> { if !self.message.proof().is_empty() { error!( "peer {} send a proof when all transactions are missing", - self.peer + self.peer_index ); return StatusCode::UnexpectedResponse.into(); } diff --git a/src/protocols/light_client/mod.rs b/src/protocols/light_client/mod.rs index 454369f..a5269bb 100644 --- a/src/protocols/light_client/mod.rs +++ b/src/protocols/light_client/mod.rs @@ -18,8 +18,8 @@ use ckb_types::{ U256, }; -use faketime::unix_time_as_millis; -use log::{debug, error, info, trace, warn}; +use ckb_systemtime::unix_time_as_millis; +use 
log::{debug, error, info, log_enabled, trace, warn, Level}; mod components; pub mod constant; @@ -34,7 +34,7 @@ pub(crate) use self::peers::FetchInfo; use prelude::*; -pub(crate) use self::peers::{LastState, PeerState, Peers, ProveRequest, ProveState}; +pub(crate) use self::peers::{LastState, Peer, PeerState, Peers, ProveRequest, ProveState}; use super::{ status::{Status, StatusCode}, BAD_MESSAGE_BAN_TIME, @@ -80,23 +80,32 @@ impl CKBProtocolHandler for LightClientProtocol { async fn connected( &mut self, nc: Arc, - peer: PeerIndex, + peer_index: PeerIndex, version: &str, ) { - info!("LightClient({}).connected peer={}", version, peer); - self.peers().add_peer(peer); - self.get_last_state(nc.as_ref(), peer); + info!("LightClient({}).connected peer={}", version, peer_index); + self.peers().add_peer(peer_index); + if let Err(err) = self.get_last_state(nc.as_ref(), peer_index) { + error!( + "failed to request last state from peer={} since {}", + peer_index, err + ); + } } - async fn disconnected(&mut self, _nc: Arc, peer: PeerIndex) { - info!("LightClient.disconnected peer={}", peer); - self.peers().remove_peer(peer); + async fn disconnected( + &mut self, + _nc: Arc, + peer_index: PeerIndex, + ) { + info!("LightClient.disconnected peer={}", peer_index); + self.peers().remove_peer(peer_index); } async fn received( &mut self, nc: Arc, - peer: PeerIndex, + peer_index: PeerIndex, data: Bytes, ) { let msg = match packed::LightClientMessageReader::from_slice(&data) { @@ -104,10 +113,10 @@ impl CKBProtocolHandler for LightClientProtocol { _ => { warn!( "LightClient.received a malformed message from Peer({})", - peer + peer_index ); nc.ban_peer( - peer, + peer_index, BAD_MESSAGE_BAN_TIME, String::from("send us a malformed message"), ); @@ -116,19 +125,8 @@ impl CKBProtocolHandler for LightClientProtocol { }; let item_name = msg.item_name(); - let status = self.try_process(nc.as_ref(), peer, msg); - trace!("LightClient.received peer={}, message={}", peer, item_name); - if let 
Some(ban_time) = status.should_ban() { - error!( - "process {} from {}, ban {:?} since result is {}", - item_name, peer, ban_time, status - ); - nc.ban_peer(peer, ban_time, status.to_string()); - } else if status.should_warn() { - warn!("process {} from {}, result is {}", item_name, peer, status); - } else if !status.is_ok() { - debug!("process {} from {}, result is {}", item_name, peer, status); - } + let status = self.try_process(nc.as_ref(), peer_index, msg); + status.process(nc, peer_index, "LightClient", item_name); } async fn notify(&mut self, nc: Arc, token: u64) { @@ -151,44 +149,55 @@ impl LightClientProtocol { fn try_process( &mut self, nc: &dyn CKBProtocolContext, - peer: PeerIndex, + peer_index: PeerIndex, message: packed::LightClientMessageUnionReader<'_>, ) -> Status { match message { packed::LightClientMessageUnionReader::SendLastState(reader) => { - components::SendLastStateProcess::new(reader, self, peer, nc).execute() + components::SendLastStateProcess::new(reader, self, peer_index, nc).execute() } packed::LightClientMessageUnionReader::SendLastStateProof(reader) => { - components::SendLastStateProofProcess::new(reader, self, peer, nc).execute() + components::SendLastStateProofProcess::new(reader, self, peer_index, nc).execute() } packed::LightClientMessageUnionReader::SendBlocksProof(reader) => { - components::SendBlocksProofProcess::new(reader, self, peer, nc).execute() + components::SendBlocksProofProcess::new(reader, self, peer_index, nc).execute() } packed::LightClientMessageUnionReader::SendTransactionsProof(reader) => { - components::SendTransactionsProofProcess::new(reader, self, peer, nc).execute() + components::SendTransactionsProofProcess::new(reader, self, peer_index, nc) + .execute() } _ => StatusCode::UnexpectedProtocolMessage.into(), } } - fn get_last_state(&self, nc: &dyn CKBProtocolContext, peer: PeerIndex) { + fn get_last_state( + &self, + nc: &dyn CKBProtocolContext, + peer_index: PeerIndex, + ) -> Result<(), Status> { let 
content = packed::GetLastState::new_builder() .subscribe(true.pack()) .build(); let message = packed::LightClientMessage::new_builder() .set(content) .build(); - nc.reply(peer, &message); + self.peers().request_last_state(peer_index)?; + nc.reply(peer_index, &message); + Ok(()) } - fn get_last_state_proof(&self, nc: &dyn CKBProtocolContext, peer: PeerIndex) { + fn get_last_state_proof( + &self, + nc: &dyn CKBProtocolContext, + peer_index: PeerIndex, + ) -> Result { let peer_state = self .peers() - .get_state(&peer) + .get_state(&peer_index) .expect("checked: should have state"); if let Some(last_state) = peer_state.get_last_state() { - let last_header = last_state.verifiable_header(); + let last_header = last_state.as_ref(); let is_proved = peer_state .get_prove_state() @@ -197,7 +206,7 @@ impl LightClientProtocol { // Skipped is the state is proved. if is_proved { - return; + return Ok(false); } // Skipped is the request is sent. @@ -206,34 +215,38 @@ impl LightClientProtocol { .map(|inner| inner.is_same_as(last_header)) .unwrap_or(false); if is_requested { - return; + return Ok(false); } // Skipped if the header is proved in other peers. 
- if let Some((peer_copied_from, prove_state)) = + if let Some((peer_index_copied_from, prove_state)) = self.peers().find_if_a_header_is_proved(last_header) { info!( "peer {}: copy prove state from peer {}", - peer, peer_copied_from + peer_index, peer_index_copied_from ); - self.peers().update_prove_state(peer, prove_state); - return; + self.peers().update_prove_state(peer_index, prove_state)?; + return Ok(false); } if let Some(content) = self.build_prove_request_content(&peer_state, last_header) { - trace!("peer {}: send get last state proof", peer); + trace!("peer {}: send get last state proof", peer_index); let message = packed::LightClientMessage::new_builder() .set(content.clone()) .build(); - nc.reply(peer, &message); - let now = unix_time_as_millis(); - self.peers().update_timestamp(peer, now); + nc.reply(peer_index, &message); let prove_request = ProveRequest::new(last_state.clone(), content); - self.peers().update_prove_request(peer, Some(prove_request)); + self.peers() + .update_prove_request(peer_index, prove_request)?; + Ok(true) } else { - warn!("peer {}: build prove request failed", peer); + warn!("peer {}: build prove request failed", peer_index); + Ok(false) } + } else { + warn!("peer {}: no last state for building request", peer_index); + Ok(false) } } @@ -282,20 +295,24 @@ impl LightClientProtocol { /// Processes a new last state that received from a peer which has a fork chain. fn process_last_state( &self, - peer: PeerIndex, + peer_index: PeerIndex, last_header: VerifiableHeader, ) -> Result<(), Status> { self.check_verifiable_header(&last_header)?; let last_state = LastState::new(last_header); - trace!("peer {}: update last state", peer); - self.peers().update_last_state(peer, last_state); + trace!("peer {}: update last state", peer_index); + self.peers().update_last_state(peer_index, last_state)?; Ok(()) } /// Update the prove state to the child block. /// - Update the peer's cache. /// - Try to update the storage without caring about fork. 
- fn update_prove_state_to_child(&self, peer: PeerIndex, new_prove_state: ProveState) { + fn update_prove_state_to_child( + &self, + peer_index: PeerIndex, + new_prove_state: ProveState, + ) -> Result<(), Status> { let (old_total_difficulty, _) = self.storage.get_last_state(); let new_total_difficulty = new_prove_state.get_last_header().total_difficulty(); if new_total_difficulty > old_total_difficulty { @@ -305,13 +322,17 @@ impl LightClientProtocol { new_prove_state.get_last_headers(), ); } - self.peers().update_prove_state(peer, new_prove_state); + self.peers().update_prove_state(peer_index, new_prove_state) } /// Update the prove state base on the previous request. /// - Update the peer's cache. /// - Try to update the storage and handle potential fork. - pub(crate) fn commit_prove_state(&self, peer: PeerIndex, new_prove_state: ProveState) -> bool { + pub(crate) fn commit_prove_state( + &self, + peer_index: PeerIndex, + new_prove_state: ProveState, + ) -> Result { let (old_total_difficulty, prev_last_header) = self.storage.get_last_state(); let new_total_difficulty = new_prove_state.get_last_header().total_difficulty(); if new_total_difficulty > old_total_difficulty { @@ -369,7 +390,7 @@ impl LightClientProtocol { matched_blocks.clear(); } else { warn!("long fork detected"); - return false; + return Ok(false); } } @@ -379,9 +400,9 @@ impl LightClientProtocol { new_prove_state.get_last_headers(), ); } - self.peers().commit_prove_state(peer, new_prove_state); - - true + self.peers() + .update_prove_state(peer_index, new_prove_state)?; + Ok(true) } } @@ -456,30 +477,213 @@ impl LightClientProtocol { &self.peers } - pub(crate) fn get_peer_state(&self, peer: &PeerIndex) -> Result { - if let Some(state) = self.peers().get_state(peer) { + pub(crate) fn get_peer(&self, peer_index: &PeerIndex) -> Result { + if let Some(state) = self.peers().get_peer(peer_index) { + Ok(state) + } else { + Err(StatusCode::PeerIsNotFound.into()) + } + } + + pub(crate) fn 
get_peer_state(&self, peer_index: &PeerIndex) -> Result { + if let Some(state) = self.peers().get_state(peer_index) { Ok(state) } else { - Err(StatusCode::PeerStateIsNotFound.into()) + Err(StatusCode::PeerIsNotFound.into()) } } fn refresh_all_peers(&mut self, nc: &dyn CKBProtocolContext) { - let now = faketime::unix_time_as_millis(); - for peer in self.peers().get_peers_which_have_timeout(now) { - self.peers().mark_fetching_headers_timeout(peer); - self.peers().mark_fetching_txs_timeout(peer); - - warn!("peer {}: reach timeout", peer); - if let Err(err) = nc.disconnect(peer, "reach timeout") { - error!("disconnect peer({}) error: {}", peer, err); + let now = unix_time_as_millis(); + for peer_index in self.peers().get_peers_which_have_timeout(now) { + self.peers().mark_fetching_headers_timeout(peer_index); + self.peers().mark_fetching_txs_timeout(peer_index); + + warn!("peer {}: reach timeout", peer_index); + if let Err(err) = nc.disconnect(peer_index, "reach timeout") { + error!("disconnect peer({}) error: {}", peer_index, err); }; } - let before = now - constant::REFRESH_PEERS_DURATION.as_millis() as u64; - for peer in self.peers().get_peers_which_require_updating(before) { - // TODO Different messages should have different timeouts. 
- self.get_last_state(nc, peer); - self.get_last_state_proof(nc, peer); + let before_ts = now - constant::REFRESH_PEERS_DURATION.as_millis() as u64; + for index in self.peers().get_peers_which_require_new_state(before_ts) { + if let Err(err) = self.get_last_state(nc, index) { + error!( + "failed to request last state from peer={} since {}", + index, err + ); + } + } + for index in self.peers().get_peers_which_require_new_proof() { + if let Err(err) = self.get_last_state_proof(nc, index) { + error!( + "failed to request last state proof from peer={} since {}", + index, err + ); + } + } + self.finalize_check_points(nc); + } + + fn finalize_check_points(&mut self, nc: &dyn CKBProtocolContext) { + let peers = self.peers(); + let required_peers_count = peers.required_peers_count(); + let mut peers_with_data = peers.get_all_proved_check_points(); + if log_enabled!(Level::Trace) { + for (peer_index, (start_cpindex, check_points)) in peers_with_data.iter() { + trace!( + "check points for peer {} in [{},{}]", + peer_index, + start_cpindex, + start_cpindex + check_points.len() as u32 - 1, + ); + } + } + + if peers_with_data.len() < required_peers_count { + debug!( + "no enough peers for finalizing check points, \ + requires {} but got {}", + required_peers_count, + peers_with_data.len() + ); + return; + } + trace!( + "requires {} peers for finalizing check points and got {}", + required_peers_count, + peers_with_data.len() + ); + let (last_cpindex, last_check_point) = self.storage.get_last_check_point(); + trace!( + "finalized check point is {}, {:#x}", + last_cpindex, + last_check_point + ); + // Clean finalized check points for new proved peers. + { + let mut peers_should_be_skipped = Vec::new(); + for (peer_index, (start_cpindex, check_points)) in peers_with_data.iter_mut() { + if *start_cpindex > last_cpindex { + // Impossible, in fact. 
+ error!( + "peer {} will be banned \ + since start check point {} is later than finalized {}", + peer_index, start_cpindex, last_cpindex + ); + peers_should_be_skipped.push((*peer_index, true)); + continue; + } + let index = (last_cpindex - *start_cpindex) as usize; + if index >= check_points.len() { + peers_should_be_skipped.push((*peer_index, false)); + continue; + } + if check_points[index] != last_check_point { + info!( + "peer {} will be banned \ + since its {}-th check point is {:#x} but finalized is {:#x}", + peer_index, last_cpindex, check_points[index], last_check_point + ); + peers_should_be_skipped.push((*peer_index, true)); + continue; + } + if index > 0 { + check_points.drain(..index); + *start_cpindex = last_cpindex; + peers.remove_first_n_check_points(*peer_index, index); + trace!( + "peer {} remove first {} check points, \ + new start check point is {}, {:#x}", + peer_index, + index, + *start_cpindex, + check_points[0] + ); + } + } + for (peer_index, should_ban) in peers_should_be_skipped { + if should_ban { + nc.ban_peer( + peer_index, + BAD_MESSAGE_BAN_TIME, + String::from("incorrect check points"), + ); + } + peers_with_data.remove(&peer_index); + } + } + if peers_with_data.len() < required_peers_count { + trace!( + "no enough peers for finalizing check points after cleaning, \ + requires {} but got {}", + required_peers_count, + peers_with_data.len() + ); + return; + } + // Find a new check point to finalized. + let check_point_opt = + { + let length_max = { + let mut check_points_sizes = peers_with_data + .values() + .map(|(_cpindex, check_points)| check_points.len()) + .collect::>(); + check_points_sizes.sort(); + check_points_sizes[required_peers_count - 1] + }; + trace!( + "new last check point will be less than or equal to {}", + last_cpindex + length_max as u32 - 1 + ); + let mut check_point_opt = None; + // Q. Why don't check from bigger to smaller? + // A. We have to make sure if all check points are matched. 
+ // To avoid that a bad peer sends us only start checkpoints and last points are correct. + for index in 1..length_max { + let map = peers_with_data + .values() + .map(|(_cpindex, check_points)| check_points.get(index)) + .fold(HashMap::new(), |mut map, cp_opt| { + if let Some(cp) = cp_opt { + *map.entry(cp.clone()).or_default() += 1; + } + map + }); + let count_max = map.values().max().cloned().unwrap_or(0); + if count_max >= required_peers_count { + let cp_opt = map.into_iter().find_map(|(cp, count)| { + if count == count_max { + Some(cp) + } else { + None + } + }); + let cp = cp_opt.expect("checked: must be found"); + if count_max != peers_with_data.len() { + peers_with_data.retain(|_, (_, check_points)| { + matches!(check_points.get(index), Some(tmp) if *tmp == cp) + }); + } + check_point_opt = Some((index, cp)); + } else { + break; + } + } + check_point_opt + }; + if let Some((index, check_point)) = check_point_opt { + let new_last_cpindex = last_cpindex + index as u32; + info!( + "finalize {} new check points, stop at index {}, value {:#x}", + index, new_last_cpindex, check_point + ); + let (_, check_points) = peers_with_data.into_values().next().expect("always exists"); + self.storage + .update_check_points(last_cpindex + 1, &check_points[1..=index]); + self.storage.update_max_check_point_index(new_last_cpindex); + } else { + info!("no check point is found which could be finalized"); } } @@ -515,13 +719,13 @@ impl LightClientProtocol { .get_headers_to_fetch() .chunks(GET_BLOCKS_PROOF_LIMIT) { - if let Some(peer) = best_peers.iter().find(|peer| { + if let Some(peer_index) = best_peers.iter().find(|peer_index| { self.peers - .get_state(peer) - .map(|peer_state| peer_state.get_blocks_proof_request().is_none()) + .get_peer(peer_index) + .map(|peer| peer.get_blocks_proof_request().is_none()) .unwrap_or(false) }) { - debug!("send block proof request to peer: {}", peer); + debug!("send block proof request to peer: {}", peer_index); let mut block_hashes = 
Vec::with_capacity(block_hashes_all.len()); for block_hash in block_hashes_all { if block_hash == &last_hash { @@ -544,10 +748,13 @@ impl LightClientProtocol { .build() .as_bytes(); - self.peers.update_blocks_proof_request(*peer, Some(content)); - if let Err(err) = - nc.send_message(SupportProtocols::LightClient.protocol_id(), *peer, message) - { + self.peers + .update_blocks_proof_request(*peer_index, Some(content)); + if let Err(err) = nc.send_message( + SupportProtocols::LightClient.protocol_id(), + *peer_index, + message, + ) { let error_message = format!("nc.send_message LightClientMessage, error: {:?}", err); error!("{}", error_message); @@ -565,13 +772,13 @@ impl LightClientProtocol { .get_txs_to_fetch() .chunks(GET_TRANSACTIONS_PROOF_LIMIT) { - if let Some(peer) = best_peers.iter().find(|peer| { + if let Some(peer_index) = best_peers.iter().find(|peer_index| { self.peers - .get_state(peer) - .map(|peer_state| peer_state.get_txs_proof_request().is_none()) + .get_peer(peer_index) + .map(|peer| peer.get_txs_proof_request().is_none()) .unwrap_or(false) }) { - debug!("send transaction proof request to peer: {}", peer); + debug!("send transaction proof request to peer: {}", peer_index); let content = packed::GetTransactionsProof::new_builder() .tx_hashes(tx_hashes.to_vec().pack()) .last_hash(last_hash.clone()) @@ -580,10 +787,11 @@ impl LightClientProtocol { .set(content.clone()) .build(); - self.peers.update_txs_proof_request(*peer, Some(content)); + self.peers + .update_txs_proof_request(*peer_index, Some(content)); if let Err(err) = nc.send_message( SupportProtocols::LightClient.protocol_id(), - *peer, + *peer_index, message.as_bytes(), ) { let error_message = diff --git a/src/protocols/light_client/peers.rs b/src/protocols/light_client/peers.rs index a6482a9..de3eade 100644 --- a/src/protocols/light_client/peers.rs +++ b/src/protocols/light_client/peers.rs @@ -1,17 +1,23 @@ use ckb_network::PeerIndex; +use ckb_systemtime::unix_time_as_millis; use ckb_types::{ 
- core::HeaderView, packed, packed::Byte32, prelude::*, - utilities::merkle_mountain_range::VerifiableHeader, H256, + core::{BlockNumber, HeaderView}, + packed, + packed::Byte32, + prelude::*, + utilities::merkle_mountain_range::VerifiableHeader, + H256, U256, }; use dashmap::DashMap; -use faketime::unix_time_as_millis; -use std::collections::{HashMap, HashSet}; -use std::sync::RwLock; +use std::{ + collections::{HashMap, HashSet}, + fmt, mem, + sync::RwLock, +}; use super::prelude::*; -use crate::protocols::MESSAGE_TIMEOUT; +use crate::protocols::{Status, StatusCode, MESSAGE_TIMEOUT}; -#[derive(Default)] pub struct Peers { inner: DashMap, // verified last N block headers @@ -25,13 +31,28 @@ pub struct Peers { // * if the block is proved // * the downloaded block matched_blocks: RwLock)>>, + + // Data: + // - Cached check point index. + // - Block filter hashes between current cached check point and next cached check point. + // - Exclude the cached check point. + // - Include at the next cached check point. + cached_block_filter_hashes: RwLock<(u32, Vec)>, + + max_outbound_peers: u32, + check_point_interval: BlockNumber, + start_check_point: (u32, packed::Byte32), } -#[derive(Default, Clone)] +#[derive(Clone)] pub struct Peer { // The peer is just discovered when it's `None`. 
state: PeerState, - update_timestamp: u64, + blocks_proof_request: Option, + blocks_request: Option, + txs_proof_request: Option, + check_points: CheckPoints, + latest_block_filter_hashes: LatestBlockFilterHashes, } pub struct FetchInfo { @@ -45,20 +66,64 @@ pub struct FetchInfo { missing: bool, } -#[derive(Clone, Debug)] +#[derive(Clone)] pub(crate) struct LastState { header: VerifiableHeader, + update_ts: u64, } -#[derive(Clone, Default)] -pub(crate) struct PeerState { - // Save the header instead of the request message - last_state: Option, - prove_request: Option, - prove_state: Option, - blocks_proof_request: Option, - blocks_request: Option, - txs_proof_request: Option, +/* + * ```plantuml + * @startuml + * state "Initialized" as st1 + * state "RequestFirstLastState" as st2 + * state "OnlyHasLastState" as st3 + * state "RequestFirstLastStateProof" as st4 + * state "Ready" as st5 + * state "RequestNewLastState" as st6 + * state "RequestNewLastStateProof" as st7 + * + * [*] -> st1 : Connect Peer + * st1 -> st2 : Send GetLastState + * st2 -D-> st3 : Receive SendLastState + * st3 -> st4 : Send GetLastStateProof + * st4 -> st5 : Receive SendLastStateProof + * st5 -U-> st6 : Send GetLastState + * st6 -> st5 : Receive SendLastState + * st5 -D-> st7 : Send GetLastStateProof + * st7 -> st5 : Receive SendLastStateProof + * @enduml + * ``` + */ +#[derive(Clone)] +pub(crate) enum PeerState { + Initialized, + RequestFirstLastState { + when_sent: u64, + }, + OnlyHasLastState { + last_state: LastState, + }, + RequestFirstLastStateProof { + last_state: LastState, + request: ProveRequest, + when_sent: u64, + }, + Ready { + last_state: LastState, + prove_state: ProveState, + }, + RequestNewLastState { + last_state: LastState, + prove_state: ProveState, + when_sent: u64, + }, + RequestNewLastStateProof { + last_state: LastState, + prove_state: ProveState, + request: ProveRequest, + when_sent: u64, + }, } #[derive(Clone)] @@ -69,7 +134,7 @@ pub(crate) struct ProveRequest {
long_fork_detected: bool, } -#[derive(Clone, Debug)] +#[derive(Clone)] pub(crate) struct ProveState { last_state: LastState, reorg_last_headers: Vec, @@ -95,6 +160,23 @@ pub(crate) struct TransactionsProofRequest { when_sent: u64, } +#[derive(Clone)] +pub(crate) struct CheckPoints { + check_point_interval: BlockNumber, + // The index of the first check point in the memory. + index_of_first_check_point: u32, + // Exists at least 1 check point. + // N.B. Do NOT leak any API that could make this vector be empty. + inner: Vec, +} + +#[derive(Clone)] +pub(crate) struct LatestBlockFilterHashes { + // The previous block number of the first block filter hash. + check_point_number: BlockNumber, + inner: Vec, +} + impl FetchInfo { #[cfg(test)] pub fn new(added_ts: u64, first_sent: u64, timeout: bool, missing: bool) -> FetchInfo { @@ -129,13 +211,60 @@ impl AsRef for LastState { } } +impl fmt::Display for LastState { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let header = self.header.header(); + if f.alternate() { + write!( + f, + "LastState {{ num: {}, hash: {:#x}, ts: {} }}", + header.number(), + header.hash(), + self.update_ts + ) + } else { + write!(f, "{}", header.number()) + } + } +} + impl LastState { pub(crate) fn new(header: VerifiableHeader) -> LastState { - LastState { header } + LastState { + header, + update_ts: unix_time_as_millis(), + } + } + + pub(crate) fn total_difficulty(&self) -> U256 { + self.as_ref().total_difficulty() } - pub(crate) fn verifiable_header(&self) -> &VerifiableHeader { - self.as_ref() + pub(crate) fn header(&self) -> &HeaderView { + self.as_ref().header() + } +} + +impl fmt::Display for ProveRequest { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let tau_status = if self.skip_check_tau { + "skipped" + } else { + "normal" + }; + if f.alternate() { + write!( + f, + "ProveRequest {{ last_state: {:#}, tau: {}, fork: {} }}", + self.last_state, tau_status, self.long_fork_detected, + ) + } else { + write!( + f, + "{} 
(tau: {}, fork: {})", + self.last_state, tau_status, self.long_fork_detected, + ) + } } } @@ -150,7 +279,7 @@ impl ProveRequest { } pub(crate) fn get_last_header(&self) -> &VerifiableHeader { - self.last_state.verifiable_header() + self.last_state.as_ref() } pub(crate) fn is_same_as(&self, another: &VerifiableHeader) -> bool { @@ -178,6 +307,39 @@ impl ProveRequest { } } +impl fmt::Display for ProveState { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + if f.alternate() { + write!(f, "ProveState {{ last_state: {:#}", self.last_state)?; + if self.reorg_last_headers.is_empty() { + write!(f, ", reorg: None")?; + } else { + let len = self.reorg_last_headers.len(); + let start = self.reorg_last_headers[0].number(); + let end = self.reorg_last_headers[len - 1].number(); + write!(f, ", reorg: [{}, {}]", start, end)?; + } + if self.last_headers.is_empty() { + write!(f, ", last: None")?; + } else { + let len = self.last_headers.len(); + let start = self.last_headers[0].number(); + let end = self.last_headers[len - 1].number(); + write!(f, ", last: [{}, {}]", start, end)?; + } + write!(f, " }}") + } else { + write!( + f, + "{} (reorg: {}, last: {})", + self.last_state, + self.reorg_last_headers.len(), + self.last_headers.len() + ) + } + } +} + impl ProveState { pub(crate) fn new_from_request( request: ProveRequest, @@ -211,11 +373,11 @@ impl ProveState { pub(crate) fn is_parent_of(&self, child_last_state: &LastState) -> bool { self.get_last_header() .header() - .is_parent_of(child_last_state.verifiable_header().header()) + .is_parent_of(child_last_state.header()) } pub(crate) fn get_last_header(&self) -> &VerifiableHeader { - self.last_state.verifiable_header() + self.last_state.as_ref() } pub(crate) fn is_same_as(&self, another: &VerifiableHeader) -> bool { @@ -321,49 +483,586 @@ impl TransactionsProofRequest { } } +impl CheckPoints { + fn new( + check_point_interval: BlockNumber, + index_of_first_check_point: u32, + first_check_point: packed::Byte32, + ) -> Self 
{ + Self { + check_point_interval, + index_of_first_check_point, + inner: vec![first_check_point], + } + } + + fn get_start_index(&self) -> u32 { + self.index_of_first_check_point + } + + fn get_check_points(&self) -> Vec { + self.inner.clone() + } + + fn number_of_first_check_point(&self) -> BlockNumber { + self.check_point_interval * BlockNumber::from(self.index_of_first_check_point) + } + + fn number_of_last_check_point(&self) -> BlockNumber { + let first = self.number_of_first_check_point(); + let count = self.inner.len() as BlockNumber; + first + self.check_point_interval * (count - 1) + } + + fn number_of_next_check_point(&self) -> BlockNumber { + self.number_of_last_check_point() + } + + fn if_require_next_check_point(&self, last_proved_number: BlockNumber) -> bool { + self.number_of_next_check_point() + self.check_point_interval * 2 <= last_proved_number + } + + fn add_check_points( + &mut self, + last_proved_number: BlockNumber, + start_number: BlockNumber, + check_points: &[packed::Byte32], + ) -> Result, Status> { + if check_points.is_empty() { + return Err(StatusCode::CheckPointsIsEmpty.into()); + } + if start_number % self.check_point_interval != 0 { + let errmsg = format!( + "check points should at `{} * N` but got {}", + self.check_point_interval, start_number + ); + return Err(StatusCode::CheckPointsIsUnaligned.with_context(errmsg)); + } + let next_number = self.number_of_next_check_point(); + if start_number != next_number { + let errmsg = format!( + "expect starting from {} but got {}", + next_number, start_number + ); + return Err(StatusCode::CheckPointsIsUnexpected.with_context(errmsg)); + } + let prev_last_check_point = &self.inner[self.inner.len() - 1]; + let curr_first_check_point = &check_points[0]; + if prev_last_check_point != curr_first_check_point { + let errmsg = format!( + "expect hash for number {} is {:#x} but got {:#x}", + start_number, prev_last_check_point, curr_first_check_point + ); + return 
Err(StatusCode::CheckPointsIsUnexpected.with_context(errmsg)); + } + if check_points.len() < 2 { + let errmsg = format!( + "expect at least 2 check points but got only {}", + check_points.len() + ); + return Err(StatusCode::CheckPointsIsUnexpected.with_context(errmsg)); + } + let check_points_len = check_points.len() as BlockNumber; + if start_number + self.check_point_interval * check_points_len <= last_proved_number { + self.inner.extend_from_slice(&check_points[1..]); + } else if check_points.len() > 2 { + let end = check_points.len() - 2; + self.inner.extend_from_slice(&check_points[1..=end]); + } + if self.if_require_next_check_point(last_proved_number) { + Ok(Some(self.number_of_next_check_point())) + } else { + Ok(None) + } + } + + fn remove_first_n_check_points(&mut self, n: usize) { + self.index_of_first_check_point += n as u32; + self.inner.drain(..n); + } +} + +impl LatestBlockFilterHashes { + fn new(check_point_number: BlockNumber) -> Self { + Self { + check_point_number, + inner: Vec::new(), + } + } + + #[cfg(test)] + fn mock(check_point_number: BlockNumber, inner: Vec) -> Self { + Self { + check_point_number, + inner, + } + } + + fn get_check_point_number(&self) -> BlockNumber { + self.check_point_number + } + + fn get_last_number(&self) -> BlockNumber { + self.get_check_point_number() + self.inner.len() as BlockNumber + } + + fn get_hashes(&self) -> Vec { + self.inner.clone() + } + + fn clear(&mut self) { + self.inner.clear(); + } + + fn reset(&mut self, new_check_point_number: BlockNumber) { + self.check_point_number = new_check_point_number; + self.clear(); + } + + fn update_latest_block_filter_hashes( + &mut self, + last_proved_number: BlockNumber, + finalized_check_point_number: BlockNumber, + finalized_check_point: &packed::Byte32, + start_number: BlockNumber, + parent_block_filter_hash: &packed::Byte32, + mut block_filter_hashes: &[packed::Byte32], + ) -> Result, Status> { + if block_filter_hashes.is_empty() { + return 
Err(StatusCode::BlockFilterHashesIsEmpty.into()); + } + // Check block numbers. + if finalized_check_point_number >= last_proved_number { + let errmsg = format!( + "finalized check point ({}) is not less than proved number ({})", + finalized_check_point_number, last_proved_number + ); + return Err(StatusCode::Ignore.with_context(errmsg)); + } + let check_point_number = self.get_check_point_number(); + if finalized_check_point_number != check_point_number { + let errmsg = format!( + "finalized check point ({}) is not same as cached ({})", + finalized_check_point_number, check_point_number + ); + return Err(StatusCode::Ignore.with_context(errmsg)); + } + let mut end_number = start_number + block_filter_hashes.len() as BlockNumber - 1; + if finalized_check_point_number >= end_number { + let errmsg = format!( + "finalized check point ({}) is not less than end number ({})", + finalized_check_point_number, end_number, + ); + return Err(StatusCode::Ignore.with_context(errmsg)); + } + if start_number > last_proved_number { + let errmsg = format!( + "start number ({}) is greater than the proved number ({})", + start_number, last_proved_number + ); + return Err(StatusCode::Ignore.with_context(errmsg)); + } + let last_filter_number = self.get_last_number(); + if start_number > last_filter_number + 1 { + let errmsg = format!( + "start number ({}) is not continuous with last filter block number ({})", + start_number, last_filter_number + ); + return Err(StatusCode::Ignore.with_context(errmsg)); + } + if end_number > last_proved_number { + let diff = end_number - last_proved_number; + let new_length = block_filter_hashes.len() - diff as usize; + block_filter_hashes = &block_filter_hashes[..new_length]; + end_number = last_proved_number; + } + // Check block filter hashes. 
+ let (start_index_for_old, start_index_for_new) = if start_number + <= finalized_check_point_number + { + let diff = finalized_check_point_number - start_number; + let index = diff as usize; + let check_hash = &block_filter_hashes[index]; + if check_hash != finalized_check_point { + let errmsg = format!( + "check point for block {} is {:#x} but check hash is {:#}", + finalized_check_point_number, finalized_check_point, check_hash + ); + return Err(StatusCode::BlockFilterHashesIsUnexpected.with_context(errmsg)); + } + (0, index + 1) + } else if start_number == finalized_check_point_number + 1 { + if parent_block_filter_hash != finalized_check_point { + let errmsg = format!( + "check point for block {} is {:#x} but parent hash is {:#}", + finalized_check_point_number, finalized_check_point, parent_block_filter_hash + ); + return Err(StatusCode::BlockFilterHashesIsUnexpected.with_context(errmsg)); + } + (0, 0) + } else { + let diff = start_number - finalized_check_point_number; + let index = diff as usize - 2; + let filter_hash = &self.inner[index]; + if filter_hash != parent_block_filter_hash { + let errmsg = format!( + "filter hash for block {} is {:#x} but parent hash is {:#}", + start_number - 1, + filter_hash, + parent_block_filter_hash + ); + return Err(StatusCode::BlockFilterHashesIsUnexpected.with_context(errmsg)); + } + (index + 1, 0) + }; + for (index, (old_hash, new_hash)) in self.inner[start_index_for_old..] + .iter() + .zip(block_filter_hashes[start_index_for_new..].iter()) + .enumerate() + { + if old_hash != new_hash { + let number = start_number + (start_index_for_old + index) as BlockNumber; + let errmsg = format!( + "old filter hash for block {} is {:#x} but new is {:#}", + number, old_hash, new_hash + ); + return Err(StatusCode::Ignore.with_context(errmsg)); + } + } + // Update block filter hashes. 
+ let index = start_index_for_new + self.inner[start_index_for_old..].len(); + self.inner.extend_from_slice(&block_filter_hashes[index..]); + if end_number < last_proved_number { + Ok(Some(end_number + 1)) + } else { + Ok(None) + } + } +} + +impl Default for PeerState { + fn default() -> Self { + Self::Initialized + } +} + +impl fmt::Display for PeerState { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let fullname = format!("PeerState::{}", self.name()); + if f.alternate() { + match self { + Self::Initialized => { + write!(f, "{}", fullname) + } + Self::RequestFirstLastState { when_sent } => { + write!(f, "{} {{ when_sent: {} }}", fullname, when_sent) + } + Self::OnlyHasLastState { last_state } => { + write!(f, "{} {{ last_state: {} }}", fullname, last_state) + } + Self::RequestFirstLastStateProof { + last_state, + request, + when_sent, + } => { + write!(f, "{} {{ last_state: {}", fullname, last_state)?; + write!(f, ", request: {}", request)?; + write!(f, ", when_sent: {}", when_sent)?; + write!(f, "}}") + } + Self::Ready { + last_state, + prove_state, + } => { + write!(f, "{} {{ last_state: {}", fullname, last_state)?; + write!(f, ", prove_state: {}", prove_state)?; + write!(f, "}}") + } + Self::RequestNewLastState { + last_state, + prove_state, + when_sent, + } => { + write!(f, "{} {{ last_state: {}", fullname, last_state)?; + write!(f, ", prove_state: {}", prove_state)?; + write!(f, ", when_sent: {}", when_sent)?; + write!(f, "}}") + } + Self::RequestNewLastStateProof { + last_state, + prove_state, + request, + when_sent, + } => { + write!(f, "{} {{ last_state: {}", fullname, last_state)?; + write!(f, ", prove_state: {}", prove_state)?; + write!(f, ", request: {}", request)?; + write!(f, ", when_sent: {}", when_sent)?; + write!(f, "}}") + } + } + } else { + match self { + Self::Initialized | Self::RequestFirstLastState { .. } => { + write!(f, "{}", fullname) + } + Self::OnlyHasLastState { last_state, .. 
} + | Self::RequestFirstLastStateProof { last_state, .. } + | Self::Ready { last_state, .. } + | Self::RequestNewLastState { last_state, .. } + | Self::RequestNewLastStateProof { last_state, .. } => { + write!(f, "{} {{ last_state: {} }}", fullname, last_state) + } + } + } + } +} + impl PeerState { + fn name(&self) -> &'static str { + match self { + Self::Initialized => "Initialized", + Self::RequestFirstLastState { .. } => "RequestFirstLastState", + Self::OnlyHasLastState { .. } => "OnlyHasLastState", + Self::RequestFirstLastStateProof { .. } => "RequestFirstLastStateProof", + Self::Ready { .. } => "Ready", + Self::RequestNewLastState { .. } => "RequestNewLastState", + Self::RequestNewLastStateProof { .. } => "RequestNewLastStateProof", + } + } + + fn take(&mut self) -> Self { + let mut ret = Self::Initialized; + mem::swap(self, &mut ret); + ret + } + pub(crate) fn get_last_state(&self) -> Option<&LastState> { - self.last_state.as_ref() + match self { + Self::Initialized | Self::RequestFirstLastState { .. } => None, + Self::OnlyHasLastState { ref last_state, .. } + | Self::RequestFirstLastStateProof { ref last_state, .. } + | Self::Ready { ref last_state, .. } + | Self::RequestNewLastState { ref last_state, .. } + | Self::RequestNewLastStateProof { ref last_state, .. } => Some(last_state), + } } pub(crate) fn get_prove_request(&self) -> Option<&ProveRequest> { - self.prove_request.as_ref() + match self { + Self::RequestFirstLastStateProof { ref request, .. } + | Self::RequestNewLastStateProof { ref request, .. } => Some(request), + Self::Initialized + | Self::OnlyHasLastState { .. } + | Self::RequestFirstLastState { .. } + | Self::Ready { .. } + | Self::RequestNewLastState { .. } => None, + } } pub(crate) fn get_prove_state(&self) -> Option<&ProveState> { - self.prove_state.as_ref() + match self { + Self::Ready { + ref prove_state, .. + } + | Self::RequestNewLastState { + ref prove_state, .. + } + | Self::RequestNewLastStateProof { + ref prove_state, .. 
+ } => Some(prove_state), + Self::Initialized + | Self::RequestFirstLastState { .. } + | Self::OnlyHasLastState { .. } + | Self::RequestFirstLastStateProof { .. } => None, + } } - pub(crate) fn get_blocks_proof_request(&self) -> Option<&BlocksProofRequest> { - self.blocks_proof_request.as_ref() + fn request_last_state(self, when_sent: u64) -> Result { + match self { + Self::Initialized => { + let new_state = Self::RequestFirstLastState { when_sent }; + Ok(new_state) + } + Self::Ready { + last_state, + prove_state, + } => { + let new_state = Self::RequestNewLastState { + last_state, + prove_state, + when_sent, + }; + Ok(new_state) + } + _ => { + let errmsg = format!("{} request last state", self); + Err(StatusCode::IncorrectLastState.with_context(errmsg)) + } + } } - pub(crate) fn get_blocks_request(&self) -> Option<&BlocksRequest> { - self.blocks_request.as_ref() + + fn receive_last_state(mut self, new_last_state: LastState) -> Result { + match self { + Self::RequestFirstLastState { .. } => { + let new_state = Self::OnlyHasLastState { + last_state: new_last_state, + }; + Ok(new_state) + } + Self::RequestNewLastState { prove_state, .. } => { + let new_state = Self::Ready { + last_state: new_last_state, + prove_state, + }; + Ok(new_state) + } + Self::OnlyHasLastState { ref mut last_state } + | Self::RequestFirstLastStateProof { + ref mut last_state, .. + } + | Self::Ready { + ref mut last_state, .. + } + | Self::RequestNewLastStateProof { + ref mut last_state, .. + } => { + *last_state = new_last_state; + Ok(self) + } + _ => { + let errmsg = format!("{} receive last state", self); + Err(StatusCode::IncorrectLastState.with_context(errmsg)) + } + } } - pub(crate) fn get_txs_proof_request(&self) -> Option<&TransactionsProofRequest> { - self.txs_proof_request.as_ref() + + fn request_last_state_proof( + mut self, + new_request: ProveRequest, + new_when_sent: u64, + ) -> Result { + match self { + Self::OnlyHasLastState { last_state, .. 
} => { + let new_state = Self::RequestFirstLastStateProof { + last_state, + request: new_request, + when_sent: new_when_sent, + }; + Ok(new_state) + } + Self::Ready { + last_state, + prove_state, + .. + } => { + let new_state = Self::RequestNewLastStateProof { + last_state, + prove_state, + request: new_request, + when_sent: new_when_sent, + }; + Ok(new_state) + } + Self::RequestFirstLastStateProof { + ref mut request, + ref mut when_sent, + .. + } + | Self::RequestNewLastStateProof { + ref mut request, + ref mut when_sent, + .. + } => { + *request = new_request; + *when_sent = new_when_sent; + Ok(self) + } + _ => { + let errmsg = format!("{} request last state proof", self); + Err(StatusCode::IncorrectLastState.with_context(errmsg)) + } + } } - fn update_last_state(&mut self, last_state: LastState) { - self.last_state = Some(last_state); + fn receive_last_state_proof(self, new_prove_state: ProveState) -> Result { + match self { + Self::OnlyHasLastState { last_state } + | Self::RequestFirstLastStateProof { last_state, .. } + | Self::Ready { last_state, .. } + | Self::RequestNewLastStateProof { last_state, .. } => { + let new_state = Self::Ready { + last_state, + prove_state: new_prove_state, + }; + Ok(new_state) + } + _ => { + let errmsg = format!("{} receive last state proof", self); + Err(StatusCode::IncorrectLastState.with_context(errmsg)) + } + } } - fn update_prove_request(&mut self, request: Option) { - self.prove_request = request; + fn require_new_last_state(&self, before_ts: u64) -> bool { + self.get_last_state() + .map(|last_state| last_state.update_ts < before_ts) + .unwrap_or(true) + } + + fn require_new_last_state_proof(&self) -> bool { + match self { + Self::Ready { + ref last_state, + ref prove_state, + } => !prove_state.is_same_as(last_state.as_ref()), + Self::OnlyHasLastState { .. } => true, + Self::Initialized + | Self::RequestFirstLastState { .. } + | Self::RequestFirstLastStateProof { .. } + | Self::RequestNewLastState { .. 
} + | Self::RequestNewLastStateProof { .. } => false, + } } - fn update_prove_state(&mut self, state: ProveState) { - self.prove_state = Some(state); + fn when_sent_request(&self) -> Option { + match self { + Self::Initialized | Self::OnlyHasLastState { .. } | Self::Ready { .. } => None, + Self::RequestFirstLastState { when_sent } + | Self::RequestFirstLastStateProof { when_sent, .. } + | Self::RequestNewLastState { when_sent, .. } + | Self::RequestNewLastStateProof { when_sent, .. } => Some(*when_sent), + } } +} - fn update_blocks_proof_request(&mut self, request: Option) { - self.blocks_proof_request = request; +impl Peer { + fn new(check_point_interval: BlockNumber, start_check_point: (u32, packed::Byte32)) -> Self { + let check_points = CheckPoints::new( + check_point_interval, + start_check_point.0, + start_check_point.1, + ); + let check_point_number = check_point_interval * BlockNumber::from(start_check_point.0); + let latest_block_filter_hashes = LatestBlockFilterHashes::new(check_point_number); + Self { + state: Default::default(), + blocks_proof_request: None, + blocks_request: None, + txs_proof_request: None, + check_points, + latest_block_filter_hashes, + } } - fn update_blocks_request(&mut self, request: Option) { - self.blocks_request = request; + + pub(crate) fn get_blocks_proof_request(&self) -> Option<&BlocksProofRequest> { + self.blocks_proof_request.as_ref() } - fn update_txs_proof_request(&mut self, request: Option) { - self.txs_proof_request = request; + pub(crate) fn get_blocks_request(&self) -> Option<&BlocksRequest> { + self.blocks_request.as_ref() + } + pub(crate) fn get_txs_proof_request(&self) -> Option<&TransactionsProofRequest> { + self.txs_proof_request.as_ref() } fn add_block(&mut self, block_hash: &Byte32) { @@ -376,31 +1075,44 @@ impl PeerState { false }; if finished { - self.update_blocks_request(None); - } - } -} - -impl Peer { - fn new(update_timestamp: u64) -> Self { - Self { - state: Default::default(), - update_timestamp, + 
self.blocks_request = None; } } } impl Peers { - // only used in unit tests now - #[cfg(test)] - pub fn new(last_headers: RwLock>) -> Self { + pub fn new( + max_outbound_peers: u32, + check_point_interval: BlockNumber, + start_check_point: (u32, packed::Byte32), + ) -> Self { Self { inner: Default::default(), - last_headers, + last_headers: Default::default(), fetching_headers: DashMap::new(), fetching_txs: DashMap::new(), matched_blocks: Default::default(), + cached_block_filter_hashes: Default::default(), + max_outbound_peers, + check_point_interval, + start_check_point, + } + } + + pub(crate) fn required_peers_count(&self) -> usize { + let required_peers_count = ((self.get_max_outbound_peers() + 1) / 2) as usize; + if required_peers_count == 0 { + panic!("max outbound peers shouldn't be zero!"); } + required_peers_count + } + + pub(crate) fn calc_check_point_number(&self, index: u32) -> BlockNumber { + self.check_point_interval * BlockNumber::from(index) + } + + fn calc_best_check_point_index_not_greater_than(&self, number: BlockNumber) -> u32 { + (number / self.check_point_interval) as u32 } pub(crate) fn last_headers(&self) -> &RwLock> { @@ -453,9 +1165,9 @@ impl Peers { } } // mark all fetching hashes (headers/txs) as timeout - pub(crate) fn mark_fetching_headers_timeout(&self, peer: PeerIndex) { - if let Some(peer_state) = self.get_state(&peer) { - if let Some(request) = peer_state.get_blocks_proof_request() { + pub(crate) fn mark_fetching_headers_timeout(&self, peer_index: PeerIndex) { + if let Some(peer) = self.get_peer(&peer_index) { + if let Some(request) = peer.get_blocks_proof_request() { for block_hash in request.block_hashes() { if let Some(mut pair) = self.fetching_headers.get_mut(&block_hash.pack()) { pair.value_mut().timeout = true; @@ -464,9 +1176,9 @@ impl Peers { } } } - pub(crate) fn mark_fetching_txs_timeout(&self, peer: PeerIndex) { - if let Some(peer_state) = self.get_state(&peer) { - if let Some(request) = 
peer_state.get_txs_proof_request() { + pub(crate) fn mark_fetching_txs_timeout(&self, peer_index: PeerIndex) { + if let Some(peer) = self.get_peer(&peer_index) { + if let Some(request) = peer.get_txs_proof_request() { for tx_hash in request.tx_hashes() { if let Some(mut pair) = self.fetching_txs.get_mut(&tx_hash.pack()) { pair.value_mut().timeout = true; @@ -500,9 +1212,12 @@ impl Peers { &self.matched_blocks } + pub(crate) fn get_max_outbound_peers(&self) -> u32 { + self.max_outbound_peers + } + pub(crate) fn add_peer(&self, index: PeerIndex) { - let now = unix_time_as_millis(); - let peer = Peer::new(now); + let peer = Peer::new(self.check_point_interval, self.start_check_point.clone()); self.inner.insert(index, peer); } @@ -520,45 +1235,83 @@ impl Peers { self.inner.get(index).map(|peer| peer.state.clone()) } - pub(crate) fn update_last_state(&self, index: PeerIndex, last_state: LastState) { - if let Some(mut peer) = self.inner.get_mut(&index) { - peer.state.update_last_state(last_state); - } + pub(crate) fn get_peer(&self, index: &PeerIndex) -> Option { + self.inner.get(index).map(|peer| peer.clone()) } - pub(crate) fn update_timestamp(&self, index: PeerIndex, timestamp: u64) { + #[cfg(test)] + pub(crate) fn mock_prove_request( + &self, + index: PeerIndex, + request: ProveRequest, + ) -> Result<(), Status> { + let last_state = LastState::new(request.get_last_header().to_owned()); + self.request_last_state(index)?; + self.update_last_state(index, last_state)?; + self.update_prove_request(index, request) + } + + #[cfg(test)] + pub(crate) fn mock_prove_state( + &self, + index: PeerIndex, + tip_header: VerifiableHeader, + ) -> Result<(), Status> { + let last_state = LastState::new(tip_header); + let request = ProveRequest::new(last_state.clone(), Default::default()); + let prove_state = + ProveState::new_from_request(request.clone(), Default::default(), Default::default()); + self.request_last_state(index)?; + self.update_last_state(index, last_state)?; + 
self.update_prove_request(index, request)?; + self.update_prove_state(index, prove_state) + } + + pub(crate) fn request_last_state(&self, index: PeerIndex) -> Result<(), Status> { if let Some(mut peer) = self.inner.get_mut(&index) { - peer.update_timestamp = timestamp; + let now = unix_time_as_millis(); + peer.state = peer.state.take().request_last_state(now)?; } + Ok(()) } - pub(crate) fn update_prove_request(&self, index: PeerIndex, request: Option) { - let now = unix_time_as_millis(); + pub(crate) fn update_last_state( + &self, + index: PeerIndex, + last_state: LastState, + ) -> Result<(), Status> { if let Some(mut peer) = self.inner.get_mut(&index) { - peer.state.update_prove_request(request); - peer.update_timestamp = now; + peer.state = peer.state.take().receive_last_state(last_state)?; } + Ok(()) } - /// Update the prove state without any requests. - pub(crate) fn update_prove_state(&self, index: PeerIndex, state: ProveState) { - let now = unix_time_as_millis(); + pub(crate) fn update_prove_request( + &self, + index: PeerIndex, + request: ProveRequest, + ) -> Result<(), Status> { if let Some(mut peer) = self.inner.get_mut(&index) { - peer.state.update_prove_state(state); - peer.update_timestamp = now; + let now = unix_time_as_millis(); + peer.state = peer.state.take().request_last_state_proof(request, now)?; } + Ok(()) } - /// Commit the prove state from the previous request. 
- pub(crate) fn commit_prove_state(&self, index: PeerIndex, state: ProveState) { + pub(crate) fn update_prove_state( + &self, + index: PeerIndex, + state: ProveState, + ) -> Result<(), Status> { *self.last_headers.write().expect("poisoned") = state.get_last_headers().to_vec(); - - let now = unix_time_as_millis(); if let Some(mut peer) = self.inner.get_mut(&index) { - peer.state.update_prove_state(state); - peer.state.update_prove_request(None); - peer.update_timestamp = now; + let has_reorg = !state.reorg_last_headers.is_empty(); + peer.state = peer.state.take().receive_last_state_proof(state)?; + if has_reorg { + peer.latest_block_filter_hashes.clear(); + } } + Ok(()) } pub(crate) fn add_block( @@ -568,7 +1321,7 @@ impl Peers { ) -> Option { let block_hash = block.header().calc_header_hash(); for mut pair in self.inner.iter_mut() { - pair.value_mut().state.add_block(&block_hash); + pair.value_mut().add_block(&block_hash); } matched_blocks .get_mut(&block_hash.unpack()) @@ -646,8 +1399,8 @@ impl Peers { ) -> Vec { let mut proof_requested_hashes = HashSet::new(); for pair in self.inner.iter() { - let peer_state = &pair.value().state; - if let Some(req) = peer_state.get_blocks_proof_request() { + let peer = &pair.value(); + if let Some(req) = peer.get_blocks_proof_request() { for hash in req.block_hashes() { proof_requested_hashes.insert(hash); } @@ -673,8 +1426,8 @@ impl Peers { ) -> Vec { let mut block_requested_hashes = HashSet::new(); for pair in self.inner.iter() { - let peer_state = &pair.value().state; - if let Some(req) = peer_state.get_blocks_request() { + let peer = &pair.value(); + if let Some(req) = peer.get_blocks_request() { for hash in req.hashes.keys() { block_requested_hashes.insert(hash.clone()); } @@ -725,16 +1478,14 @@ impl Peers { request: Option, ) { if let Some(mut peer) = self.inner.get_mut(&index) { - peer.state.update_blocks_proof_request( - request.map(|content| BlocksProofRequest::new(content, unix_time_as_millis())), - ); + 
peer.blocks_proof_request = + request.map(|content| BlocksProofRequest::new(content, unix_time_as_millis())); } } pub(crate) fn update_blocks_request(&self, index: PeerIndex, hashes: Option>) { if let Some(mut peer) = self.inner.get_mut(&index) { - peer.state.update_blocks_request( - hashes.map(|hashes| BlocksRequest::new(hashes, unix_time_as_millis())), - ); + peer.blocks_request = + hashes.map(|hashes| BlocksRequest::new(hashes, unix_time_as_millis())); } } pub(crate) fn update_txs_proof_request( @@ -743,19 +1494,124 @@ impl Peers { request: Option, ) { if let Some(mut peer) = self.inner.get_mut(&index) { - peer.state.update_txs_proof_request( - request - .map(|content| TransactionsProofRequest::new(content, unix_time_as_millis())), - ); + peer.txs_proof_request = request + .map(|content| TransactionsProofRequest::new(content, unix_time_as_millis())); + } + } + + pub(crate) fn add_check_points( + &self, + index: PeerIndex, + last_proved_number: BlockNumber, + start_number: BlockNumber, + check_points: &[packed::Byte32], + ) -> Result, Status> { + if let Some(mut peer) = self.inner.get_mut(&index) { + peer.check_points + .add_check_points(last_proved_number, start_number, check_points) + } else { + Err(StatusCode::PeerIsNotFound.into()) + } + } + + pub(crate) fn remove_first_n_check_points(&self, index: PeerIndex, n: usize) { + if let Some(mut peer) = self.inner.get_mut(&index) { + peer.check_points.remove_first_n_check_points(n); + let number = peer.check_points.number_of_first_check_point(); + peer.latest_block_filter_hashes.reset(number); + } + } + + #[cfg(test)] + pub(crate) fn mock_latest_block_filter_hashes( + &self, + index: PeerIndex, + check_point_number: BlockNumber, + block_filter_hashes: Vec, + ) { + if let Some(mut peer) = self.inner.get_mut(&index) { + peer.latest_block_filter_hashes = + LatestBlockFilterHashes::mock(check_point_number, block_filter_hashes); + } + } + + #[allow(clippy::too_many_arguments)] // TODO fix clippy + pub(crate) fn 
update_latest_block_filter_hashes( + &self, + index: PeerIndex, + last_proved_number: BlockNumber, + finalized_check_point_index: u32, + finalized_check_point: &packed::Byte32, + start_number: BlockNumber, + parent_block_filter_hash: &packed::Byte32, + block_filter_hashes: &[packed::Byte32], + ) -> Result, Status> { + if let Some(mut peer) = self.inner.get_mut(&index) { + let finalized_check_point_number = + self.calc_check_point_number(finalized_check_point_index); + peer.latest_block_filter_hashes + .update_latest_block_filter_hashes( + last_proved_number, + finalized_check_point_number, + finalized_check_point, + start_number, + parent_block_filter_hash, + block_filter_hashes, + ) + } else { + Err(StatusCode::PeerIsNotFound.into()) + } + } + + pub(crate) fn update_min_filtered_block_number(&self, min_filtered_block_number: BlockNumber) { + let should_cached_check_point_index = + self.calc_best_check_point_index_not_greater_than(min_filtered_block_number); + let current_cached_check_point_index = + self.cached_block_filter_hashes.read().expect("poisoned").0; + if current_cached_check_point_index != should_cached_check_point_index { + let mut tmp = self.cached_block_filter_hashes.write().expect("poisoned"); + tmp.0 = should_cached_check_point_index; + tmp.1.clear(); + } + } + + pub(crate) fn get_cached_block_filter_hashes(&self) -> (u32, Vec) { + self.cached_block_filter_hashes + .read() + .expect("poisoned") + .clone() + } + + pub(crate) fn update_cached_block_filter_hashes(&self, hashes: Vec) { + self.cached_block_filter_hashes.write().expect("poisoned").1 = hashes; + } + + pub(crate) fn if_cached_block_filter_hashes_require_update( + &self, + finalized_check_point_index: u32, + ) -> Option { + let (cached_index, cached_length) = { + let tmp = self.cached_block_filter_hashes.read().expect("poisoned"); + (tmp.0, tmp.1.len()) + }; + if cached_index >= finalized_check_point_index { + return None; + } + if cached_length as BlockNumber >= self.check_point_interval { 
+ return None; } + let cached_last_number = + self.calc_check_point_number(cached_index) + cached_length as BlockNumber; + Some(cached_last_number + 1) } - pub(crate) fn get_peers_which_require_updating(&self, before_timestamp: u64) -> Vec { + pub(crate) fn get_peers_which_require_new_state(&self, before_ts: u64) -> Vec { self.inner .iter() .filter_map(|item| { - if item.value().update_timestamp < before_timestamp { - Some(*item.key()) + let (peer_index, peer) = item.pair(); + if peer.state.require_new_last_state(before_ts) { + Some(*peer_index) } else { None } @@ -763,50 +1619,235 @@ impl Peers { .collect() } + pub(crate) fn get_peers_which_require_new_proof(&self) -> Vec { + self.inner + .iter() + .filter_map(|item| { + let (peer_index, peer) = item.pair(); + if peer.state.require_new_last_state_proof() { + Some(*peer_index) + } else { + None + } + }) + .collect() + } + + pub(crate) fn get_peers_which_require_more_check_points( + &self, + ) -> Vec<(PeerIndex, BlockNumber)> { + self.inner + .iter() + .filter_map(|item| { + let (peer_index, peer) = item.pair(); + peer.state.get_prove_state().and_then(|state| { + let proved_number = state.get_last_header().header().number(); + let check_points = &item.value().check_points; + if check_points.if_require_next_check_point(proved_number) { + let next_check_point_number = check_points.number_of_next_check_point(); + Some((*peer_index, next_check_point_number)) + } else { + None + } + }) + }) + .collect() + } + + pub(crate) fn get_peers_which_require_more_latest_block_filter_hashes( + &self, + finalized_check_point_index: u32, + ) -> Vec<(PeerIndex, BlockNumber)> { + self.inner + .iter() + .filter_map(|item| { + let (peer_index, peer) = item.pair(); + peer.state.get_prove_state().and_then(|state| { + let latest_block_filter_hashes = &item.value().latest_block_filter_hashes; + let check_point_number = latest_block_filter_hashes.get_check_point_number(); + let finalized_check_point_number = + 
self.calc_check_point_number(finalized_check_point_index); + if check_point_number == finalized_check_point_number { + let proved_number = state.get_last_header().header().number(); + let last_number = latest_block_filter_hashes.get_last_number(); + if last_number < proved_number { + Some((*peer_index, last_number + 1)) + } else { + None + } + } else { + None + } + }) + }) + .collect() + } + + pub(crate) fn get_latest_block_filter_hashes( + &self, + finalized_check_point_index: u32, + ) -> Vec { + let finalized_check_point_number = + self.calc_check_point_number(finalized_check_point_index); + let mut peers_with_data = self + .inner + .iter() + .filter_map(|item| { + let (peer_index, peer) = item.pair(); + peer.state.get_prove_state().and_then(|_| { + let latest_block_filter_hashes = &item.value().latest_block_filter_hashes; + let check_point_number = latest_block_filter_hashes.get_check_point_number(); + if finalized_check_point_number == check_point_number { + Some((*peer_index, latest_block_filter_hashes.get_hashes())) + } else { + None + } + }) + }) + .collect::>(); + let required_peers_count = self.required_peers_count(); + if peers_with_data.len() < required_peers_count { + return Vec::new(); + } + let length_max = { + let mut hashes_sizes = peers_with_data + .values() + .map(|hashes| hashes.len()) + .collect::>(); + hashes_sizes.sort(); + hashes_sizes[required_peers_count - 1] + }; + let mut result = Vec::new(); + for index in 0..length_max { + let map = peers_with_data + .values() + .map(|hashes| hashes.get(index)) + .fold(HashMap::new(), |mut map, hash_opt| { + if let Some(h) = hash_opt { + *map.entry(h.clone()).or_default() += 1; + } + map + }); + let count_max = map.values().max().cloned().unwrap_or(0); + if count_max >= required_peers_count { + let hash_opt = + map.into_iter().find_map( + |(hash, count)| { + if count == count_max { + Some(hash) + } else { + None + } + }, + ); + let hash = hash_opt.expect("checked: must be found"); + if count_max != 
peers_with_data.len() { + peers_with_data + .retain(|_, hashes| matches!(hashes.get(index), Some(tmp) if *tmp == hash)); + } + result.push(hash); + } else { + break; + } + } + result + } + + pub(crate) fn could_request_more_block_filters( + &self, + finalized_check_point_index: u32, + min_filtered_block_number: BlockNumber, + ) -> bool { + let should_cached_check_point_index = + self.calc_best_check_point_index_not_greater_than(min_filtered_block_number); + if should_cached_check_point_index >= finalized_check_point_index { + let finalized_check_point_number = + self.calc_check_point_number(finalized_check_point_index); + let latest_block_filter_hashes_count = self + .get_latest_block_filter_hashes(finalized_check_point_index) + .len(); + finalized_check_point_number + latest_block_filter_hashes_count as BlockNumber + >= min_filtered_block_number + } else { + // Check: + // - If cached block filter hashes is same check point as the required, + // - If all block filter hashes in that check point are downloaded. 
+ let cached_data = self.get_cached_block_filter_hashes(); + let current_cached_check_point_index = cached_data.0; + should_cached_check_point_index == current_cached_check_point_index + && cached_data.1.len() as BlockNumber == self.check_point_interval + } + } + pub(crate) fn get_peers_which_have_timeout(&self, now: u64) -> Vec { self.inner .iter() .filter_map(|item| { - let peer_state = &item.value().state; - peer_state - .get_blocks_proof_request() - .and_then(|req| { - if now > req.when_sent + MESSAGE_TIMEOUT { - Some(*item.key()) + let (peer_index, peer) = item.pair(); + peer.state + .when_sent_request() + .and_then(|when_sent| { + if now > when_sent + MESSAGE_TIMEOUT { + Some(*peer_index) } else { None } }) .or_else(|| { - peer_state.get_blocks_request().and_then(|req| { + peer.get_blocks_proof_request().and_then(|req| { if now > req.when_sent + MESSAGE_TIMEOUT { - Some(*item.key()) + Some(*peer_index) } else { None } }) }) .or_else(|| { - peer_state.get_txs_proof_request().and_then(|req| { + peer.get_blocks_request().and_then(|req| { if now > req.when_sent + MESSAGE_TIMEOUT { - Some(*item.key()) + Some(*peer_index) } else { None } }) }) + .or_else(|| { + peer.get_txs_proof_request().and_then(|req| { + if now > req.when_sent + MESSAGE_TIMEOUT { + Some(*peer_index) + } else { + None + } + }) + }) + }) + .collect() + } + + pub(crate) fn get_all_proved_check_points( + &self, + ) -> HashMap)> { + self.inner + .iter() + .filter_map(|item| { + let (peer_index, peer) = item.pair(); + peer.state.get_prove_state().map(|_| { + let start_index = peer.check_points.get_start_index(); + let check_points = peer.check_points.get_check_points(); + (*peer_index, (start_index, check_points)) + }) }) .collect() } - pub(crate) fn get_peers_which_are_proved(&self) -> Vec<(PeerIndex, ProveState)> { + pub(crate) fn get_all_prove_states(&self) -> Vec<(PeerIndex, ProveState)> { self.inner .iter() .filter_map(|item| { - item.value() - .state + let (peer_index, peer) = item.pair(); + 
peer.state .get_prove_state() - .map(|state| (*item.key(), state.to_owned())) + .map(|state| (*peer_index, state.to_owned())) }) .collect() } @@ -816,21 +1857,19 @@ impl Peers { header: &VerifiableHeader, ) -> Option<(PeerIndex, ProveState)> { self.inner.iter().find_map(|item| { - item.value() - .state - .get_prove_state() - .and_then(|prove_state| { - if prove_state.is_same_as(header) { - Some((*item.key(), prove_state.clone())) - } else { - None - } - }) + let (peer_index, peer) = item.pair(); + peer.state.get_prove_state().and_then(|prove_state| { + if prove_state.is_same_as(header) { + Some((*peer_index, prove_state.clone())) + } else { + None + } + }) }) } pub(crate) fn get_best_proved_peers(&self, best_tip: &packed::Header) -> Vec { - self.get_peers_which_are_proved() + self.get_all_prove_states() .into_iter() .filter(|(_, prove_state)| { Some(prove_state.get_last_header().header()) diff --git a/src/protocols/mod.rs b/src/protocols/mod.rs index 1da5ae5..fbdb11d 100644 --- a/src/protocols/mod.rs +++ b/src/protocols/mod.rs @@ -31,3 +31,5 @@ pub const LAST_N_BLOCKS: BlockNumber = 100; pub const GET_BLOCKS_PROOF_LIMIT: usize = 1000; // Copy from ckb/util/light-client-protocol-server pub const GET_TRANSACTIONS_PROOF_LIMIT: usize = 1000; +// Copy from ckb/sync +pub const CHECK_POINT_INTERVAL: BlockNumber = 2000; diff --git a/src/protocols/status.rs b/src/protocols/status.rs index 7330523..df1fd6b 100644 --- a/src/protocols/status.rs +++ b/src/protocols/status.rs @@ -1,5 +1,7 @@ -#![allow(dead_code)] -use std::{fmt, time::Duration}; +use std::{fmt, sync::Arc, time::Duration}; + +use ckb_network::{CKBProtocolContext, PeerIndex}; +use log::{debug, error, trace, warn}; use super::BAD_MESSAGE_BAN_TIME; @@ -14,6 +16,7 @@ use super::BAD_MESSAGE_BAN_TIME; /// - 5xx: Local errors - The client failed to process a response. 
#[repr(u16)] #[derive(Clone, Copy, Debug, PartialEq, Eq)] +#[allow(dead_code)] pub enum StatusCode { /// OK OK = 200, @@ -25,10 +28,12 @@ pub enum StatusCode { /// Unexpected light-client protocol message. UnexpectedProtocolMessage = 401, - /// The peer state is not found. - PeerStateIsNotFound = 411, + /// The peer is not found. + PeerIsNotFound = 411, /// The last state sent from server is invalid. InvalidLastState = 412, + /// The peer state is not correct for transition. + IncorrectLastState = 413, /// Receives a response but the peer isn't waiting for a response. PeerIsNotOnProcess = 421, @@ -55,10 +60,27 @@ pub enum StatusCode { /// Reorg headers for a last state proof is invalid. InvalidReorgHeaders = 452, + // Errors for block filter protocol. + /// Check points is empty. + CheckPointsIsEmpty = 471, + /// Check points is unaligned. + CheckPointsIsUnaligned = 472, + /// Check points is unexpected. + CheckPointsIsUnexpected = 473, + /// Block filter hashes is empty. + BlockFilterHashesIsEmpty = 481, + /// Block filter hashes is unexpected. + BlockFilterHashesIsUnexpected = 482, + /// Block filter data is unexpected. + BlockFilterDataIsUnexpected = 483, + /// Throws an internal error. InternalError = 500, /// Throws an error from the network. Network = 501, + + /// Throws an error that could be ignored. + Ignore = 599, } /// Process message status. @@ -128,12 +150,13 @@ impl Status { /// Whether the code is `OK` or not. pub fn is_ok(&self) -> bool { - self.code == StatusCode::OK || self.code == StatusCode::RequireRecheck + let code = self.code(); + code == StatusCode::OK || code == StatusCode::RequireRecheck } /// Whether the session should be banned. pub fn should_ban(&self) -> Option { - let code = self.code as u16; + let code = self.code() as u16; if (400..500).contains(&code) { Some(BAD_MESSAGE_BAN_TIME) } else { @@ -143,7 +166,7 @@ impl Status { /// Whether a warning log should be output. 
pub fn should_warn(&self) -> bool { - let code = self.code as u16; + let code = self.code() as u16; (500..600).contains(&code) } @@ -151,4 +174,38 @@ impl Status { pub fn code(&self) -> StatusCode { self.code } + + pub fn process( + &self, + nc: Arc, + index: PeerIndex, + protocol: &str, + message: &str, + ) { + if let Some(ban_time) = self.should_ban() { + error!( + "{}Protocol.received {} from {}, result {}, ban {:?}", + protocol, message, index, self, ban_time + ); + nc.ban_peer(index, ban_time, self.to_string()); + } else if self.should_warn() { + warn!( + "{}Protocol.received {} from {}, result {}", + protocol, message, index, self + ); + } else if self.is_ok() { + trace!( + "{}Protocol.received {} from {}, result {}", + protocol, + message, + index, + self + ); + } else { + debug!( + "{}Protocol.received {} from {}, result {}", + protocol, message, index, self + ); + } + } } diff --git a/src/service.rs b/src/service.rs index 2098c06..e0e6219 100644 --- a/src/service.rs +++ b/src/service.rs @@ -4,9 +4,9 @@ use ckb_jsonrpc_types::{ OutPoint, RemoteNodeProtocol, Script, Transaction, TransactionView, Uint32, Uint64, }; use ckb_network::{extract_peer_id, NetworkController}; +use ckb_systemtime::unix_time_as_millis; use ckb_traits::HeaderProvider; use ckb_types::{core, packed, prelude::*, H256}; -use faketime::unix_time_as_millis; use jsonrpc_core::{Error, IoHandler, Result}; use jsonrpc_derive::rpc; use jsonrpc_http_server::{Server, ServerBuilder}; diff --git a/src/storage.rs b/src/storage.rs index 66dd0ed..1a48a8d 100644 --- a/src/storage.rs +++ b/src/storage.rs @@ -10,7 +10,7 @@ use ckb_types::{ bytes::Bytes, core::{ cell::{CellMeta, CellProvider, CellStatus}, - BlockNumber, HeaderView, TransactionInfo, + BlockNumber, BlockView, HeaderView, TransactionInfo, }, packed::{self, Block, Byte32, CellOutput, Header, OutPoint, Script, Transaction}, prelude::*, @@ -20,6 +20,7 @@ use ckb_types::{ use rocksdb::{prelude::*, Direction, IteratorMode, WriteBatch, DB}; use 
crate::error::Result; +use crate::patches::{build_filter_data, calc_filter_hash, FilterDataProvider}; use crate::protocols::Peers; pub const LAST_STATE_KEY: &str = "LAST_STATE"; @@ -28,6 +29,7 @@ const FILTER_SCRIPTS_KEY: &str = "FILTER_SCRIPTS"; const MATCHED_FILTER_BLOCKS_KEY: &str = "MATCHED_BLOCKS"; const MIN_FILTERED_BLOCK_NUMBER: &str = "MIN_FILTERED_NUMBER"; const LAST_N_HEADERS_KEY: &str = "LAST_N_HEADERS"; +const MAX_CHECK_POINT_INDEX: &str = "MAX_CHECK_POINT_INDEX"; pub struct ScriptStatus { pub script: Script, @@ -53,6 +55,34 @@ pub enum ScriptType { Type, } +struct WrappedBlockView<'a> { + inner: &'a BlockView, + index: HashMap, +} + +impl<'a> WrappedBlockView<'a> { + fn new(inner: &'a BlockView) -> Self { + let index = inner + .transactions() + .into_iter() + .enumerate() + .map(|(index, tx)| (tx.hash(), index)) + .collect(); + Self { inner, index } + } +} + +impl<'a> FilterDataProvider for WrappedBlockView<'a> { + fn cell(&self, out_point: &OutPoint) -> Option { + self.index.get(&out_point.tx_hash()).and_then(|tx_index| { + self.inner + .transactions() + .get(*tx_index) + .and_then(|tx| tx.outputs().get(out_point.index().unpack())) + }) + } +} + #[derive(Clone)] pub struct Storage { pub(crate) db: Arc, @@ -128,6 +158,20 @@ impl Storage { .expect("batch put should be ok"); batch.commit().expect("batch commit should be ok"); self.update_last_state(&U256::zero(), &block.header(), &[]); + let genesis_block_filter_hash: Byte32 = { + let block_view = block.into_view(); + let provider = WrappedBlockView::new(&block_view); + let parent_block_filter_hash = Byte32::zero(); + let (genesis_block_filter_vec, missing_out_points) = + build_filter_data(provider, &block_view.transactions()); + if !missing_out_points.is_empty() { + panic!("Genesis block shouldn't missing any out points."); + } + let genesis_block_filter_data = genesis_block_filter_vec.pack(); + calc_filter_hash(&parent_block_filter_hash, &genesis_block_filter_data).pack() + }; + 
self.update_max_check_point_index(0); + self.update_check_points(0, &[genesis_block_filter_hash]); self.update_min_filtered_block_number(0); } } @@ -517,6 +561,57 @@ impl Storage { .expect("db put min filtered block number should be ok"); } + pub fn get_last_check_point(&self) -> (CpIndex, Byte32) { + let index = self.get_max_check_point_index(); + let hash = self + .get_check_points(index, 1) + .get(0) + .cloned() + .expect("db get last check point should be ok"); + (index, hash) + } + + pub fn get_max_check_point_index(&self) -> CpIndex { + let key = Key::Meta(MAX_CHECK_POINT_INDEX).into_vec(); + self.db + .get_pinned(&key) + .expect("db get max check point index should be ok") + .map(|data| CpIndex::from_be_bytes(data.as_ref().try_into().unwrap())) + .expect("db get max check point index should be ok") + } + + pub fn update_max_check_point_index(&self, index: CpIndex) { + let key = Key::Meta(MAX_CHECK_POINT_INDEX).into_vec(); + let value = index.to_be_bytes(); + self.db + .put(key, value) + .expect("db put max check point index should be ok"); + } + + pub fn get_check_points(&self, start_index: CpIndex, limit: usize) -> Vec { + let start_key = Key::CheckPointIndex(start_index).into_vec(); + let key_prefix = [KeyPrefix::CheckPointIndex as u8]; + let mode = IteratorMode::From(start_key.as_ref(), Direction::Forward); + self.db + .iterator(mode) + .take_while(|(key, _value)| key.starts_with(&key_prefix)) + .take(limit) + .map(|(_key, value)| Byte32::from_slice(&value).expect("stored block filter hash")) + .collect() + } + + pub fn update_check_points(&self, start_index: CpIndex, check_points: &[Byte32]) { + let mut index = start_index; + let mut batch = self.batch(); + for cp in check_points { + let key = Key::CheckPointIndex(index).into_vec(); + let value = Value::BlockFilterHash(cp); + batch.put_kv(key, value).expect("batch put should be ok"); + index += 1; + } + batch.commit().expect("batch commit should be ok"); + } + pub fn update_block_number(&self, 
block_number: BlockNumber) { let key_prefix = Key::Meta(FILTER_SCRIPTS_KEY).into_vec(); let mode = IteratorMode::From(key_prefix.as_ref(), Direction::Forward); @@ -1084,6 +1179,7 @@ impl Batch { } pub type TxIndex = u32; +pub type CpIndex = u32; pub type OutputIndex = u32; pub type CellIndex = u32; pub enum CellType { @@ -1102,6 +1198,7 @@ pub enum CellType { /// | 128 | TxTypeScript | TxHash | /// | 160 | BlockHash | Header | /// | 192 | BlockNumber | BlockHash | +/// | 208 | CheckPointIndex | BlockFilterHash | /// | 224 | Meta | Meta | /// +--------------+--------------------+--------------------------+ /// @@ -1113,6 +1210,8 @@ pub enum Key<'a> { TxTypeScript(&'a Script, BlockNumber, TxIndex, CellIndex, CellType), BlockHash(&'a Byte32), BlockNumber(BlockNumber), + // The index number for check points. + CheckPointIndex(CpIndex), Meta(&'a str), } @@ -1121,6 +1220,7 @@ pub enum Value<'a> { TxHash(&'a Byte32), Header(&'a Header), BlockHash(&'a Byte32), + BlockFilterHash(&'a Byte32), Meta(Vec), } @@ -1133,6 +1233,7 @@ pub enum KeyPrefix { TxTypeScript = 128, BlockHash = 160, BlockNumber = 192, + CheckPointIndex = 208, Meta = 224, } @@ -1183,6 +1284,10 @@ impl<'a> From> for Vec { encoded.push(KeyPrefix::BlockNumber as u8); encoded.extend_from_slice(&block_number.to_be_bytes()); } + Key::CheckPointIndex(index) => { + encoded.push(KeyPrefix::CheckPointIndex as u8); + encoded.extend_from_slice(&index.to_be_bytes()); + } Key::Meta(meta_key) => { encoded.push(KeyPrefix::Meta as u8); encoded.extend_from_slice(meta_key.as_bytes()); @@ -1205,6 +1310,7 @@ impl<'a> From> for Vec { Value::TxHash(tx_hash) => tx_hash.as_slice().into(), Value::Header(header) => header.as_slice().into(), Value::BlockHash(block_hash) => block_hash.as_slice().into(), + Value::BlockFilterHash(block_filter_hash) => block_filter_hash.as_slice().into(), Value::Meta(meta_value) => meta_value, } } diff --git a/src/subcmds.rs b/src/subcmds.rs index bc6531d..f37803f 100644 --- a/src/subcmds.rs +++ 
b/src/subcmds.rs @@ -13,6 +13,7 @@ use crate::{ error::{Error, Result}, protocols::{ FilterProtocol, LightClientProtocol, Peers, PendingTxs, RelayProtocol, SyncProtocol, + CHECK_POINT_INTERVAL, }, service::Service, storage::Storage, @@ -38,6 +39,7 @@ impl RunConfig { storage.init_genesis_block(consensus.genesis_block().data()); let pending_txs = Arc::new(RwLock::new(PendingTxs::new(64))); + let max_outbound_peers = self.run_env.network.max_outbound_peers; let network_state = NetworkState::from_config(self.run_env.network) .map(|network_state| { Arc::new(network_state.required_flags( @@ -58,7 +60,11 @@ impl RunConfig { SupportProtocols::Filter.protocol_id(), ]; - let peers = Arc::new(Peers::default()); + let peers = Arc::new(Peers::new( + max_outbound_peers, + CHECK_POINT_INTERVAL, + storage.get_last_check_point(), + )); let sync_protocol = SyncProtocol::new(storage.clone(), Arc::clone(&peers)); let relay_protocol = RelayProtocol::new(pending_txs.clone(), Arc::clone(&peers)); let light_client: Box = Box::new(LightClientProtocol::new( diff --git a/src/tests/prelude.rs b/src/tests/prelude.rs index b549646..04ca005 100644 --- a/src/tests/prelude.rs +++ b/src/tests/prelude.rs @@ -18,6 +18,7 @@ use ckb_types::{ use crate::{ protocols::{ FilterProtocol, LastState, LightClientProtocol, Peers, ProveRequest, SyncProtocol, + CHECK_POINT_INTERVAL, }, storage::Storage, tests::{ALWAYS_SUCCESS_BIN, ALWAYS_SUCCESS_SCRIPT}, @@ -70,6 +71,16 @@ pub(crate) trait ChainExt { fn consensus(&self) -> &Consensus; + fn create_peers(&self) -> Arc { + let max_outbound_peers = 1; + let peers = Peers::new( + max_outbound_peers, + CHECK_POINT_INTERVAL, + self.client_storage().get_last_check_point(), + ); + Arc::new(peers) + } + fn create_light_client_protocol(&self, peers: Arc) -> LightClientProtocol { let storage = self.client_storage().to_owned(); let consensus = self.consensus().to_owned(); diff --git a/src/tests/protocols/block_filter.rs b/src/tests/protocols/block_filter.rs index 
1c804d3..a0144c6 100644 --- a/src/tests/protocols/block_filter.rs +++ b/src/tests/protocols/block_filter.rs @@ -16,9 +16,7 @@ use ckb_types::{ use crate::storage::SetScriptsCommand; use crate::storage::{ScriptStatus, ScriptType}; use crate::{ - protocols::{ - LastState, Peers, ProveRequest, ProveState, BAD_MESSAGE_BAN_TIME, GET_BLOCK_FILTERS_TOKEN, - }, + protocols::{BAD_MESSAGE_BAN_TIME, GET_BLOCK_FILTERS_TOKEN}, tests::{ prelude::*, utils::{MockChain, MockNetworkContext}, @@ -30,7 +28,7 @@ async fn test_block_filter_malformed_message() { let chain = MockChain::new_with_dummy_pow("test-block-filter"); let nc = MockNetworkContext::new(SupportProtocols::Filter); - let peers = Arc::new(Peers::default()); + let peers = chain.create_peers(); let mut protocol = chain.create_filter_protocol(peers); let peer_index = PeerIndex::new(3); @@ -69,13 +67,9 @@ async fn test_block_filter_ignore_start_number() { None, Default::default(), ); - let last_state = LastState::new(tip_header); - let request = ProveRequest::new(last_state, Default::default()); - let prove_state = - ProveState::new_from_request(request, Default::default(), Default::default()); - let peers = Arc::new(Peers::default()); + let peers = chain.create_peers(); peers.add_peer(peer_index); - peers.commit_prove_state(peer_index, prove_state); + peers.mock_prove_state(peer_index, tip_header).unwrap(); peers }; let mut protocol = chain.create_filter_protocol(peers); @@ -123,13 +117,9 @@ async fn test_block_filter_empty_filters() { None, Default::default(), ); - let last_state = LastState::new(tip_header); - let request = ProveRequest::new(last_state, Default::default()); - let prove_state = - ProveState::new_from_request(request, Default::default(), Default::default()); - let peers = Arc::new(Peers::default()); + let peers = chain.create_peers(); peers.add_peer(peer_index); - peers.commit_prove_state(peer_index, prove_state); + peers.mock_prove_state(peer_index, tip_header).unwrap(); peers }; let mut protocol = 
chain.create_filter_protocol(peers); @@ -177,13 +167,9 @@ async fn test_block_filter_invalid_filters_count() { None, Default::default(), ); - let last_state = LastState::new(tip_header); - let request = ProveRequest::new(last_state, Default::default()); - let prove_state = - ProveState::new_from_request(request, Default::default(), Default::default()); - let peers = Arc::new(Peers::default()); + let peers = chain.create_peers(); peers.add_peer(peer_index); - peers.commit_prove_state(peer_index, prove_state); + peers.mock_prove_state(peer_index, tip_header).unwrap(); peers }; let mut protocol = chain.create_filter_protocol(peers); @@ -236,16 +222,12 @@ async fn test_block_filter_start_number_greater_then_proved_number() { None, Default::default(), ); - let last_state = LastState::new(tip_header); - let request = ProveRequest::new(last_state, Default::default()); - let prove_state = - ProveState::new_from_request(request, Default::default(), Default::default()); - let peers = Arc::new(Peers::default()); + let peers = chain.create_peers(); peers.add_peer(peer_index); - peers.commit_prove_state(peer_index, prove_state); + peers.mock_prove_state(peer_index, tip_header).unwrap(); peers }; - let mut protocol = chain.create_filter_protocol(peers); + let mut protocol = chain.create_filter_protocol(Arc::clone(&peers)); let content = packed::BlockFilters::new_builder() .start_number(start_number.pack()) .block_hashes(vec![H256(rand::random()).pack(), H256(rand::random()).pack()].pack()) @@ -255,7 +237,11 @@ async fn test_block_filter_start_number_greater_then_proved_number() { .set(content) .build(); - let peer_index = PeerIndex::new(3); + peers.mock_latest_block_filter_hashes( + peer_index, + 0, + vec![Default::default(); proved_number as usize], + ); protocol .received(nc.context(), peer_index, message.as_bytes()) .await; @@ -292,16 +278,12 @@ async fn test_block_filter_ok_with_blocks_not_matched() { None, Default::default(), ); - let last_state = 
LastState::new(tip_header); - let request = ProveRequest::new(last_state, Default::default()); - let prove_state = - ProveState::new_from_request(request, Default::default(), Default::default()); - let peers = Arc::new(Peers::default()); + let peers = chain.create_peers(); peers.add_peer(peer_index); - peers.commit_prove_state(peer_index, prove_state); + peers.mock_prove_state(peer_index, tip_header).unwrap(); peers }; - let mut protocol = chain.create_filter_protocol(peers); + let mut protocol = chain.create_filter_protocol(Arc::clone(&peers)); let block_hashes = vec![H256(rand::random()).pack(), H256(rand::random()).pack()]; let blocks_count = block_hashes.len(); let content = packed::BlockFilters::new_builder() @@ -313,6 +295,11 @@ async fn test_block_filter_ok_with_blocks_not_matched() { .set(content) .build(); + peers.mock_latest_block_filter_hashes( + peer_index, + 0, + vec![Default::default(); proved_number as usize], + ); protocol .received(nc.context(), peer_index, message.as_bytes()) .await; @@ -378,13 +365,9 @@ async fn test_block_filter_ok_with_blocks_matched() { let peer_index = PeerIndex::new(3); let (peers, prove_state_block_hash) = { let prove_state_block_hash = header.hash(); - let last_state = LastState::new(tip_header); - let request = ProveRequest::new(last_state, Default::default()); - let prove_state = - ProveState::new_from_request(request, Default::default(), Default::default()); - let peers = Arc::new(Peers::default()); + let peers = chain.create_peers(); peers.add_peer(peer_index); - peers.commit_prove_state(peer_index, prove_state); + peers.mock_prove_state(peer_index, tip_header).unwrap(); (peers, prove_state_block_hash) }; @@ -409,7 +392,12 @@ async fn test_block_filter_ok_with_blocks_matched() { .build() .as_bytes(); - let mut protocol = chain.create_filter_protocol(peers); + let mut protocol = chain.create_filter_protocol(Arc::clone(&peers)); + peers.mock_latest_block_filter_hashes( + peer_index, + 0, + vec![Default::default(); 
start_number as usize + 2], + ); protocol.received(nc.context(), peer_index, message).await; assert!(nc.not_banned(peer_index)); @@ -425,9 +413,7 @@ async fn test_block_filter_ok_with_blocks_matched() { }; let get_block_filters_message = { let blocks_count = 2; - let limit = proved_number - start_number + 1; - let actual_blocks_count = blocks_count.min(limit); - let new_start_number = start_number - 1 + actual_blocks_count + 1; + let new_start_number = start_number - 1 + blocks_count + 1; let content = packed::GetBlockFilters::new_builder() .start_number(new_start_number.pack()) .build(); @@ -480,17 +466,18 @@ async fn test_block_filter_notify_ask_filters() { None, Default::default(), ); - let last_state = LastState::new(tip_header); - let request = ProveRequest::new(last_state, Default::default()); - let prove_state = - ProveState::new_from_request(request, Default::default(), Default::default()); - let peers = Arc::new(Peers::default()); + let peers = chain.create_peers(); peers.add_peer(peer_index); - peers.commit_prove_state(peer_index, prove_state); + peers.mock_prove_state(peer_index, tip_header).unwrap(); peers }; - let mut protocol = chain.create_filter_protocol(peers); + let mut protocol = chain.create_filter_protocol(Arc::clone(&peers)); + peers.mock_latest_block_filter_hashes( + peer_index, + 0, + vec![Default::default(); min_filtered_block_number as usize + 1], + ); protocol.notify(nc.context(), GET_BLOCK_FILTERS_TOKEN).await; let message = { let start_number: u64 = min_filtered_block_number + 1; @@ -519,8 +506,9 @@ async fn test_block_filter_notify_no_proved_peers() { let peer_index = PeerIndex::new(3); let peers = { - let peers = Arc::new(Peers::default()); + let peers = chain.create_peers(); peers.add_peer(peer_index); + peers.request_last_state(peer_index).unwrap(); peers }; let mut protocol = chain.create_filter_protocol(peers); @@ -556,13 +544,9 @@ async fn test_block_filter_notify_not_reach_ask() { None, Default::default(), ); - let last_state = 
LastState::new(tip_header); - let request = ProveRequest::new(last_state, Default::default()); - let prove_state = - ProveState::new_from_request(request, Default::default(), Default::default()); - let peers = Arc::new(Peers::default()); + let peers = chain.create_peers(); peers.add_peer(peer_index); - peers.commit_prove_state(peer_index, prove_state); + peers.mock_prove_state(peer_index, tip_header).unwrap(); peers }; let mut protocol = chain.create_filter_protocol(peers); @@ -600,13 +584,9 @@ async fn test_block_filter_notify_proved_number_not_big_enough() { None, Default::default(), ); - let last_state = LastState::new(tip_header); - let request = ProveRequest::new(last_state, Default::default()); - let prove_state = - ProveState::new_from_request(request, Default::default(), Default::default()); - let peers = Arc::new(Peers::default()); + let peers = chain.create_peers(); peers.add_peer(peer_index); - peers.commit_prove_state(peer_index, prove_state); + peers.mock_prove_state(peer_index, tip_header).unwrap(); peers }; let mut protocol = chain.create_filter_protocol(peers); @@ -641,13 +621,9 @@ async fn test_block_filter_notify_recover_matched_blocks() { .client_storage() .update_last_state(&U256::one(), &tip_header.header().data(), &[]); let peers = { - let last_state = LastState::new(tip_header); - let request = ProveRequest::new(last_state, Default::default()); - let prove_state = - ProveState::new_from_request(request, Default::default(), Default::default()); - let peers = Arc::new(Peers::default()); + let peers = chain.create_peers(); peers.add_peer(peer_index); - peers.commit_prove_state(peer_index, prove_state); + peers.mock_prove_state(peer_index, tip_header).unwrap(); peers }; let unproved_block_hash = H256(rand::random()).pack(); @@ -659,8 +635,13 @@ async fn test_block_filter_notify_recover_matched_blocks() { chain .client_storage() .add_matched_blocks(2, 2, matched_blocks); - let mut protocol = chain.create_filter_protocol(peers); + let mut protocol 
= chain.create_filter_protocol(Arc::clone(&peers)); + peers.mock_latest_block_filter_hashes( + peer_index, + 0, + vec![Default::default(); min_filtered_block_number as usize + 2], + ); protocol.notify(nc.context(), GET_BLOCK_FILTERS_TOKEN).await; let get_blocks_proof_message = { diff --git a/src/tests/protocols/light_client/mod.rs b/src/tests/protocols/light_client/mod.rs index 6bdeaaa..66b9471 100644 --- a/src/tests/protocols/light_client/mod.rs +++ b/src/tests/protocols/light_client/mod.rs @@ -1,5 +1,3 @@ -use std::sync::Arc; - use ckb_network::{bytes::Bytes, CKBProtocolHandler, PeerIndex, SupportProtocols}; use ckb_types::{ core::{BlockNumber, EpochNumberWithFraction, HeaderBuilder}, @@ -10,10 +8,7 @@ use ckb_types::{ }; use crate::{ - protocols::{ - light_client::constant::GET_IDLE_BLOCKS_TOKEN, LastState, PeerState, Peers, ProveRequest, - ProveState, BAD_MESSAGE_BAN_TIME, - }, + protocols::{light_client::constant::GET_IDLE_BLOCKS_TOKEN, PeerState, BAD_MESSAGE_BAN_TIME}, tests::{ prelude::*, utils::{MockChain, MockNetworkContext}, @@ -30,7 +25,7 @@ async fn malformed_message() { let chain = MockChain::new_with_dummy_pow("test-light-client"); let nc = MockNetworkContext::new(SupportProtocols::LightClient); - let peers = Arc::new(Peers::default()); + let peers = chain.create_peers(); let mut protocol = chain.create_light_client_protocol(peers); let peer_index = PeerIndex::new(3); @@ -47,7 +42,7 @@ async fn malformed_message() { fn build_prove_request_content() { let chain = MockChain::new_with_dummy_pow("test-light-client"); - let peers = Arc::new(Peers::default()); + let peers = chain.create_peers(); let protocol = chain.create_light_client_protocol(peers); let storage = chain.client_storage(); @@ -180,13 +175,9 @@ async fn test_light_client_get_idle_matched_blocks() { .update_last_state(&U256::one(), &tip_header.header().data(), &[]); let tip_hash = tip_header.header().hash(); let peers = { - let last_state = LastState::new(tip_header); - let request = 
ProveRequest::new(last_state, Default::default()); - let prove_state = - ProveState::new_from_request(request, Default::default(), Default::default()); - let peers = Arc::new(Peers::default()); + let peers = chain.create_peers(); peers.add_peer(peer_index); - peers.commit_prove_state(peer_index, prove_state); + peers.mock_prove_state(peer_index, tip_header).unwrap(); peers }; let unproved_block_hash = H256(rand::random()).pack(); diff --git a/src/tests/protocols/light_client/send_blocks_proof.rs b/src/tests/protocols/light_client/send_blocks_proof.rs index 7228d1e..76de814 100644 --- a/src/tests/protocols/light_client/send_blocks_proof.rs +++ b/src/tests/protocols/light_client/send_blocks_proof.rs @@ -1,12 +1,10 @@ -use std::sync::Arc; - use ckb_network::{CKBProtocolHandler, PeerIndex, SupportProtocols}; use ckb_types::{ core::BlockNumber, h256, packed, prelude::*, utilities::merkle_mountain_range::VerifiableHeader, }; use crate::{ - protocols::{LastState, Peers, ProveRequest, ProveState, StatusCode}, + protocols::{LastState, ProveRequest, ProveState, StatusCode}, tests::{ prelude::*, utils::{MockChain, MockNetworkContext}, @@ -18,7 +16,7 @@ async fn peer_state_is_not_found() { let chain = MockChain::new_with_dummy_pow("test-light-client"); let nc = MockNetworkContext::new(SupportProtocols::LightClient); - let peers = Arc::new(Peers::default()); + let peers = chain.create_peers(); let mut protocol = chain.create_light_client_protocol(peers); let data = { @@ -32,7 +30,7 @@ async fn peer_state_is_not_found() { let peer_index = PeerIndex::new(1); protocol.received(nc.context(), peer_index, data).await; - assert!(nc.banned_since(peer_index, StatusCode::PeerStateIsNotFound)); + assert!(nc.banned_since(peer_index, StatusCode::PeerIsNotFound)); } #[tokio::test] @@ -42,8 +40,9 @@ async fn no_matched_request() { let peer_index = PeerIndex::new(1); let peers = { - let peers = Arc::new(Peers::default()); + let peers = chain.create_peers(); peers.add_peer(peer_index); + 
peers.request_last_state(peer_index).unwrap(); peers }; let mut protocol = chain.create_light_client_protocol(peers); @@ -68,8 +67,9 @@ async fn last_state_is_changed() { let peer_index = PeerIndex::new(1); let peers = { - let peers = Arc::new(Peers::default()); + let peers = chain.create_peers(); peers.add_peer(peer_index); + peers.request_last_state(peer_index).unwrap(); peers }; let mut protocol = chain.create_light_client_protocol(peers); @@ -95,9 +95,6 @@ async fn last_state_is_changed() { .build_prove_request_content(&peer_state, &last_header) .expect("build prove request content"); let last_state = LastState::new(last_header); - protocol - .peers() - .update_last_state(peer_index, last_state.clone()); ProveRequest::new(last_state, content) }; let last_state = LastState::new(prove_request.get_last_header().to_owned()); @@ -114,11 +111,17 @@ async fn last_state_is_changed() { ProveState::new_from_request(prove_request.clone(), Vec::new(), last_n_headers) }; let content = chain.build_blocks_proof_content(num, &block_numbers, &[]); - protocol.peers().update_last_state(peer_index, last_state); protocol .peers() - .update_prove_request(peer_index, Some(prove_request)); - protocol.commit_prove_state(peer_index, prove_state); + .update_last_state(peer_index, last_state) + .unwrap(); + protocol + .peers() + .update_prove_request(peer_index, prove_request) + .unwrap(); + protocol + .commit_prove_state(peer_index, prove_state) + .unwrap(); protocol .peers() .update_blocks_proof_request(peer_index, Some(content)); @@ -145,10 +148,8 @@ async fn last_state_is_changed() { assert!(nc.not_banned(peer_index)); - let peer_state = protocol - .get_peer_state(&peer_index) - .expect("has peer state"); - assert!(peer_state.get_blocks_proof_request().is_none()); + let peer = protocol.get_peer(&peer_index).expect("has peer"); + assert!(peer.get_blocks_proof_request().is_none()); } } @@ -159,8 +160,9 @@ async fn unexpected_response() { let peer_index = PeerIndex::new(1); let peers = { 
- let peers = Arc::new(Peers::default()); + let peers = chain.create_peers(); peers.add_peer(peer_index); + peers.request_last_state(peer_index).unwrap(); peers }; let mut protocol = chain.create_light_client_protocol(peers); @@ -187,9 +189,6 @@ async fn unexpected_response() { .build_prove_request_content(&peer_state, &last_header) .expect("build prove request content"); let last_state = LastState::new(last_header); - protocol - .peers() - .update_last_state(peer_index, last_state.clone()); ProveRequest::new(last_state, content) }; let last_state = LastState::new(prove_request.get_last_header().to_owned()); @@ -206,11 +205,17 @@ async fn unexpected_response() { ProveState::new_from_request(prove_request.clone(), Vec::new(), last_n_headers) }; let content = chain.build_blocks_proof_content(num, &block_numbers, &[]); - protocol.peers().update_last_state(peer_index, last_state); protocol .peers() - .update_prove_request(peer_index, Some(prove_request)); - protocol.commit_prove_state(peer_index, prove_state); + .update_last_state(peer_index, last_state) + .unwrap(); + protocol + .peers() + .update_prove_request(peer_index, prove_request) + .unwrap(); + protocol + .commit_prove_state(peer_index, prove_state) + .unwrap(); protocol .peers() .update_blocks_proof_request(peer_index, Some(content)); @@ -261,8 +266,9 @@ async fn get_blocks_with_chunks() { let peer_index = PeerIndex::new(1); let peers = { - let peers = Arc::new(Peers::default()); + let peers = chain.create_peers(); peers.add_peer(peer_index); + peers.request_last_state(peer_index).unwrap(); peers }; let mut protocol = chain.create_light_client_protocol(peers); @@ -290,9 +296,6 @@ async fn get_blocks_with_chunks() { .build_prove_request_content(&peer_state, &last_header) .expect("build prove request content"); let last_state = LastState::new(last_header); - protocol - .peers() - .update_last_state(peer_index, last_state.clone()); ProveRequest::new(last_state, content) }; let last_state = 
LastState::new(prove_request.get_last_header().to_owned()); @@ -309,11 +312,17 @@ async fn get_blocks_with_chunks() { ProveState::new_from_request(prove_request.clone(), Vec::new(), last_n_headers) }; let content = chain.build_blocks_proof_content(num, &block_numbers, &[]); - protocol.peers().update_last_state(peer_index, last_state); protocol .peers() - .update_prove_request(peer_index, Some(prove_request)); - protocol.commit_prove_state(peer_index, prove_state); + .update_last_state(peer_index, last_state) + .unwrap(); + protocol + .peers() + .update_prove_request(peer_index, prove_request) + .unwrap(); + protocol + .commit_prove_state(peer_index, prove_state) + .unwrap(); protocol .peers() .update_blocks_proof_request(peer_index, Some(content)); @@ -383,10 +392,8 @@ async fn get_blocks_with_chunks() { .collect::>(); assert_eq!(actual_block_hashes.as_slice(), block_hashes.as_slice()); - let peer_state = protocol - .get_peer_state(&peer_index) - .expect("has peer state"); - assert!(peer_state.get_blocks_proof_request().is_none()); + let peer = protocol.get_peer(&peer_index).expect("has peer"); + assert!(peer.get_blocks_proof_request().is_none()); } } @@ -609,8 +616,9 @@ async fn test_send_blocks_proof(param: TestParameter) { let peer_index = PeerIndex::new(1); let peers = { - let peers = Arc::new(Peers::default()); + let peers = chain.create_peers(); peers.add_peer(peer_index); + peers.request_last_state(peer_index).unwrap(); peers }; let mut protocol = chain.create_light_client_protocol(peers); @@ -634,9 +642,6 @@ async fn test_send_blocks_proof(param: TestParameter) { .build_prove_request_content(&peer_state, &last_header) .expect("build prove request content"); let last_state = LastState::new(last_header); - protocol - .peers() - .update_last_state(peer_index, last_state.clone()); ProveRequest::new(last_state, content) }; let last_state = LastState::new(prove_request.get_last_header().to_owned()); @@ -657,11 +662,17 @@ async fn test_send_blocks_proof(param: 
TestParameter) { ¶m.block_numbers, ¶m.missing_block_hashes, ); - protocol.peers().update_last_state(peer_index, last_state); protocol .peers() - .update_prove_request(peer_index, Some(prove_request)); - protocol.commit_prove_state(peer_index, prove_state); + .update_last_state(peer_index, last_state) + .unwrap(); + protocol + .peers() + .update_prove_request(peer_index, prove_request) + .unwrap(); + protocol + .commit_prove_state(peer_index, prove_state) + .unwrap(); protocol .peers() .update_blocks_proof_request(peer_index, Some(content)); @@ -725,10 +736,8 @@ async fn test_send_blocks_proof(param: TestParameter) { assert_eq!(content.block_hashes().as_slice(), block_hashes.as_slice()); } - let peer_state = protocol - .get_peer_state(&peer_index) - .expect("has peer state"); - assert!(peer_state.get_blocks_proof_request().is_none()); + let peer = protocol.get_peer(&peer_index).expect("has peer"); + assert!(peer.get_blocks_proof_request().is_none()); } else { if param.missing_block_hashes != param.returned_missing_block_hashes || param.block_numbers != param.returned_headers diff --git a/src/tests/protocols/light_client/send_last_state.rs b/src/tests/protocols/light_client/send_last_state.rs index d6c6bb5..a1f3104 100644 --- a/src/tests/protocols/light_client/send_last_state.rs +++ b/src/tests/protocols/light_client/send_last_state.rs @@ -1,5 +1,3 @@ -use std::sync::Arc; - use ckb_network::{CKBProtocolHandler, PeerIndex, SupportProtocols}; use ckb_types::{ core::{EpochNumberWithFraction, HeaderBuilder}, @@ -9,7 +7,7 @@ use ckb_types::{ }; use crate::{ - protocols::{LastState, Peers, ProveRequest, ProveState, StatusCode}, + protocols::{LastState, ProveRequest, ProveState, StatusCode}, tests::{ prelude::*, utils::{MockChain, MockNetworkContext}, @@ -21,7 +19,7 @@ async fn peer_state_is_not_found() { let chain = MockChain::new_with_dummy_pow("test-light-client"); let nc = MockNetworkContext::new(SupportProtocols::LightClient); - let peers = Arc::new(Peers::default()); 
+ let peers = chain.create_peers(); let mut protocol = chain.create_light_client_protocol(peers); let data = { @@ -35,7 +33,7 @@ async fn peer_state_is_not_found() { let peer_index = PeerIndex::new(1); protocol.received(nc.context(), peer_index, data).await; - assert!(nc.banned_since(peer_index, StatusCode::PeerStateIsNotFound)); + assert!(nc.banned_since(peer_index, StatusCode::PeerIsNotFound)); } #[tokio::test] @@ -45,8 +43,9 @@ async fn invalid_nonce() { let peer_index = PeerIndex::new(1); let peers = { - let peers = Arc::new(Peers::default()); + let peers = chain.create_peers(); peers.add_peer(peer_index); + peers.request_last_state(peer_index).unwrap(); peers }; let mut protocol = chain.create_light_client_protocol(peers); @@ -71,8 +70,9 @@ async fn invalid_chain_root() { let peer_index = PeerIndex::new(1); let peers = { - let peers = Arc::new(Peers::default()); + let peers = chain.create_peers(); peers.add_peer(peer_index); + peers.request_last_state(peer_index).unwrap(); peers }; let mut protocol = chain.create_light_client_protocol(peers); @@ -106,8 +106,9 @@ async fn initialize_last_state() { let peer_index = PeerIndex::new(1); let peers = { - let peers = Arc::new(Peers::default()); + let peers = chain.create_peers(); peers.add_peer(peer_index); + peers.request_last_state(peer_index).unwrap(); peers }; let mut protocol = chain.create_light_client_protocol(peers); @@ -166,8 +167,9 @@ async fn update_to_continuous_last_state() { let peer_index = PeerIndex::new(1); let peers = { - let peers = Arc::new(Peers::default()); + let peers = chain.create_peers(); peers.add_peer(peer_index); + peers.request_last_state(peer_index).unwrap(); peers }; let mut protocol = chain.create_light_client_protocol(peers); @@ -196,9 +198,14 @@ async fn update_to_continuous_last_state() { let last_state = LastState::new(last_header); protocol .peers() - .update_last_state(peer_index, last_state.clone()); + .update_last_state(peer_index, last_state.clone()) + .unwrap(); 
ProveRequest::new(last_state, content) }; + protocol + .peers() + .update_prove_request(peer_index, prove_request.clone()) + .unwrap(); let prove_state = { let last_n_headers = (1..num) .into_iter() @@ -206,7 +213,9 @@ async fn update_to_continuous_last_state() { .collect::>(); ProveState::new_from_request(prove_request, Vec::new(), last_n_headers) }; - protocol.commit_prove_state(peer_index, prove_state); + protocol + .commit_prove_state(peer_index, prove_state) + .unwrap(); } num += 1; @@ -257,8 +266,9 @@ async fn update_to_noncontinuous_last_state() { let peer_index = PeerIndex::new(1); let peers = { - let peers = Arc::new(Peers::default()); + let peers = chain.create_peers(); peers.add_peer(peer_index); + peers.request_last_state(peer_index).unwrap(); peers }; let mut protocol = chain.create_light_client_protocol(peers); @@ -287,9 +297,14 @@ async fn update_to_noncontinuous_last_state() { let last_state = LastState::new(last_header); protocol .peers() - .update_last_state(peer_index, last_state.clone()); + .update_last_state(peer_index, last_state.clone()) + .unwrap(); ProveRequest::new(last_state, content) }; + protocol + .peers() + .update_prove_request(peer_index, prove_request.clone()) + .unwrap(); let prove_state = { let last_n_headers = (1..num) .into_iter() @@ -297,7 +312,9 @@ async fn update_to_noncontinuous_last_state() { .collect::>(); ProveState::new_from_request(prove_request, Vec::new(), last_n_headers) }; - protocol.commit_prove_state(peer_index, prove_state); + protocol + .commit_prove_state(peer_index, prove_state) + .unwrap(); } num += 2; @@ -348,8 +365,9 @@ async fn update_to_continuous_but_forked_last_state() { let peer_index = PeerIndex::new(1); let peers = { - let peers = Arc::new(Peers::default()); + let peers = chain.create_peers(); peers.add_peer(peer_index); + peers.request_last_state(peer_index).unwrap(); peers }; let mut protocol = chain.create_light_client_protocol(peers); @@ -383,9 +401,14 @@ async fn 
update_to_continuous_but_forked_last_state() { let last_state = LastState::new(last_header); protocol .peers() - .update_last_state(peer_index, last_state.clone()); + .update_last_state(peer_index, last_state.clone()) + .unwrap(); ProveRequest::new(last_state, content) }; + protocol + .peers() + .update_prove_request(peer_index, prove_request.clone()) + .unwrap(); let prove_state = { let last_n_headers = (1..num) .into_iter() @@ -393,7 +416,9 @@ async fn update_to_continuous_but_forked_last_state() { .collect::>(); ProveState::new_from_request(prove_request, Vec::new(), last_n_headers) }; - protocol.commit_prove_state(peer_index, prove_state); + protocol + .commit_prove_state(peer_index, prove_state) + .unwrap(); } let prev_last_header: VerifiableHeader = chain @@ -466,7 +491,7 @@ async fn update_to_proved_last_state() { let peer_index = PeerIndex::new(1); let peer_index_proved = PeerIndex::new(2); let peers = { - let peers = Arc::new(Peers::default()); + let peers = chain.create_peers(); peers.add_peer(peer_index); peers.add_peer(peer_index_proved); peers @@ -483,18 +508,15 @@ async fn update_to_proved_last_state() { let peer_state = protocol .get_peer_state(&peer_index_proved) .expect("has peer state"); + let last_header: VerifiableHeader = snapshot + .get_verifiable_header_by_number(num) + .expect("block stored") + .into(); let prove_request = { - let last_header: VerifiableHeader = snapshot - .get_verifiable_header_by_number(num) - .expect("block stored") - .into(); let content = protocol .build_prove_request_content(&peer_state, &last_header) .expect("build prove request content"); - let last_state = LastState::new(last_header); - protocol - .peers() - .update_last_state(peer_index_proved, last_state.clone()); + let last_state = LastState::new(last_header.clone()); ProveRequest::new(last_state, content) }; let prove_state = { @@ -504,7 +526,21 @@ async fn update_to_proved_last_state() { .collect::>(); ProveState::new_from_request(prove_request.clone(), 
Vec::new(), last_n_headers) }; - protocol.commit_prove_state(peer_index_proved, prove_state); + protocol + .peers() + .mock_prove_request(peer_index_proved, prove_request) + .unwrap(); + protocol + .commit_prove_state(peer_index_proved, prove_state) + .unwrap(); + + let prove_state = protocol + .get_peer_state(&peer_index_proved) + .expect("has peer state") + .get_prove_state() + .expect("has prove state") + .to_owned(); + assert!(prove_state.is_same_as(&last_header)); } // Run the test. @@ -523,6 +559,7 @@ async fn update_to_proved_last_state() { .as_bytes(); let last_header: VerifiableHeader = last_header.into(); + protocol.peers().request_last_state(peer_index).unwrap(); protocol.received(nc.context(), peer_index, data).await; assert!(nc.sent_messages().borrow().is_empty()); diff --git a/src/tests/protocols/light_client/send_last_state_proof.rs b/src/tests/protocols/light_client/send_last_state_proof.rs index 67e456a..d0f90a8 100644 --- a/src/tests/protocols/light_client/send_last_state_proof.rs +++ b/src/tests/protocols/light_client/send_last_state_proof.rs @@ -8,7 +8,7 @@ use ckb_types::{ use log::debug; use crate::{ - protocols::{light_client::prelude::*, LastState, Peers, ProveRequest, ProveState, StatusCode}, + protocols::{light_client::prelude::*, LastState, ProveRequest, ProveState, StatusCode}, tests::{ prelude::*, utils::{setup, MockChain, MockNetworkContext}, @@ -34,7 +34,7 @@ async fn peer_state_is_not_found() { let chain = MockChain::new_with_dummy_pow("test-light-client"); let nc = MockNetworkContext::new(SupportProtocols::LightClient); - let peers = Arc::new(Peers::default()); + let peers = chain.create_peers(); let mut protocol = chain.create_light_client_protocol(peers); let data = { @@ -48,7 +48,7 @@ async fn peer_state_is_not_found() { let peer_index = PeerIndex::new(1); protocol.received(nc.context(), peer_index, data).await; - assert!(nc.banned_since(peer_index, StatusCode::PeerStateIsNotFound)); + assert!(nc.banned_since(peer_index, 
StatusCode::PeerIsNotFound)); } #[tokio::test] @@ -58,8 +58,9 @@ async fn no_matched_request() { let peer_index = PeerIndex::new(1); let peers = { - let peers = Arc::new(Peers::default()); + let peers = chain.create_peers(); peers.add_peer(peer_index); + peers.request_last_state(peer_index).unwrap(); peers }; let mut protocol = chain.create_light_client_protocol(peers); @@ -89,8 +90,9 @@ async fn update_last_state() { let peer_index = PeerIndex::new(1); let peers = { - let peers = Arc::new(Peers::default()); + let peers = chain.create_peers(); peers.add_peer(peer_index); + peers.request_last_state(peer_index).unwrap(); peers }; let mut protocol = chain.create_light_client_protocol(peers); @@ -116,12 +118,14 @@ async fn update_last_state() { let last_state = LastState::new(last_header); protocol .peers() - .update_last_state(peer_index, last_state.clone()); + .update_last_state(peer_index, last_state.clone()) + .unwrap(); ProveRequest::new(last_state, content) }; protocol .peers() - .update_prove_request(peer_index, Some(prove_request)); + .update_prove_request(peer_index, prove_request) + .unwrap(); } num += 2; @@ -170,8 +174,9 @@ async fn unknown_proof() { let peer_index = PeerIndex::new(1); let peers = { - let peers = Arc::new(Peers::default()); + let peers = chain.create_peers(); peers.add_peer(peer_index); + peers.request_last_state(peer_index).unwrap(); peers }; let mut protocol = chain.create_light_client_protocol(peers); @@ -197,12 +202,14 @@ async fn unknown_proof() { let last_state = LastState::new(last_header); protocol .peers() - .update_last_state(peer_index, last_state.clone()); + .update_last_state(peer_index, last_state.clone()) + .unwrap(); ProveRequest::new(last_state, content) }; protocol .peers() - .update_prove_request(peer_index, Some(prove_request)); + .update_prove_request(peer_index, prove_request) + .unwrap(); } num += 2; @@ -240,8 +247,9 @@ async fn headers_should_be_sorted() { let peer_index = PeerIndex::new(1); let peers = { - let peers 
= Arc::new(Peers::default()); + let peers = chain.create_peers(); peers.add_peer(peer_index); + peers.request_last_state(peer_index).unwrap(); peers }; let mut protocol = chain.create_light_client_protocol(peers); @@ -271,12 +279,14 @@ async fn headers_should_be_sorted() { let last_state = LastState::new(last_header); protocol .peers() - .update_last_state(peer_index, last_state.clone()); + .update_last_state(peer_index, last_state.clone()) + .unwrap(); ProveRequest::new(last_state, content) }; protocol .peers() - .update_prove_request(peer_index, Some(prove_request)); + .update_prove_request(peer_index, prove_request) + .unwrap(); } // Run the test. @@ -323,8 +333,9 @@ async fn valid_proof_with_boundary_not_in_last_n() { let peer_index = PeerIndex::new(1); let peers = { - let peers = Arc::new(Peers::default()); + let peers = chain.create_peers(); peers.add_peer(peer_index); + peers.request_last_state(peer_index).unwrap(); peers }; let mut protocol = chain.create_light_client_protocol(peers); @@ -348,10 +359,14 @@ async fn valid_proof_with_boundary_not_in_last_n() { protocol.last_n_blocks(), ); let last_state = LastState::new(prove_request.get_last_header().to_owned()); - protocol.peers().update_last_state(peer_index, last_state); protocol .peers() - .update_prove_request(peer_index, Some(prove_request)); + .update_last_state(peer_index, last_state) + .unwrap(); + protocol + .peers() + .update_prove_request(peer_index, prove_request) + .unwrap(); } // Run the test. 
@@ -413,8 +428,9 @@ async fn valid_proof_with_boundary_in_last_n() { let peer_index = PeerIndex::new(1); let peers = { - let peers = Arc::new(Peers::default()); + let peers = chain.create_peers(); peers.add_peer(peer_index); + peers.request_last_state(peer_index).unwrap(); peers }; let mut protocol = chain.create_light_client_protocol(peers); @@ -438,10 +454,14 @@ async fn valid_proof_with_boundary_in_last_n() { protocol.last_n_blocks(), ); let last_state = LastState::new(prove_request.get_last_header().to_owned()); - protocol.peers().update_last_state(peer_index, last_state); protocol .peers() - .update_prove_request(peer_index, Some(prove_request)); + .update_last_state(peer_index, last_state) + .unwrap(); + protocol + .peers() + .update_prove_request(peer_index, prove_request) + .unwrap(); } // Run the test. @@ -503,8 +523,9 @@ async fn valid_proof_with_no_matched_sample() { let peer_index = PeerIndex::new(1); let peers = { - let peers = Arc::new(Peers::default()); + let peers = chain.create_peers(); peers.add_peer(peer_index); + peers.request_last_state(peer_index).unwrap(); peers }; let mut protocol = chain.create_light_client_protocol(peers); @@ -563,10 +584,14 @@ async fn valid_proof_with_no_matched_sample() { ProveRequest::new(last_state, content) }; let last_state = LastState::new(prove_request.get_last_header().to_owned()); - protocol.peers().update_last_state(peer_index, last_state); protocol .peers() - .update_prove_request(peer_index, Some(prove_request)); + .update_last_state(peer_index, last_state) + .unwrap(); + protocol + .peers() + .update_prove_request(peer_index, prove_request) + .unwrap(); } // Run the test. 
@@ -627,8 +652,9 @@ async fn valid_proof_with_prove_state() { let peer_index = PeerIndex::new(1); let peers = { - let peers = Arc::new(Peers::default()); + let peers = chain.create_peers(); peers.add_peer(peer_index); + peers.request_last_state(peer_index).unwrap(); peers }; let mut protocol = chain.create_light_client_protocol(peers); @@ -654,6 +680,15 @@ async fn valid_proof_with_prove_state() { prev_boundary_number, protocol.last_n_blocks(), ); + let prev_last_state = LastState::new(prev_prove_request.get_last_header().to_owned()); + protocol + .peers() + .update_last_state(peer_index, prev_last_state) + .unwrap(); + protocol + .peers() + .update_prove_request(peer_index, prev_prove_request.clone()) + .unwrap(); let prove_state = { let prev_last_n_blocks_start_number = if prev_last_number > protocol.last_n_blocks() + 1 { @@ -667,7 +702,9 @@ async fn valid_proof_with_prove_state() { .collect::>(); ProveState::new_from_request(prev_prove_request, Vec::new(), last_n_headers) }; - protocol.commit_prove_state(peer_index, prove_state); + protocol + .commit_prove_state(peer_index, prove_state) + .unwrap(); let prove_request = chain.build_prove_request( prev_last_number, num, @@ -676,10 +713,14 @@ async fn valid_proof_with_prove_state() { protocol.last_n_blocks(), ); let last_state = LastState::new(prove_request.get_last_header().to_owned()); - protocol.peers().update_last_state(peer_index, last_state); protocol .peers() - .update_prove_request(peer_index, Some(prove_request)); + .update_last_state(peer_index, last_state) + .unwrap(); + protocol + .peers() + .update_prove_request(peer_index, prove_request) + .unwrap(); } // Run the test. 
@@ -741,8 +782,9 @@ async fn valid_proof_with_reorg_blocks() { let peer_index = PeerIndex::new(1); let peers = { - let peers = Arc::new(Peers::default()); + let peers = chain.create_peers(); peers.add_peer(peer_index); + peers.request_last_state(peer_index).unwrap(); peers }; let mut protocol = chain.create_light_client_protocol(peers); @@ -768,6 +810,15 @@ async fn valid_proof_with_reorg_blocks() { prev_boundary_number, protocol.last_n_blocks(), ); + let prev_last_state = LastState::new(prev_prove_request.get_last_header().to_owned()); + protocol + .peers() + .update_last_state(peer_index, prev_last_state) + .unwrap(); + protocol + .peers() + .update_prove_request(peer_index, prev_prove_request.clone()) + .unwrap(); let prove_state = { let prev_last_n_blocks_start_number = if prev_last_number > protocol.last_n_blocks() + 1 { @@ -781,7 +832,9 @@ async fn valid_proof_with_reorg_blocks() { .collect::>(); ProveState::new_from_request(prev_prove_request, Vec::new(), last_n_headers) }; - protocol.commit_prove_state(peer_index, prove_state); + protocol + .commit_prove_state(peer_index, prove_state) + .unwrap(); let prove_request = chain.build_prove_request( prev_last_number, num, @@ -789,11 +842,10 @@ async fn valid_proof_with_reorg_blocks() { boundary_number, protocol.last_n_blocks(), ); - let last_state = LastState::new(prove_request.get_last_header().to_owned()); - protocol.peers().update_last_state(peer_index, last_state); protocol .peers() - .update_prove_request(peer_index, Some(prove_request)); + .mock_prove_request(peer_index, prove_request) + .unwrap(); } // Run the test. 
@@ -873,8 +925,9 @@ async fn test_parent_chain_root_for_the_genesis_block(should_passed: bool) { let peer_index = PeerIndex::new(1); let peers = { - let peers = Arc::new(Peers::default()); + let peers = chain.create_peers(); peers.add_peer(peer_index); + peers.request_last_state(peer_index).unwrap(); peers }; let mut protocol = chain.create_light_client_protocol(peers); @@ -899,10 +952,14 @@ async fn test_parent_chain_root_for_the_genesis_block(should_passed: bool) { protocol.last_n_blocks(), ); let last_state = LastState::new(prove_request.get_last_header().to_owned()); - protocol.peers().update_last_state(peer_index, last_state); protocol .peers() - .update_prove_request(peer_index, Some(prove_request)); + .update_last_state(peer_index, last_state) + .unwrap(); + protocol + .peers() + .update_prove_request(peer_index, prove_request) + .unwrap(); } // Run the test. @@ -977,8 +1034,9 @@ async fn invalid_parent_chain_root_for_non_genesis_blocks() { let peer_index = PeerIndex::new(1); let peers = { - let peers = Arc::new(Peers::default()); + let peers = chain.create_peers(); peers.add_peer(peer_index); + peers.request_last_state(peer_index).unwrap(); peers }; let mut protocol = chain.create_light_client_protocol(peers); @@ -1003,10 +1061,14 @@ async fn invalid_parent_chain_root_for_non_genesis_blocks() { protocol.last_n_blocks(), ); let last_state = LastState::new(prove_request.get_last_header().to_owned()); - protocol.peers().update_last_state(peer_index, last_state); protocol .peers() - .update_prove_request(peer_index, Some(prove_request)); + .update_last_state(peer_index, last_state) + .unwrap(); + protocol + .peers() + .update_prove_request(peer_index, prove_request) + .unwrap(); } // Run the test. 
@@ -1269,8 +1331,9 @@ async fn test_send_last_state_proof(param: TestParameter) { let peer_index = PeerIndex::new(1); let peers = { - let peers = Arc::new(Peers::default()); + let peers = chain.create_peers(); peers.add_peer(peer_index); + peers.request_last_state(peer_index).unwrap(); peers }; let mut protocol = chain.create_light_client_protocol(peers); @@ -1301,10 +1364,14 @@ async fn test_send_last_state_proof(param: TestParameter) { last_n_blocks, ); let last_state = LastState::new(prove_request.get_last_header().to_owned()); - protocol.peers().update_last_state(peer_index, last_state); protocol .peers() - .update_prove_request(peer_index, Some(prove_request)); + .update_last_state(peer_index, last_state) + .unwrap(); + protocol + .peers() + .update_prove_request(peer_index, prove_request) + .unwrap(); } // Run the test. @@ -1623,8 +1690,9 @@ async fn test_with_reorg_blocks(param: ReorgTestParameter) { let peer_index = PeerIndex::new(1); let downloading_matched_block = H256(rand::random()); let peers = { - let peers = Arc::new(Peers::default()); + let peers = chain.create_peers(); peers.add_peer(peer_index); + peers.request_last_state(peer_index).unwrap(); peers .matched_blocks() .write() @@ -1698,6 +1766,15 @@ async fn test_with_reorg_blocks(param: ReorgTestParameter) { prev_boundary_number, last_n_blocks, ); + let prev_last_state = LastState::new(prev_prove_request.get_last_header().to_owned()); + protocol + .peers() + .update_last_state(peer_index, prev_last_state) + .unwrap(); + protocol + .peers() + .update_prove_request(peer_index, prev_prove_request.clone()) + .unwrap(); let prove_state = { let prev_last_n_blocks_start_number = if prev_last_number > last_n_blocks + 1 { prev_last_number - last_n_blocks @@ -1710,7 +1787,9 @@ async fn test_with_reorg_blocks(param: ReorgTestParameter) { .collect::>(); ProveState::new_from_request(prev_prove_request, Vec::new(), last_n_headers) }; - protocol.commit_prove_state(peer_index, prove_state); + protocol + 
.commit_prove_state(peer_index, prove_state) + .unwrap(); } // Setup the storage data. @@ -1765,10 +1844,14 @@ async fn test_with_reorg_blocks(param: ReorgTestParameter) { prove_request.long_fork_detected(); } let last_state = LastState::new(prove_request.get_last_header().to_owned()); - protocol.peers().update_last_state(peer_index, last_state); protocol .peers() - .update_prove_request(peer_index, Some(prove_request)); + .update_last_state(peer_index, last_state) + .unwrap(); + protocol + .peers() + .update_prove_request(peer_index, prove_request) + .unwrap(); let snapshot = chain.shared().snapshot(); let last_header = snapshot diff --git a/src/tests/protocols/light_client/send_transactions_proof.rs b/src/tests/protocols/light_client/send_transactions_proof.rs index 99c5bec..eb310a1 100644 --- a/src/tests/protocols/light_client/send_transactions_proof.rs +++ b/src/tests/protocols/light_client/send_transactions_proof.rs @@ -10,10 +10,7 @@ use ckb_types::{ }; use crate::{ - protocols::{ - light_client::constant::FETCH_HEADER_TX_TOKEN, FetchInfo, LastState, Peers, ProveRequest, - ProveState, StatusCode, - }, + protocols::{light_client::constant::FETCH_HEADER_TX_TOKEN, FetchInfo, StatusCode}, tests::{ prelude::*, utils::{MockChain, MockNetworkContext}, @@ -112,13 +109,7 @@ async fn test_send_txs_proof_ok() { }; let peers = { - let last_state = LastState::new(last_header.clone().into()); - let request = ProveRequest::new(last_state, Default::default()); - let prove_state = - ProveState::new_from_request(request, Default::default(), Default::default()); - let peers = Arc::new(Peers::default()); - peers.add_peer(peer_index); - peers.commit_prove_state(peer_index, prove_state); + let peers = chain.create_peers(); let txs_proof_request = packed::GetTransactionsProof::new_builder() .last_hash(last_header.header().calc_header_hash()) .tx_hashes( @@ -130,6 +121,10 @@ async fn test_send_txs_proof_ok() { .pack(), ) .build(); + peers.add_peer(peer_index); + peers + 
.mock_prove_state(peer_index, last_header.into()) + .unwrap(); peers.update_txs_proof_request(peer_index, Some(txs_proof_request)); for tx_hash in &missing_tx_hashes { peers @@ -251,17 +246,15 @@ async fn test_send_txs_proof_invalid_mmr_proof() { }; let peers = { - let last_state = LastState::new(last_header.clone().into()); - let request = ProveRequest::new(last_state, Default::default()); - let prove_state = - ProveState::new_from_request(request, Default::default(), Default::default()); - let peers = Arc::new(Peers::default()); - peers.add_peer(peer_index); - peers.commit_prove_state(peer_index, prove_state); + let peers = chain.create_peers(); let txs_proof_request = packed::GetTransactionsProof::new_builder() .last_hash(last_header.header().calc_header_hash()) .tx_hashes(tx_hashes.clone().pack()) .build(); + peers.add_peer(peer_index); + peers + .mock_prove_state(peer_index, last_header.into()) + .unwrap(); peers.update_txs_proof_request(peer_index, Some(txs_proof_request)); peers }; @@ -383,17 +376,15 @@ async fn test_send_txs_proof_invalid_merkle_proof() { }; let peers = { - let last_state = LastState::new(last_header.clone().into()); - let request = ProveRequest::new(last_state, Default::default()); - let prove_state = - ProveState::new_from_request(request, Default::default(), Default::default()); - let peers = Arc::new(Peers::default()); - peers.add_peer(peer_index); - peers.commit_prove_state(peer_index, prove_state); + let peers = chain.create_peers(); let txs_proof_request = packed::GetTransactionsProof::new_builder() .last_hash(last_header.header().calc_header_hash()) .tx_hashes(tx_hashes.clone().pack()) .build(); + peers.add_peer(peer_index); + peers + .mock_prove_state(peer_index, last_header.into()) + .unwrap(); peers.update_txs_proof_request(peer_index, Some(txs_proof_request)); peers }; @@ -440,16 +431,14 @@ async fn test_send_txs_proof_is_empty() { }; let peers = { - let last_state = LastState::new(last_header.clone().into()); - let request = 
ProveRequest::new(last_state, Default::default()); - let prove_state = - ProveState::new_from_request(request, Default::default(), Default::default()); - let peers = Arc::new(Peers::default()); - peers.add_peer(peer_index); - peers.commit_prove_state(peer_index, prove_state); + let peers = chain.create_peers(); let txs_proof_request = packed::GetTransactionsProof::new_builder() .last_hash(last_header.header().calc_header_hash()) .build(); + peers.add_peer(peer_index); + peers + .mock_prove_state(peer_index, last_header.into()) + .unwrap(); peers.update_txs_proof_request(peer_index, Some(txs_proof_request)); peers }; @@ -470,7 +459,7 @@ async fn test_send_headers_txs_request() { let peer_index = PeerIndex::new(3); let peers = { - let peers = Arc::new(Peers::new(Default::default())); + let peers = chain.create_peers(); peers.fetching_headers().insert( h256!("0xaa22").pack(), FetchInfo::new(111, 3344, false, false), @@ -486,19 +475,14 @@ async fn test_send_headers_txs_request() { .fetching_txs() .insert(h256!("0xbb33").pack(), FetchInfo::new(111, 0, false, false)); - peers.add_peer(peer_index); - let tip_header = VerifiableHeader::new( chain.client_storage().get_tip_header().into_view(), Default::default(), None, Default::default(), ); - let last_state = LastState::new(tip_header); - let request = ProveRequest::new(last_state, Default::default()); - let prove_state = - ProveState::new_from_request(request, Default::default(), Default::default()); - peers.commit_prove_state(peer_index, prove_state); + peers.add_peer(peer_index); + peers.mock_prove_state(peer_index, tip_header).unwrap(); peers }; @@ -524,7 +508,7 @@ async fn test_send_headers_txs_request() { .first_sent() > 0 ); - let peer_state = peers.get_state(&peer_index).unwrap(); - assert!(peer_state.get_blocks_proof_request().is_some()); - assert!(peer_state.get_txs_proof_request().is_some()); + let peer = peers.get_peer(&peer_index).unwrap(); + assert!(peer.get_blocks_proof_request().is_some()); + 
assert!(peer.get_txs_proof_request().is_some()); } diff --git a/src/tests/protocols/synchronizer.rs b/src/tests/protocols/synchronizer.rs index cf0e8b8..04b0cc8 100644 --- a/src/tests/protocols/synchronizer.rs +++ b/src/tests/protocols/synchronizer.rs @@ -8,7 +8,6 @@ use ckb_types::{ }; use crate::{ - protocols::Peers, storage::{ScriptStatus, ScriptType}, tests::{ prelude::*, @@ -48,7 +47,7 @@ async fn test_sync_add_block() { ); let peer_index = PeerIndex::new(3); let peers = { - let peers = Arc::new(Peers::default()); + let peers = chain.create_peers(); peers.add_peer(peer_index); { let mut matched_blocks = peers.matched_blocks().write().unwrap(); diff --git a/src/tests/service.rs b/src/tests/service.rs index 13b66d3..f8d9e4a 100644 --- a/src/tests/service.rs +++ b/src/tests/service.rs @@ -14,23 +14,20 @@ use ckb_types::{ }; use crate::{ - protocols::{FetchInfo, Peers, PendingTxs}, + protocols::{FetchInfo, PendingTxs}, service::{ BlockFilterRpc, BlockFilterRpcImpl, ChainRpc, ChainRpcImpl, FetchStatus, Order, ScriptStatus, ScriptType, SearchKey, SearchKeyFilter, SetScriptsCommand, Status, TransactionRpc, TransactionRpcImpl, TransactionWithStatus, TxStatus, }, storage::{self, StorageWithChainData}, - tests::utils::new_storage, + tests::utils::{create_peers, new_storage}, }; #[test] fn rpc() { let storage = new_storage("rpc"); - let swc = StorageWithChainData::new( - storage.clone(), - Arc::new(Peers::new(RwLock::new(Vec::new()))), - ); + let swc = StorageWithChainData::new(storage.clone(), create_peers()); let rpc = BlockFilterRpcImpl { swc }; // setup test data @@ -735,7 +732,12 @@ fn rpc() { .collect(); // insert fetched headers - let peers = Arc::new(Peers::new(RwLock::new(vec![extra_header.clone()]))); + let peers = create_peers(); + peers + .last_headers() + .write() + .unwrap() + .push(extra_header.clone()); peers.fetching_headers().insert( h256!("0xaa22").pack(), FetchInfo::new(1111, 3344, false, false), @@ -822,10 +824,7 @@ fn rpc() { "rollback should update 
script filter block number" ); - let swc = StorageWithChainData::new( - storage.clone(), - Arc::new(Peers::new(RwLock::new(Vec::new()))), - ); + let swc = StorageWithChainData::new(storage.clone(), create_peers()); let rpc = BlockFilterRpcImpl { swc }; // test get_cells rpc after rollback @@ -1009,10 +1008,7 @@ fn rpc() { #[test] fn get_cells_capacity_bug() { let storage = new_storage("get_cells_capacity_bug"); - let swc = StorageWithChainData::new( - storage.clone(), - Arc::new(Peers::new(RwLock::new(Vec::new()))), - ); + let swc = StorageWithChainData::new(storage.clone(), create_peers()); let rpc = BlockFilterRpcImpl { swc }; // setup test data @@ -1137,10 +1133,7 @@ fn get_cells_capacity_bug() { #[test] fn get_cells_after_rollback_bug() { let storage = new_storage("get_cells_after_rollback_bug"); - let swc = StorageWithChainData::new( - storage.clone(), - Arc::new(Peers::new(RwLock::new(Vec::new()))), - ); + let swc = StorageWithChainData::new(storage.clone(), create_peers()); let rpc = BlockFilterRpcImpl { swc }; // setup test data @@ -1333,7 +1326,7 @@ fn get_cells_after_rollback_bug() { #[test] fn test_set_scripts_clear_matched_blocks() { let storage = new_storage("set-scripts-clear-matched-blocks"); - let peers = Arc::new(Peers::new(RwLock::new(Vec::new()))); + let peers = create_peers(); let swc = StorageWithChainData::new(storage.clone(), Arc::clone(&peers)); let rpc = BlockFilterRpcImpl { swc }; @@ -1383,7 +1376,7 @@ fn test_set_scripts_clear_matched_blocks() { #[test] fn test_set_scripts_command() { let storage = new_storage("set-scripts-command"); - let peers = Arc::new(Peers::new(RwLock::new(Vec::new()))); + let peers = create_peers(); let swc = StorageWithChainData::new(storage.clone(), Arc::clone(&peers)); let rpc = BlockFilterRpcImpl { swc }; diff --git a/src/tests/utils/mod.rs b/src/tests/utils/mod.rs index 27564d3..4caac6b 100644 --- a/src/tests/utils/mod.rs +++ b/src/tests/utils/mod.rs @@ -1,3 +1,5 @@ +use std::sync::Arc; + use 
env_logger::{Builder, Target}; use log::LevelFilter; @@ -7,7 +9,7 @@ mod network_context; pub(crate) use chain::MockChain; pub(crate) use network_context::MockNetworkContext; -use crate::storage::Storage; +use crate::{protocols::Peers, protocols::CHECK_POINT_INTERVAL, storage::Storage}; pub(crate) fn setup() { let _ = Builder::new() @@ -23,3 +25,13 @@ pub(crate) fn new_storage(prefix: &str) -> Storage { let tmp_dir = tempfile::Builder::new().prefix(prefix).tempdir().unwrap(); Storage::new(tmp_dir.path().to_str().unwrap()) } + +pub(crate) fn create_peers() -> Arc { + let max_outbound_peers = 1; + let peers = Peers::new( + max_outbound_peers, + CHECK_POINT_INTERVAL, + (0, Default::default()), + ); + Arc::new(peers) +} diff --git a/src/tests/verify.rs b/src/tests/verify.rs index f8a1998..4dcebf8 100644 --- a/src/tests/verify.rs +++ b/src/tests/verify.rs @@ -29,7 +29,7 @@ fn verify_valid_transaction() { // https://pudge.explorer.nervos.org/transaction/0xf34f4eaac4a662927fb52d4cb608e603150b9e0678a0f5ed941e3cfd5b68fb30 let transaction: packed::Transaction = 
serde_json::from_str::(r#"{"cell_deps":[{"dep_type":"dep_group","out_point":{"index":"0x0","tx_hash":"0xf8de3bb47d055cdf460d93a2a6e1b05f7432f9777c8c474abf4eec1d4aee5d37"}}],"header_deps":[],"inputs":[{"previous_output":{"index":"0x7","tx_hash":"0x8f8c79eb6671709633fe6a46de93c0fedc9c1b8a6527a18d3983879542635c9f"},"since":"0x0"}],"outputs":[{"capacity":"0x470de4df820000","lock":{"args":"0xff5094c2c5f476fc38510018609a3fd921dd28ad","code_hash":"0x9bd7e06f3ecf4be0f2fcd2188b23f1b9fcc88e5d4b65a8637b17723bbda3cce8","hash_type":"type"},"type":null},{"capacity":"0xb61134e5a35e800","lock":{"args":"0x64257f00b6b63e987609fa9be2d0c86d351020fb","code_hash":"0x9bd7e06f3ecf4be0f2fcd2188b23f1b9fcc88e5d4b65a8637b17723bbda3cce8","hash_type":"type"},"type":null}],"outputs_data":["0x","0x"],"version":"0x0","witnesses":["0x5500000010000000550000005500000041000000af34b54bebf8c5971da6a880f2df5a186c3f8d0b5c9a1fe1a90c95b8a4fb89ef3bab1ccec13797dcb3fee80400f953227dd7741227e08032e3598e16ccdaa49c00"]}"#).unwrap().into(); - let swc = StorageWithChainData::new(storage.to_owned(), Default::default()); + let swc = StorageWithChainData::new(storage.to_owned(), chain.create_peers()); let result = verify_tx(transaction.into_view(), &swc, &consensus).unwrap(); assert_eq!(1682789, result); } @@ -39,7 +39,7 @@ fn non_contextual_transaction_verifier() { let chain = MockChain::new_with_default_pow("non_contextual_transaction_verifier"); let storage = chain.client_storage(); let consensus = chain.consensus(); - let swc = StorageWithChainData::new(storage.to_owned(), Default::default()); + let swc = StorageWithChainData::new(storage.to_owned(), chain.create_peers()); // duplicate cell deps base on a valid transaction // https://pudge.explorer.nervos.org/transaction/0xf34f4eaac4a662927fb52d4cb608e603150b9e0678a0f5ed941e3cfd5b68fb30 diff --git a/src/utils/network.rs b/src/utils/network.rs index cfc8343..661dee2 100644 --- a/src/utils/network.rs +++ b/src/utils/network.rs @@ -17,12 +17,12 @@ pub(crate) fn 
prove_or_download_matched_blocks( let last_hash = best_tip.calc_header_hash(); loop { - if let Some(peer) = best_peers + if let Some(peer_index) = best_peers .iter() - .filter(|peer| { + .filter(|peer_index| { peers - .get_state(peer) - .map(|state| state.get_blocks_proof_request().is_none()) + .get_peer(peer_index) + .map(|peer| peer.get_blocks_proof_request().is_none()) .unwrap_or(false) }) .collect::>() @@ -34,7 +34,7 @@ pub(crate) fn prove_or_download_matched_blocks( if !blocks_to_prove.is_empty() { debug!( "send get blocks proof request to peer: {}, count={}", - peer, + peer_index, blocks_to_prove.len() ); let content = packed::GetBlocksProof::new_builder() @@ -45,10 +45,12 @@ pub(crate) fn prove_or_download_matched_blocks( .set(content.clone()) .build() .as_bytes(); - peers.update_blocks_proof_request(*peer, Some(content)); - if let Err(err) = - nc.send_message(SupportProtocols::LightClient.protocol_id(), *peer, message) - { + peers.update_blocks_proof_request(*peer_index, Some(content)); + if let Err(err) = nc.send_message( + SupportProtocols::LightClient.protocol_id(), + *peer_index, + message, + ) { let error_message = format!("nc.send_message LightClientMessage, error: {:?}", err); error!("{}", error_message); @@ -63,12 +65,12 @@ pub(crate) fn prove_or_download_matched_blocks( } loop { - if let Some(peer) = best_peers + if let Some(peer_index) = best_peers .iter() - .filter(|peer| { + .filter(|peer_index| { peers - .get_state(peer) - .map(|state| state.get_blocks_request().is_none()) + .get_peer(peer_index) + .map(|peer| peer.get_blocks_request().is_none()) .unwrap_or(false) }) .collect::>() @@ -80,10 +82,10 @@ pub(crate) fn prove_or_download_matched_blocks( if !blocks_to_download.is_empty() { debug!( "send get blocks request to peer: {}, count={}", - peer, + peer_index, blocks_to_download.len() ); - peers.update_blocks_request(*peer, Some(blocks_to_download.clone())); + peers.update_blocks_request(*peer_index, Some(blocks_to_download.clone())); let 
content = packed::GetBlocks::new_builder() .block_hashes(blocks_to_download.pack()) .build(); @@ -92,7 +94,7 @@ pub(crate) fn prove_or_download_matched_blocks( .build() .as_bytes(); if let Err(err) = - nc.send_message(SupportProtocols::Sync.protocol_id(), *peer, message) + nc.send_message(SupportProtocols::Sync.protocol_id(), *peer_index, message) { let error_message = format!("nc.send_message SyncMessage, error: {:?}", err); error!("{}", error_message);