diff --git a/Cargo.lock b/Cargo.lock index 72bcbba42..d92a97bce 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6,7 +6,9 @@ version = 3 name = "addresses" version = "0.1.0" dependencies = [ + "borsh", "criterion", + "serde", ] [[package]] @@ -38,6 +40,15 @@ dependencies = [ "version_check", ] +[[package]] +name = "aho-corasick" +version = "0.7.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac" +dependencies = [ + "memchr", +] + [[package]] name = "anes" version = "0.1.6" @@ -74,11 +85,11 @@ dependencies = [ [[package]] name = "async-channel" -version = "1.7.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e14485364214912d3b19cc3435dde4df66065127f05fa0d75c712f36f12c2f28" +checksum = "cf46fee83e5ccffc220104713af3292ff9bc7c64c7de289f66dae8e38d826833" dependencies = [ - "concurrent-queue 1.2.4", + "concurrent-queue", "event-listener", "futures-core", ] @@ -91,7 +102,7 @@ checksum = "17adb73da160dfb475c183343c8cccd80721ea5a605d3eb57125f0a7b7a92d0b" dependencies = [ "async-lock", "async-task", - "concurrent-queue 2.0.0", + "concurrent-queue", "fastrand", "futures-lite", "slab", @@ -120,7 +131,7 @@ checksum = "8c374dda1ed3e7d8f0d9ba58715f924862c63eae6849c92d3a18e7fbde9e2794" dependencies = [ "async-lock", "autocfg", - "concurrent-queue 2.0.0", + "concurrent-queue", "futures-lite", "libc", "log", @@ -198,9 +209,9 @@ checksum = "7a40729d2133846d9ed0ea60a8b9541bccddab49cd30f0715a1da672fe9a2524" [[package]] name = "async-trait" -version = "0.1.58" +version = "0.1.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e805d94e6b5001b651426cf4cd446b1ab5f319d27bab5c644f61de0a804360c" +checksum = "31e6e93155431f3931513b243d371981bb2770112b370c82745a1d19d2f99364" dependencies = [ "proc-macro2", "quote", @@ -232,9 +243,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.5.17" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acee9fd5073ab6b045a275b3e709c163dd36c90685219cb21804a147b58dba43" +checksum = "08b108ad2665fa3f6e6a517c3d80ec3e77d224c47d605167aefaa5d7ef97fa48" dependencies = [ "async-trait", "axum-core", @@ -250,9 +261,9 @@ dependencies = [ "mime", "percent-encoding", "pin-project-lite", + "rustversion", "serde", "sync_wrapper", - "tokio", "tower", "tower-http", "tower-layer", @@ -261,9 +272,9 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.2.9" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37e5939e02c56fecd5c017c37df4238c0a839fa76b7f97acdd7efb804fd181cc" +checksum = "79b8558f5a0581152dc94dcd289132a1d377494bdeafcd41869b3258e3e2ad92" dependencies = [ "async-trait", "bytes", @@ -271,6 +282,7 @@ dependencies = [ "http", "http-body", "mime", + "rustversion", "tower-layer", "tower-service", ] @@ -504,12 +516,6 @@ dependencies = [ "pkg-config", ] -[[package]] -name = "cache-padded" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1db59621ec70f09c5e9b597b220c7a2b43611f4710dc03ceb8748637775692c" - [[package]] name = "cast" version = "0.3.0" @@ -592,9 +598,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.0.27" +version = "4.0.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0acbd8d28a0a60d7108d7ae850af6ba34cf2d1257fc646980e5f97ce14275966" +checksum = 
"4d63b9e9c07271b9957ad22c173bae2a4d9a81127680962039296abcd2f8251d" dependencies = [ "bitflags", "clap_derive", @@ -636,15 +642,6 @@ dependencies = [ "os_str_bytes", ] -[[package]] -name = "concurrent-queue" -version = "1.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af4780a44ab5696ea9e28294517f1fffb421a83a25af521333c838635509db9c" -dependencies = [ - "cache-padded", -] - [[package]] name = "concurrent-queue" version = "2.0.0" @@ -670,6 +667,7 @@ dependencies = [ "itertools", "kaspa-core", "kaspa-utils", + "log", "math", "merkle", "muhash", @@ -694,6 +692,7 @@ version = "0.1.0" dependencies = [ "borsh", "faster-hex", + "futures-util", "hashes", "math", "merkle", @@ -936,12 +935,45 @@ dependencies = [ "subtle", ] +[[package]] +name = "dirs" +version = "4.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca3aa72a6f96ea37bbc5aa912f6788242832f75369bdfdadcb0e38423f100059" +dependencies = [ + "dirs-sys", +] + +[[package]] +name = "dirs-sys" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b1d1d91c932ef41c0f2663aa8b0ca0342d444d842c06914aa0a7e352d0bada6" +dependencies = [ + "libc", + "redox_users", + "winapi", +] + [[package]] name = "either" version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797" +[[package]] +name = "env_logger" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85cdab6a89accf66733ad5a1693a4dcced6aeff64602b634530dd73c1f3ee9f0" +dependencies = [ + "humantime", + "is-terminal", + "log", + "regex", + "termcolor", +] + [[package]] name = "errno" version = "0.2.8" @@ -1161,9 +1193,9 @@ checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "gloo-timers" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fb7d06c1c8cc2a29bee7ec961009a0b2caa0793ee4900c2ffb348734ba1c8f9" +checksum = "98c4a8d6391675c6b2ee1a6c8d06e8e2d03605c44cec1270675985a4c2a5500b" dependencies = [ "futures-channel", "futures-core", @@ -1316,6 +1348,12 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" +[[package]] +name = "humantime" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" + [[package]] name = "hyper" version = "0.14.23" @@ -1415,9 +1453,9 @@ dependencies = [ [[package]] name = "io-lifetimes" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e394faa0efb47f9f227f1cd89978f854542b318a6f64fa695489c9c993056656" +checksum = "46112a93252b123d31a119a8d1a1ac19deac4fac6e0e8b0df58f0d4e5870e63c" dependencies = [ "libc", "windows-sys", @@ -1425,9 +1463,9 @@ dependencies = [ [[package]] name = "is-terminal" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aae5bc6e2eb41c9def29a3e0f1306382807764b9b53112030eff57435667352d" +checksum = "927609f78c2913a6f6ac3c27a4fe87f43e2a35367c0c4b0f8265e8f49a104330" dependencies = [ "hermit-abi 0.2.6", "io-lifetimes", @@ -1473,7 +1511,11 @@ name = "kaspa-core" version = "0.1.0" dependencies = [ "ctrlc", + "env_logger", + "futures-util", "intertrait", + "log", + 
"tokio", "wasm-bindgen", ] @@ -1502,15 +1544,20 @@ dependencies = [ name = "kaspad" version = "0.1.0" dependencies = [ + "clap 4.0.29", "consensus", "consensus-core", + "dirs", "futures-util", "hashes", "kaspa-core", + "log", "num-format", "rand 0.8.5", "rand_distr", "rayon", + "rpc-core", + "rpc-grpc", "tempfile", "thiserror", "tokio", @@ -1699,9 +1746,9 @@ dependencies = [ [[package]] name = "matchit" -version = "0.5.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73cbba799671b762df5a175adf59ce145165747bb891505c43d09aefbbf38beb" +checksum = "b87248edafb776e59e6ee64a79086f65890d3510f2c656c000bf2a7e8a0aea40" [[package]] name = "math" @@ -1911,9 +1958,9 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.4" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dc9e0dc2adc1c69d09143aff38d3d30c5c3f0df0dad82e6d25547af174ebec0" +checksum = "7ff9f3fef3968a3ec5945535ed654cb38ff72d7495a25619e2247fb15a2ed9ba" dependencies = [ "cfg-if", "libc", @@ -2013,9 +2060,9 @@ checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160" [[package]] name = "polling" -version = "2.5.0" +version = "2.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f7d73f1eaed1ca1fb37b54dcc9b38e3b17d6c7b8ecb7abfffcac8d0351f17d4" +checksum = "166ca89eb77fd403230b9c156612965a81e094ec6ec3aa13663d4c8b113fa748" dependencies = [ "autocfg", "cfg-if", @@ -2095,9 +2142,9 @@ dependencies = [ [[package]] name = "prost" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0841812012b2d4a6145fae9a6af1534873c32aa67fff26bd09f8fa42c83f95a" +checksum = "c0b18e655c21ff5ac2084a5ad0611e827b3f92badf79f4910b5a5c58f4d87ff0" dependencies = [ "bytes", "prost-derive", @@ -2105,9 +2152,9 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d8b442418ea0822409d9e7d047cbf1e7e9e1760b172bf9982cf29d517c93511" +checksum = "e330bf1316db56b12c2bcfa399e8edddd4821965ea25ddb2c134b610b1c1c604" dependencies = [ "bytes", "heck", @@ -2279,12 +2326,25 @@ dependencies = [ "bitflags", ] +[[package]] +name = "redox_users" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" +dependencies = [ + "getrandom 0.2.8", + "redox_syscall", + "thiserror", +] + [[package]] name = "regex" version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e076559ef8e241f2ae3479e36f97bd5741c0330689e217ad51ce2c76808b868a" dependencies = [ + "aho-corasick", + "memchr", "regex-syntax", ] @@ -2317,6 +2377,7 @@ dependencies = [ name = "rpc-core" version = "0.1.0" dependencies = [ + "addresses", "ahash 0.8.2", "async-std", "async-trait", @@ -2328,6 +2389,7 @@ dependencies = [ "hashes", "kaspa-core", "kaspa-utils", + "log", "math", "rand 0.8.5", "serde", @@ -2347,6 +2409,7 @@ dependencies = [ "h2", "kaspa-core", "kaspa-utils", + "log", "prost", "rpc-core", "thiserror", @@ -2374,9 +2437,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.36.3" +version = "0.36.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b1fbb4dfc4eb1d390c02df47760bb19a84bb80b301ecc947ab5406394d8223e" +checksum = "cb93e85278e08bb5788653183213d3a60fc242b10cb9be96586f5a73dcb67c23" dependencies = [ 
"bitflags", "errno", @@ -2522,7 +2585,7 @@ checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" name = "simpa" version = "0.1.0" dependencies = [ - "clap 4.0.27", + "clap 4.0.29", "consensus", "consensus-core", "futures", @@ -2578,9 +2641,9 @@ dependencies = [ [[package]] name = "solana-frozen-abi" -version = "1.14.8" +version = "1.14.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "341bba362c91aedad2ad9fc0c28c2e39aaa606e6b9c049e8fbcc9f60675163ff" +checksum = "3a5fb9c1bd1cf7ccc2b96209f0a9061afb7f0e94ca1ada6caf268fd4bc5274ff" dependencies = [ "ahash 0.7.6", "blake3", @@ -2612,9 +2675,9 @@ dependencies = [ [[package]] name = "solana-frozen-abi-macro" -version = "1.14.8" +version = "1.14.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6fae474ab37e2ccc4dfd33edd36a05d7df02b8531fa9870cb244f9491b64fe3" +checksum = "091d54072f4c79ecf31bb472fcd53c15329666c33b8c2a94f13475b2a263712a" dependencies = [ "proc-macro2", "quote", @@ -2624,9 +2687,9 @@ dependencies = [ [[package]] name = "solana-program" -version = "1.14.8" +version = "1.14.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f480a0a440ea15d8436de1c9ac01501cb15979dae4a0a5fc8e33198949b38681" +checksum = "58bee7b596fdf962d5619b6331b9b6a05144d5f04a22f95cd8706d940036135a" dependencies = [ "base64 0.13.1", "bincode", @@ -2673,9 +2736,9 @@ dependencies = [ [[package]] name = "solana-sdk-macro" -version = "1.14.8" +version = "1.14.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "768f16d1a7315fc66ba835eebf9e95a83365ac94222551bc5cdcc6a74cb4a137" +checksum = "62bb026ece5b73ec6cefcba5ef96496a28c53c99e767cc77d8abffa36127783d" dependencies = [ "bs58", "proc-macro2", @@ -2698,9 +2761,9 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "syn" -version = "1.0.104" +version = "1.0.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ae548ec36cf198c0ef7710d3c230987c2d6d7bd98ad6edc0274462724c585ce" +checksum = "60b9b43d45702de4c839cb9b51d9f529c5dd26a4aff255b42b1ebc03e88ee908" dependencies = [ "proc-macro2", "quote", @@ -2848,9 +2911,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "1.8.0" +version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9724f9a975fb987ef7a3cd9be0350edcbe130698af5b8f7a631e23d42d052484" +checksum = "d266c00fde287f55d3f1c3e96c500c362a2b8c695076ec180f27918820bc6df8" dependencies = [ "proc-macro2", "quote", @@ -2893,9 +2956,9 @@ dependencies = [ [[package]] name = "tonic" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55b9af819e54b8f33d453655bef9b9acc171568fb49523078d0cc4e7484200ec" +checksum = "8f219fad3b929bef19b1f86fbc0358d35daed8f2cac972037ac0dc10bbb8d5fb" dependencies = [ "async-stream", "async-trait", @@ -2926,9 +2989,9 @@ dependencies = [ [[package]] name = "tonic-build" -version = "0.8.2" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48c6fd7c2581e36d63388a9e04c350c21beb7a8b059580b2e93993c526899ddc" +checksum = "5bf5e9b9c0f7e0a7c027dcfaba7b2c60816c7049171f679d99ee2ff65d0de8c4" dependencies = [ "prettyplease", "proc-macro2", @@ -3388,9 +3451,9 @@ dependencies = [ [[package]] name = "zeroize_derive" -version = "1.3.2" +version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3f8f187641dad4f680d25c4bfc4225b418165984179f26ca76ec4fb6441d3a17" +checksum = "44bf07cb3e50ea2003396695d58bf46bc9887a1f362260446fad6bc4e79bd36c" dependencies = [ "proc-macro2", "quote", @@ -3400,9 +3463,9 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "2.0.3+zstd.1.5.2" +version = "2.0.4+zstd.1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44ccf97612ac95f3ccb89b2d7346b345e52f1c3019be4984f0455fb4ba991f8a" +checksum = "4fa202f2ef00074143e219d15b62ffc317d17cc33909feac471c044087cad7b0" dependencies = [ "cc", "libc", diff --git a/Cargo.toml b/Cargo.toml index 0754407ba..54ff2e804 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -62,3 +62,4 @@ borsh = "0.9.3" clap = { version = "4.0.23", features = ["derive"] } async-std = { version = "1.12.0", features = ['attributes'] } derive_more = { version = "0.99" } +log = "0.4" \ No newline at end of file diff --git a/README.md b/README.md index 811c4d6fc..7a2b49ac3 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,5 @@ # Kaspa on Rust - Work in progress to implement the Kaspa full-node and related libraries in the Rust programming language. ## Getting started @@ -8,15 +7,54 @@ Work in progress to implement the Kaspa full-node and related libraries in the R - Install the [rust toolchain](https://rustup.rs/). - Run the following commands: + ```bash $ git clone https://github.com/kaspanet/rusty-kaspa -$ cd rusty-kaspa/kaspad -$ cargo run --release +$ cd rusty-kaspa +``` + +## Experimenting with the node + +The `kaspad` rust executable is currently at the initial stage where a devnet consensus instance can be built and mined locally through the RPC interface. The P2P network is not supported yet. To see it in action, perform the following: + +```bash +$ cargo run --bin kaspad --release +``` + +- Download and unzip the latest binaries bundle of [kaspanet/kaspad](https://github.com/kaspanet/kaspad/releases). + +- In a separate terminal run the kaspanet/kaspad miner: + +```bash +$ kaspaminer --rpcserver 127.0.0.1:16610 --devnet --miningaddr kaspadev:qrcqat6l9zcjsu7swnaztqzrv0s7hu04skpaezxk43y4etj8ncwfkuhy0zmax ``` -- This will run a short simulation producing a random DAG and processing it (applying all currently implemented logic). +- This will create and feed a DAG with the miner getting block templates from the node and submitting them back when mined. The node processes and stores the blocks while applying all currently implemented logic. Execution can be stopped and resumed, the data is persisted in a database. + +## Simulation framework (Simpa) + +Additionally, the current codebase supports a full in-process network simulation, building an actual DAG over virtual time with virtual delay and benchmarking validation time (following the simulation generation). Execute +```bash +cargo run --release --bin simpa -- --help +``` +to see the full command line configuration supported by `simpa`. For instance, the following command will run a simulation producing 1000 blocks with communication delay of 2 seconds and BPS=8, and attempts to fill each block with up to 200 transactions. 
+
+```bash
+$ cargo run --release --bin simpa -- -t=200 -d=2 -b=8 -n=1000
+```
+
+## Logging
+
+Logging in `kaspad` and `simpa` can be [filtered](https://docs.rs/env_logger/0.10.0/env_logger/#filtering-results) either by defining the `RUST_LOG` environment variable or by adding a `--loglevel` argument to the command, e.g.:
+
+```bash
+$ cargo run --bin kaspad -- --loglevel info,rpc_core=trace,rpc_grpc=trace,consensus=trace,kaspa_core=trace
+```
+
+## Tests & Benchmarks

 - To run all current tests use:
+
 ```bash
 $ cd rusty-kaspa
 $ cargo test --release
@@ -25,6 +63,7 @@ $ cargo nextest run --release
 ```

 - To run current benchmarks:
+
 ```bash
 $ cd rusty-kaspa
 $ cargo bench
diff --git a/consensus/Cargo.toml b/consensus/Cargo.toml
index 15c08194c..457ad1d75 100644
--- a/consensus/Cargo.toml
+++ b/consensus/Cargo.toml
@@ -28,6 +28,7 @@ rand.workspace = true
 indexmap.workspace = true
 smallvec.workspace = true
 kaspa-utils.workspace = true
+log.workspace = true
 rocksdb = "0.19"
 parking_lot = "0.12"
diff --git a/consensus/core/Cargo.toml b/consensus/core/Cargo.toml
index 2133e98c2..781ab4b1a 100644
--- a/consensus/core/Cargo.toml
+++ b/consensus/core/Cargo.toml
@@ -16,4 +16,5 @@ serde.workspace = true
 faster-hex.workspace = true
 smallvec.workspace = true
 borsh.workspace = true
-secp256k1 = { version = "0.24", features = ["global-context", "rand-std"] }
\ No newline at end of file
+secp256k1 = { version = "0.24", features = ["global-context", "rand-std"] }
+futures-util.workspace = true
diff --git a/consensus/core/src/api/mod.rs b/consensus/core/src/api/mod.rs
new file mode 100644
index 000000000..28756b7af
--- /dev/null
+++ b/consensus/core/src/api/mod.rs
@@ -0,0 +1,22 @@
+use futures_util::future::BoxFuture;
+use std::sync::Arc;
+
+use crate::{
+    block::{Block, BlockTemplate},
+    blockstatus::BlockStatus,
+    coinbase::MinerData,
+    tx::Transaction,
+};
+
+/// Abstracts the consensus external API
+pub trait ConsensusApi: Send + Sync {
+    fn build_block_template(self: Arc<Self>, miner_data: MinerData, txs: Vec<Transaction>) -> BlockTemplate;
+
+    fn validate_and_insert_block(
+        self: Arc<Self>,
+        block: Block,
+        update_virtual: bool,
+    ) -> BoxFuture<'static, Result<BlockStatus, String>>;
+}
+
+pub type DynConsensus = Arc<dyn ConsensusApi>;
diff --git a/consensus/core/src/blockstatus.rs b/consensus/core/src/blockstatus.rs
new file mode 100644
index 000000000..e7703b2f7
--- /dev/null
+++ b/consensus/core/src/blockstatus.rs
@@ -0,0 +1,31 @@
+use serde::{Deserialize, Serialize};
+
+#[derive(Clone, Copy, Serialize, Deserialize, PartialEq, Eq, Debug)]
+pub enum BlockStatus {
+    /// StatusInvalid indicates that the block is invalid.
+    StatusInvalid,
+
+    /// StatusUTXOValid indicates the block is valid from any UTXO related aspects and has passed all the other validations as well.
+    StatusUTXOValid,
+
+    /// StatusUTXOPendingVerification indicates that the block is pending verification against its past UTXO-Set, either
+    /// because it was not yet verified since the block was never in the selected parent chain, or if the
+    /// block violates finality.
+    StatusUTXOPendingVerification,
+
+    /// StatusDisqualifiedFromChain indicates that the block is not eligible to be a selected parent.
+ StatusDisqualifiedFromChain, + + /// StatusHeaderOnly indicates that the block transactions are not held (pruned or wasn't added yet) + StatusHeaderOnly, +} + +impl BlockStatus { + pub fn has_block_body(self) -> bool { + matches!(self, Self::StatusUTXOValid | Self::StatusUTXOPendingVerification | Self::StatusDisqualifiedFromChain) + } + + pub fn is_utxo_valid_or_pending(self) -> bool { + matches!(self, Self::StatusUTXOValid | Self::StatusUTXOPendingVerification) + } +} diff --git a/consensus/core/src/lib.rs b/consensus/core/src/lib.rs index 3c752bde8..9c5e1a2c2 100644 --- a/consensus/core/src/lib.rs +++ b/consensus/core/src/lib.rs @@ -3,8 +3,10 @@ use std::hash::{BuildHasher, Hasher}; use hashes::Hash; +pub mod api; pub mod block; pub mod blockhash; +pub mod blockstatus; pub mod coinbase; pub mod hashing; pub mod header; diff --git a/consensus/core/src/notify/mod.rs b/consensus/core/src/notify/mod.rs index 8a84776ff..ca3d4624d 100644 --- a/consensus/core/src/notify/mod.rs +++ b/consensus/core/src/notify/mod.rs @@ -3,9 +3,13 @@ use crate::block::Block; #[derive(Debug, Clone)] pub enum Notification { BlockAdded(BlockAddedNotification), + NewBlockTemplate(NewBlockTemplateNotification), } #[derive(Debug, Clone)] pub struct BlockAddedNotification { pub block: Block, } + +#[derive(Debug, Clone)] +pub struct NewBlockTemplateNotification {} diff --git a/consensus/core/src/tx.rs b/consensus/core/src/tx.rs index 2625e7056..1c4df6094 100644 --- a/consensus/core/src/tx.rs +++ b/consensus/core/src/tx.rs @@ -1,3 +1,4 @@ +use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; use serde::{Deserialize, Serialize}; use smallvec::SmallVec; use std::fmt::Display; @@ -62,7 +63,8 @@ impl UtxoEntry { pub type TransactionIndexType = u32; /// Represents a Kaspa transaction outpoint -#[derive(Eq, Hash, PartialEq, Debug, Copy, Clone, Serialize, Deserialize)] +#[derive(Eq, Hash, PartialEq, Debug, Copy, Clone, Serialize, Deserialize, BorshSerialize, BorshDeserialize, BorshSchema)] +#[serde(rename_all = "camelCase")] pub struct TransactionOutpoint { pub transaction_id: TransactionId, pub index: TransactionIndexType, diff --git a/consensus/src/consensus/mod.rs b/consensus/src/consensus/mod.rs index f42513704..bb59fcb8b 100644 --- a/consensus/src/consensus/mod.rs +++ b/consensus/src/consensus/mod.rs @@ -21,7 +21,7 @@ use crate::{ pruning::DbPruningStore, reachability::DbReachabilityStore, relations::DbRelationsStore, - statuses::{BlockStatus, DbStatusesStore, StatusesStoreReader}, + statuses::{DbStatusesStore, StatusesStoreReader}, tips::{DbTipsStore, TipsStoreReader}, utxo_diffs::DbUtxoDiffsStore, utxo_multisets::DbUtxoMultisetsStore, @@ -45,16 +45,19 @@ use crate::{ }, }; use consensus_core::{ + api::ConsensusApi, block::{Block, BlockTemplate}, + blockstatus::BlockStatus, coinbase::MinerData, tx::Transaction, BlockHashSet, }; use crossbeam_channel::{unbounded, Receiver, Sender}; +use futures_util::future::BoxFuture; use hashes::Hash; use kaspa_core::{core::Core, service::Service}; use parking_lot::RwLock; -use std::future::Future; +use std::{future::Future, sync::atomic::Ordering}; use std::{ ops::DerefMut, sync::Arc, @@ -392,10 +395,11 @@ impl Consensus { pub fn validate_and_insert_block(&self, block: Block) -> impl Future> { let (tx, rx): (BlockResultSender, _) = oneshot::channel(); self.block_sender.send(BlockTask::Process(block, vec![tx])).unwrap(); + self.counters.blocks_submitted.fetch_add(1, Ordering::SeqCst); async { rx.await.unwrap() } } - pub fn build_block_template(self: &Arc, miner_data: MinerData, 
txs: Vec) -> BlockTemplate { + pub fn build_block_template(&self, miner_data: MinerData, txs: Vec) -> BlockTemplate { self.virtual_processor.build_block_template(miner_data, txs) } @@ -407,6 +411,10 @@ impl Consensus { self.statuses_store.read().get(hash).unwrap() } + pub fn processing_counters(&self) -> &Arc { + &self.counters + } + pub fn signal_exit(&self) { self.block_sender.send(BlockTask::Exit).unwrap(); } @@ -420,6 +428,21 @@ impl Consensus { } } +impl ConsensusApi for Consensus { + fn build_block_template(self: Arc, miner_data: MinerData, txs: Vec) -> BlockTemplate { + self.as_ref().build_block_template(miner_data, txs) + } + + fn validate_and_insert_block( + self: Arc, + block: Block, + _update_virtual: bool, + ) -> BoxFuture<'static, Result> { + let result = self.as_ref().validate_and_insert_block(block); + Box::pin(async move { result.await.map_err(|err| err.to_string()) }) + } +} + impl Service for Consensus { fn ident(self: Arc) -> &'static str { "consensus" diff --git a/consensus/src/consensus/test_consensus.rs b/consensus/src/consensus/test_consensus.rs index 180302399..55999eff3 100644 --- a/consensus/src/consensus/test_consensus.rs +++ b/consensus/src/consensus/test_consensus.rs @@ -5,13 +5,17 @@ use std::{ }; use consensus_core::{ - block::{Block, MutableBlock}, + api::ConsensusApi, + block::{Block, BlockTemplate, MutableBlock}, + blockstatus::BlockStatus, + coinbase::MinerData, header::Header, merkle::calc_hash_merkle_root, subnets::SUBNETWORK_ID_COINBASE, tx::Transaction, BlockHashSet, }; +use futures_util::future::BoxFuture; use hashes::Hash; use kaspa_core::{core::Core, service::Service}; use parking_lot::RwLock; @@ -26,7 +30,6 @@ use crate::{ headers::{DbHeadersStore, HeaderStoreReader}, pruning::PruningStoreReader, reachability::DbReachabilityStore, - statuses::BlockStatus, DB, }, params::Params, @@ -38,19 +41,19 @@ use crate::{ use super::{Consensus, DbGhostdagManager}; pub struct TestConsensus { - consensus: Consensus, + consensus: Arc, pub params: Params, temp_db_lifetime: TempDbLifetime, } impl TestConsensus { pub fn new(db: Arc, params: &Params) -> Self { - Self { consensus: Consensus::new(db, params), params: params.clone(), temp_db_lifetime: Default::default() } + Self { consensus: Arc::new(Consensus::new(db, params)), params: params.clone(), temp_db_lifetime: Default::default() } } pub fn create_from_temp_db(params: &Params) -> Self { let (temp_db_lifetime, db) = create_temp_db(); - Self { consensus: Consensus::new(db, params), params: params.clone(), temp_db_lifetime } + Self { consensus: Arc::new(Consensus::new(db, params)), params: params.clone(), temp_db_lifetime } } pub fn build_header_with_parents(&self, hash: Hash, parents: Vec) -> Header { @@ -102,7 +105,7 @@ impl TestConsensus { } pub fn validate_and_insert_block(&self, block: Block) -> impl Future> { - self.consensus.validate_and_insert_block(block) + self.consensus.as_ref().validate_and_insert_block(block) } pub fn init(&self) -> Vec> { @@ -154,6 +157,20 @@ impl TestConsensus { } } +impl ConsensusApi for TestConsensus { + fn build_block_template(self: Arc, miner_data: MinerData, txs: Vec) -> BlockTemplate { + self.consensus.clone().build_block_template(miner_data, txs) + } + + fn validate_and_insert_block( + self: Arc, + block: Block, + update_virtual: bool, + ) -> BoxFuture<'static, Result> { + self.consensus.clone().validate_and_insert_block(block, update_virtual) + } +} + impl Service for TestConsensus { fn ident(self: Arc) -> &'static str { "test-consensus" diff --git 
a/consensus/src/model/services/statuses.rs b/consensus/src/model/services/statuses.rs index a945d672d..ecaa5f83b 100644 --- a/consensus/src/model/services/statuses.rs +++ b/consensus/src/model/services/statuses.rs @@ -1,4 +1,5 @@ -use crate::model::stores::statuses::{BlockStatus, StatusesStoreReader}; +use crate::model::stores::statuses::StatusesStoreReader; +use consensus_core::blockstatus::BlockStatus; use hashes::Hash; use parking_lot::RwLock; use std::sync::Arc; diff --git a/consensus/src/model/stores/statuses.rs b/consensus/src/model/stores/statuses.rs index 8a05ee205..29609ba97 100644 --- a/consensus/src/model/stores/statuses.rs +++ b/consensus/src/model/stores/statuses.rs @@ -1,7 +1,6 @@ -use consensus_core::BlockHasher; +use consensus_core::{blockstatus::BlockStatus, BlockHasher}; use parking_lot::{RwLock, RwLockWriteGuard}; use rocksdb::WriteBatch; -use serde::{Deserialize, Serialize}; use std::sync::Arc; use super::{ @@ -11,36 +10,6 @@ use super::{ }; use hashes::Hash; -#[derive(Clone, Copy, Serialize, Deserialize, PartialEq, Eq, Debug)] -pub enum BlockStatus { - /// StatusInvalid indicates that the block is invalid. - StatusInvalid, - - /// StatusUTXOValid indicates the block is valid from any UTXO related aspects and has passed all the other validations as well. - StatusUTXOValid, - - /// StatusUTXOPendingVerification indicates that the block is pending verification against its past UTXO-Set, either - /// because it was not yet verified since the block was never in the selected parent chain, or if the - /// block violates finality. - StatusUTXOPendingVerification, - - /// StatusDisqualifiedFromChain indicates that the block is not eligible to be a selected parent. - StatusDisqualifiedFromChain, - - /// StatusHeaderOnly indicates that the block transactions are not held (pruned or wasn't added yet) - StatusHeaderOnly, -} - -impl BlockStatus { - pub fn has_block_body(self) -> bool { - matches!(self, Self::StatusUTXOValid | Self::StatusUTXOPendingVerification | Self::StatusDisqualifiedFromChain) - } - - pub fn is_utxo_valid_or_pending(self) -> bool { - matches!(self, Self::StatusUTXOValid | Self::StatusUTXOPendingVerification) - } -} - /// Reader API for `StatusesStore`. 
pub trait StatusesStoreReader { fn get(&self, hash: Hash) -> StoreResult; diff --git a/consensus/src/params.rs b/consensus/src/params.rs index 9031d023f..ed994e255 100644 --- a/consensus/src/params.rs +++ b/consensus/src/params.rs @@ -95,7 +95,7 @@ pub const DEVNET_PARAMS: Params = Params { max_block_parents: 10, difficulty_window_size: 2641, genesis_timestamp: 0, // TODO: Use real value - genesis_bits: 0x207fffff, // TODO: Use real value + genesis_bits: 0x1e21bc1c, // As observed on testnet mergeset_size_limit: (DEFAULT_GHOSTDAG_K as u64) * 10, merge_depth: 3600, finality_depth: 86400, diff --git a/consensus/src/pipeline/body_processor/processor.rs b/consensus/src/pipeline/body_processor/processor.rs index de903f363..af3c09121 100644 --- a/consensus/src/pipeline/body_processor/processor.rs +++ b/consensus/src/pipeline/body_processor/processor.rs @@ -9,10 +9,7 @@ use crate::{ ghostdag::DbGhostdagStore, headers::DbHeadersStore, reachability::DbReachabilityStore, - statuses::{ - BlockStatus::{self, StatusHeaderOnly, StatusInvalid}, - DbStatusesStore, StatusesStore, StatusesStoreBatchExtensions, StatusesStoreReader, - }, + statuses::{DbStatusesStore, StatusesStore, StatusesStoreBatchExtensions, StatusesStoreReader}, tips::DbTipsStore, DB, }, @@ -23,7 +20,12 @@ use crate::{ transaction_validator::TransactionValidator, }, }; -use consensus_core::{block::Block, subnets::SUBNETWORK_ID_COINBASE, tx::Transaction}; +use consensus_core::{ + block::Block, + blockstatus::BlockStatus::{self, StatusHeaderOnly, StatusInvalid}, + subnets::SUBNETWORK_ID_COINBASE, + tx::Transaction, +}; use crossbeam_channel::{Receiver, Sender}; use hashes::Hash; use parking_lot::RwLock; diff --git a/consensus/src/pipeline/deps_manager.rs b/consensus/src/pipeline/deps_manager.rs index a8098df67..71ae8f2e5 100644 --- a/consensus/src/pipeline/deps_manager.rs +++ b/consensus/src/pipeline/deps_manager.rs @@ -1,5 +1,5 @@ -use crate::{errors::BlockProcessResult, model::stores::statuses::BlockStatus}; -use consensus_core::{block::Block, BlockHashMap, HashMapCustomHasher}; +use crate::errors::BlockProcessResult; +use consensus_core::{block::Block, blockstatus::BlockStatus, BlockHashMap, HashMapCustomHasher}; use hashes::Hash; use parking_lot::{Condvar, Mutex}; use std::collections::hash_map::Entry::Vacant; diff --git a/consensus/src/pipeline/header_processor/pre_ghostdag_validation.rs b/consensus/src/pipeline/header_processor/pre_ghostdag_validation.rs index 655191c56..ec426367a 100644 --- a/consensus/src/pipeline/header_processor/pre_ghostdag_validation.rs +++ b/consensus/src/pipeline/header_processor/pre_ghostdag_validation.rs @@ -3,8 +3,9 @@ use crate::constants; use crate::errors::{BlockProcessResult, RuleError}; use crate::model::services::reachability::ReachabilityService; use crate::model::stores::errors::StoreResultExtensions; -use crate::model::stores::statuses::{BlockStatus::StatusInvalid, StatusesStoreReader}; +use crate::model::stores::statuses::StatusesStoreReader; use consensus_core::blockhash::BlockHashExtensions; +use consensus_core::blockstatus::BlockStatus::StatusInvalid; use consensus_core::header::Header; use std::{ sync::Arc, diff --git a/consensus/src/pipeline/header_processor/processor.rs b/consensus/src/pipeline/header_processor/processor.rs index 2e51f33aa..6125d5786 100644 --- a/consensus/src/pipeline/header_processor/processor.rs +++ b/consensus/src/pipeline/header_processor/processor.rs @@ -14,10 +14,7 @@ use crate::{ pruning::{DbPruningStore, PruningPointInfo, PruningStore, PruningStoreReader}, 
reachability::{DbReachabilityStore, StagingReachabilityStore}, relations::{DbRelationsStore, RelationsStoreBatchExtensions}, - statuses::{ - BlockStatus::{self, StatusHeaderOnly, StatusInvalid}, - DbStatusesStore, StatusesStore, StatusesStoreBatchExtensions, StatusesStoreReader, - }, + statuses::{DbStatusesStore, StatusesStore, StatusesStoreBatchExtensions, StatusesStoreReader}, DB, }, }, @@ -37,6 +34,7 @@ use crate::{ }; use consensus_core::{ blockhash::{BlockHashes, ORIGIN}, + blockstatus::BlockStatus::{self, StatusHeaderOnly, StatusInvalid}, header::Header, BlockHashSet, }; diff --git a/consensus/src/pipeline/virtual_processor/processor.rs b/consensus/src/pipeline/virtual_processor/processor.rs index 85f89d7c2..e80f29f2a 100644 --- a/consensus/src/pipeline/virtual_processor/processor.rs +++ b/consensus/src/pipeline/virtual_processor/processor.rs @@ -15,10 +15,7 @@ use crate::{ pruning::{DbPruningStore, PruningStore, PruningStoreReader}, reachability::DbReachabilityStore, relations::DbRelationsStore, - statuses::{ - BlockStatus::{self, StatusDisqualifiedFromChain, StatusUTXOPendingVerification, StatusUTXOValid}, - DbStatusesStore, StatusesStore, StatusesStoreBatchExtensions, StatusesStoreReader, - }, + statuses::{DbStatusesStore, StatusesStore, StatusesStoreBatchExtensions, StatusesStoreReader}, tips::{DbTipsStore, TipsStoreReader}, utxo_diffs::{DbUtxoDiffsStore, UtxoDiffsStoreReader}, utxo_multisets::{DbUtxoMultisetsStore, UtxoMultisetsStoreReader}, @@ -37,6 +34,7 @@ use crate::{ }; use consensus_core::{ block::{BlockTemplate, MutableBlock}, + blockstatus::BlockStatus::{self, StatusDisqualifiedFromChain, StatusUTXOPendingVerification, StatusUTXOValid}, coinbase::MinerData, header::Header, merkle::calc_hash_merkle_root, @@ -462,6 +460,7 @@ impl VirtualStateProcessor { Err(err) => panic!("unexpected store error {}", err), } } + StatusUTXOValid => {} _ => panic!("unexpected genesis status {:?}", status), } } diff --git a/consensus/tests/integration_tests.rs b/consensus/tests/integration_tests.rs index 1429d526b..a70566572 100644 --- a/consensus/tests/integration_tests.rs +++ b/consensus/tests/integration_tests.rs @@ -8,11 +8,11 @@ use consensus::errors::{BlockProcessResult, RuleError}; use consensus::model::stores::ghostdag::{GhostdagStoreReader, KType as GhostdagKType}; use consensus::model::stores::headers::HeaderStoreReader; use consensus::model::stores::reachability::DbReachabilityStore; -use consensus::model::stores::statuses::BlockStatus; use consensus::params::{Params, DEVNET_PARAMS, MAINNET_PARAMS}; use consensus::processes::reachability::tests::{DagBlock, DagBuilder, StoreValidationExtensions}; use consensus_core::block::Block; use consensus_core::blockhash::new_unique; +use consensus_core::blockstatus::BlockStatus; use consensus_core::header::Header; use consensus_core::subnets::SubnetworkId; use consensus_core::tx::{ScriptPublicKey, Transaction, TransactionInput, TransactionOutpoint, TransactionOutput}; diff --git a/consensus/tests/pipeline_tests.rs b/consensus/tests/pipeline_tests.rs index 37d2f1a69..ce178e8d9 100644 --- a/consensus/tests/pipeline_tests.rs +++ b/consensus/tests/pipeline_tests.rs @@ -1,14 +1,11 @@ use consensus::{ consensus::test_consensus::{create_temp_db, TestConsensus}, errors::RuleError, - model::stores::{ - reachability::{DbReachabilityStore, StagingReachabilityStore}, - statuses::BlockStatus, - }, + model::stores::reachability::{DbReachabilityStore, StagingReachabilityStore}, params::MAINNET_PARAMS, processes::reachability::tests::{DagBlock, DagBuilder, 
StoreValidationExtensions}, }; -use consensus_core::blockhash; +use consensus_core::{blockhash, blockstatus::BlockStatus}; use futures_util::future::join_all; use hashes::Hash; use parking_lot::RwLock; diff --git a/core/Cargo.toml b/core/Cargo.toml index 75fad521a..739023569 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -7,8 +7,12 @@ include.workspace = true license.workspace = true [dependencies] +futures-util.workspace = true +tokio = { workspace = true, features = ["rt", "macros", "rt-multi-thread"] } wasm-bindgen.workspace = true +log.workspace = true ctrlc = "3.2" +env_logger = "0.10" [target.'cfg(not(target_arch = "wasm32"))'.dependencies] intertrait = "0.2" diff --git a/core/src/lib.rs b/core/src/lib.rs index 0d602982a..d5ad5f3c1 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -6,3 +6,4 @@ pub mod core; pub mod log; pub mod service; pub mod signals; +pub mod task; diff --git a/core/src/log.rs b/core/src/log.rs index 3586cc680..a0bd17d1d 100644 --- a/core/src/log.rs +++ b/core/src/log.rs @@ -1,4 +1,20 @@ -/// TODO: implement a proper logger with reused macro logic +//! Logger and logging macros +//! +//! For the macros to properly compile, the calling crate must add a dependency to +//! crate log (ie. `log.workspace = true`) when target architecture is not wasm32. + +// TODO: enhance logger with parallel output to file, rotation, compression + +#[cfg(not(target_arch = "wasm32"))] +pub fn init_logger(filters: &str) { + env_logger::Builder::new() + .format_target(false) + .format_timestamp_secs() + .filter_level(log::LevelFilter::Info) + .parse_default_env() + .parse_filters(filters) + .init(); +} #[cfg(target_arch = "wasm32")] #[macro_export] @@ -15,16 +31,13 @@ macro_rules! trace { #[macro_export] macro_rules! trace { ($($t:tt)*) => { - #[allow(unused_unsafe)] - let _ = format_args!($($t)*); // Dummy code for using the variables - // Disable trace until we implement log-level cmd configuration - // unsafe { println!("TRACE: {}",&format_args!($($t)*).to_string()) } + log::trace!($($t)*); }; } #[cfg(target_arch = "wasm32")] #[macro_export] -macro_rules! info { +macro_rules! debug { ($($t:tt)*) => ( #[allow(unused_unsafe)] unsafe { core::console::log(&format_args!($($t)*).to_string()) } @@ -33,10 +46,26 @@ macro_rules! info { #[cfg(not(target_arch = "wasm32"))] #[macro_export] +macro_rules! debug { + ($($t:tt)*) => ( + log::debug!($($t)*); + ) +} + +#[cfg(target_arch = "wasm32")] +#[macro_export] macro_rules! info { ($($t:tt)*) => ( #[allow(unused_unsafe)] - unsafe { println!("INFO: {}",&format_args!($($t)*).to_string()) } + unsafe { core::console::log(&format_args!($($t)*).to_string()) } + ) +} + +#[cfg(not(target_arch = "wasm32"))] +#[macro_export] +macro_rules! info { + ($($t:tt)*) => ( + log::info!($($t)*); ) } @@ -53,8 +82,7 @@ macro_rules! warn { #[macro_export] macro_rules! warn { ($($t:tt)*) => ( - #[allow(unused_unsafe)] - unsafe { println!("WARN: {}",&format_args!($($t)*).to_string()) } + log::warn!($($t)*); ) } @@ -71,7 +99,6 @@ macro_rules! error { #[macro_export] macro_rules! 
error {
     ($($t:tt)*) => (
-        #[allow(unused_unsafe)]
-        unsafe { println!("ERROR: {}",&format_args!($($t)*).to_string()) }
+        log::error!($($t)*);
     )
 }
diff --git a/core/src/task/mod.rs b/core/src/task/mod.rs
new file mode 100644
index 000000000..7df504a4e
--- /dev/null
+++ b/core/src/task/mod.rs
@@ -0,0 +1,4 @@
+pub mod runtime;
+pub mod service;
+
+// TODO: Determine the most appropriate location for task
diff --git a/core/src/task/runtime.rs b/core/src/task/runtime.rs
new file mode 100644
index 000000000..ba7e28ab3
--- /dev/null
+++ b/core/src/task/runtime.rs
@@ -0,0 +1,86 @@
+use futures_util::future::join_all;
+use kaspa_core::core::Core;
+use kaspa_core::service::Service;
+use kaspa_core::task::service::AsyncService;
+use kaspa_core::trace;
+use std::{
+    sync::{Arc, Mutex},
+    thread::{self, JoinHandle as ThreadJoinHandle},
+};
+use tokio::task::{JoinError, JoinHandle as TaskJoinHandle};
+
+const ASYNC_RUNTIME: &str = "async-runtime";
+
+/// AsyncRuntime registers async services and provides
+/// a tokio Runtime to run them.
+pub struct AsyncRuntime {
+    services: Mutex<Vec<Arc<dyn AsyncService>>>,
+}
+
+impl Default for AsyncRuntime {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl AsyncRuntime {
+    pub fn new() -> Self {
+        trace!("Creating the async-runtime service");
+        Self { services: Mutex::new(Vec::new()) }
+    }
+
+    pub fn register<T>(&self, service: Arc<T>)
+    where
+        T: AsyncService,
+    {
+        self.services.lock().unwrap().push(service);
+    }
+
+    pub fn init(self: Arc<Self>) -> Vec<ThreadJoinHandle<()>> {
+        trace!("initializing async-runtime service");
+        vec![thread::Builder::new().name(ASYNC_RUNTIME.to_string()).spawn(move || self.worker()).unwrap()]
+    }
+
+    /// Launch a tokio Runtime and run the top-level async objects
+    #[tokio::main(worker_threads = 2)]
+    // TODO: increase the number of threads if needed
+    // TODO: build the runtime explicitly and dedicate a number of threads based on the host specs
+    pub async fn worker(self: &Arc<Self>) {
+        // Start all async services
+        // All services futures are spawned as tokio tasks to enable parallelism
+        trace!("async-runtime worker starting");
+        let futures =
+            self.services.lock().unwrap().iter().map(|x| tokio::spawn(x.clone().start())).collect::<Vec<TaskJoinHandle<()>>>();
+        join_all(futures).await.into_iter().collect::<Result<Vec<()>, JoinError>>().unwrap();
+
+        // Stop all async services
+        // All services futures are spawned as tokio tasks to enable parallelism
+        trace!("async-runtime worker stopping");
+        let futures =
+            self.services.lock().unwrap().iter().map(|x| tokio::spawn(x.clone().stop())).collect::<Vec<TaskJoinHandle<()>>>();
+        join_all(futures).await.into_iter().collect::<Result<Vec<()>, JoinError>>().unwrap();
+
+        trace!("async-runtime worker exiting");
+    }
+
+    pub fn signal_exit(self: Arc<Self>) {
+        trace!("Sending an exit signal to all async-runtime services");
+        for service in self.services.lock().unwrap().iter() {
+            service.clone().signal_exit();
+        }
+    }
+}
+
+impl Service for AsyncRuntime {
+    fn ident(self: Arc<Self>) -> &'static str {
+        ASYNC_RUNTIME
+    }
+
+    fn start(self: Arc<Self>, _core: Arc<Core>) -> Vec<ThreadJoinHandle<()>> {
+        self.init()
+    }
+
+    fn stop(self: Arc<Self>) {
+        self.signal_exit()
+    }
+}
diff --git a/core/src/task/service.rs b/core/src/task/service.rs
new file mode 100644
index 000000000..e75f0211f
--- /dev/null
+++ b/core/src/task/service.rs
@@ -0,0 +1,13 @@
+use futures_util::future::BoxFuture;
+use intertrait::CastFromSync;
+
+use std::sync::Arc;
+
+pub type AsyncServiceFuture = BoxFuture<'static, ()>;
+
+pub trait AsyncService: CastFromSync {
+    fn ident(self: Arc<Self>) -> &'static str;
+    fn start(self: Arc<Self>) -> AsyncServiceFuture;
+    fn signal_exit(self: Arc<Self>);
+    fn stop(self: Arc<Self>) -> 
AsyncServiceFuture; +} diff --git a/crypto/addresses/Cargo.toml b/crypto/addresses/Cargo.toml index c1aca414c..6ae6166f9 100644 --- a/crypto/addresses/Cargo.toml +++ b/crypto/addresses/Cargo.toml @@ -7,6 +7,8 @@ include.workspace = true license.workspace = true [dependencies] +borsh.workspace = true +serde.workspace = true [dev-dependencies] criterion.workspace = true diff --git a/crypto/addresses/src/lib.rs b/crypto/addresses/src/lib.rs index 65fc303e3..729840cdd 100644 --- a/crypto/addresses/src/lib.rs +++ b/crypto/addresses/src/lib.rs @@ -1,3 +1,5 @@ +use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use serde::{Deserialize, Serialize}; use std::fmt::{Display, Formatter}; mod bech32; @@ -25,7 +27,9 @@ impl Display for AddressError { } } -#[derive(PartialEq, Eq, Clone, Debug)] +impl std::error::Error for AddressError {} + +#[derive(PartialEq, Eq, Clone, Copy, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize, BorshSchema)] pub enum Prefix { Mainnet, Testnet, @@ -73,7 +77,7 @@ impl TryFrom<&str> for Prefix { } } -#[derive(PartialEq, Eq, Clone, Debug)] +#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize, BorshSchema)] pub struct Address { pub prefix: Prefix, pub payload: Vec, @@ -82,6 +86,12 @@ pub struct Address { impl From
for String { fn from(address: Address) -> Self { + (&address).into() + } +} + +impl From<&Address> for String { + fn from(address: &Address) -> Self { format!("{}:{}", address.prefix, address.encode_payload()) } } diff --git a/kaspad/Cargo.toml b/kaspad/Cargo.toml index e5885773f..0bbdbf2a0 100644 --- a/kaspad/Cargo.toml +++ b/kaspad/Cargo.toml @@ -12,6 +12,8 @@ hashes.workspace = true kaspa-core.workspace = true consensus-core.workspace = true consensus.workspace = true +rpc-core.workspace = true +rpc-grpc.workspace = true thiserror.workspace = true futures-util.workspace = true @@ -19,5 +21,8 @@ rand.workspace = true rayon.workspace = true tempfile.workspace = true tokio = { workspace = true, features = ["rt", "macros", "rt-multi-thread"] } +clap.workspace = true +log.workspace = true rand_distr = "0.4" -num-format = "0.4" \ No newline at end of file +num-format = "0.4" +dirs = "4.0" diff --git a/kaspad/src/emulator.rs b/kaspad/src/emulator.rs deleted file mode 100644 index 44bd6c929..000000000 --- a/kaspad/src/emulator.rs +++ /dev/null @@ -1,170 +0,0 @@ -use consensus::{ - consensus::test_consensus::TestConsensus, errors::RuleError, model::stores::statuses::BlockStatus, pipeline::ProcessingCounters, -}; -use futures_util::future::join_all; -use hashes::Hash; -use kaspa_core::{core::Core, service::Service, signals::Shutdown, trace}; -use num_format::{Locale, ToFormattedString}; -use rand_distr::{Distribution, Poisson}; -use std::{ - cmp::min, - sync::{ - atomic::{AtomicBool, Ordering}, - Arc, - }, - thread::{self, spawn, JoinHandle}, - time::Duration, -}; - -/// Emits blocks randomly in the round-based model where number of -/// blocks in each round is distributed ~ Poisson(bps * delay). -pub struct RandomBlockEmitter { - terminate: AtomicBool, - consensus: Arc, - genesis: Hash, - max_block_parents: u64, - bps: f64, - delay: f64, - target_blocks: u64, - - // Counters - counters: Arc, -} - -impl RandomBlockEmitter { - pub fn new( - consensus: Arc, - genesis: Hash, - max_block_parents: u64, - bps: f64, - delay: f64, - target_blocks: u64, - ) -> Self { - let counters = consensus.processing_counters().clone(); - Self { terminate: AtomicBool::new(false), consensus, genesis, max_block_parents, bps, delay, target_blocks, counters } - } - - #[tokio::main] - pub async fn worker(self: &Arc, core: Arc) { - let poi = Poisson::new(self.bps * self.delay).unwrap(); - let mut thread_rng = rand::thread_rng(); - - let mut tips = vec![self.genesis]; - let mut total = 0; - let mut timestamp = 0u64; - - while total < self.target_blocks { - let v = min(self.max_block_parents, poi.sample(&mut thread_rng) as u64); - timestamp += (self.delay as u64) * 1000; - if v == 0 { - continue; - } - - if self.terminate.load(Ordering::SeqCst) { - break; - } - - let mut new_tips = Vec::with_capacity(v as usize); - let mut futures = Vec::new(); - - self.counters.blocks_submitted.fetch_add(v, Ordering::SeqCst); - - for i in 0..v { - // Create a new block referencing all tips from the previous round - let mut b = self.consensus.build_block_with_parents(Default::default(), tips.clone()); - b.header.timestamp = timestamp; - b.header.nonce = i; - b.header.finalize(); - new_tips.push(b.header.hash); - // Submit to consensus - let f = self.consensus.validate_and_insert_block(b.to_immutable()); - futures.push(f); - } - join_all(futures).await.into_iter().collect::, RuleError>>().unwrap(); - - tips = new_tips; - total += v; - } - core.shutdown(); - } -} - -impl Service for RandomBlockEmitter { - fn ident(self: Arc) -> &'static str { - 
"block-emitter" - } - - fn start(self: Arc, core: Arc) -> Vec> { - vec![spawn(move || self.worker(core))] - } - - fn stop(self: Arc) { - self.terminate.store(true, Ordering::SeqCst); - } -} - -impl Shutdown for RandomBlockEmitter { - fn shutdown(self: &Arc) { - self.terminate.store(true, Ordering::SeqCst); - } -} - -pub struct ConsensusMonitor { - terminate: AtomicBool, - // Counters - counters: Arc, -} - -impl ConsensusMonitor { - pub fn new(counters: Arc) -> ConsensusMonitor { - ConsensusMonitor { terminate: AtomicBool::new(false), counters } - } - - pub fn worker(self: &Arc) { - let mut last_snapshot = self.counters.snapshot(); - - loop { - thread::sleep(Duration::from_millis(1000)); - - if self.terminate.load(Ordering::SeqCst) { - break; - } - - let snapshot = self.counters.snapshot(); - - let send_rate = snapshot.blocks_submitted - last_snapshot.blocks_submitted; - let header_rate = snapshot.header_counts - last_snapshot.header_counts; - let deps_rate = snapshot.dep_counts - last_snapshot.dep_counts; - let pending = snapshot.blocks_submitted - snapshot.header_counts; - - trace!( - "sent: {}, processed: {}, pending: {}, -> send rate b/s: {}, process rate b/s: {}, deps rate e/s: {}", - snapshot.blocks_submitted.to_formatted_string(&Locale::en), - snapshot.header_counts.to_formatted_string(&Locale::en), - pending.to_formatted_string(&Locale::en), - send_rate.to_formatted_string(&Locale::en), - header_rate.to_formatted_string(&Locale::en), - deps_rate.to_formatted_string(&Locale::en), - ); - - last_snapshot = snapshot; - } - - trace!("monitor thread exiting"); - } -} - -// service trait implementation for Monitor -impl Service for ConsensusMonitor { - fn ident(self: Arc) -> &'static str { - "consensus-monitor" - } - - fn start(self: Arc, _core: Arc) -> Vec> { - vec![spawn(move || self.worker())] - } - - fn stop(self: Arc) { - self.terminate.store(true, Ordering::SeqCst); - } -} diff --git a/kaspad/src/main.rs b/kaspad/src/main.rs index 49e49ab71..bcc0f8ba3 100644 --- a/kaspad/src/main.rs +++ b/kaspad/src/main.rs @@ -2,62 +2,109 @@ extern crate consensus; extern crate core; extern crate hashes; +use clap::Parser; +use consensus::model::stores::DB; +use kaspa_core::{core::Core, signals::Signals, task::runtime::AsyncRuntime}; +use std::fs; +use std::path::PathBuf; use std::sync::Arc; +use thiserror::__private::PathAsDisplay; + +use crate::monitor::ConsensusMonitor; +use consensus::consensus::Consensus; +use consensus::params::DEVNET_PARAMS; +use kaspa_core::{info, trace}; +use rpc_core::server::collector::ConsensusNotificationChannel; +use rpc_core::server::RpcCoreServer; +use rpc_grpc::server::GrpcServer; + +mod monitor; + +const DEFAULT_DATA_DIR: &str = "datadir"; + +// TODO: add a Config +// TODO: apply Args to Config +// TODO: log to file + +/// Kaspa Node launch arguments +#[derive(Parser, Debug)] +#[command(author, version, about, long_about = None)] +struct Args { + /// Directory to store data + #[arg(short = 'b', long = "appdir")] + app_dir: Option, + + /// Interface/port to listen for RPC connections (default port: 16110, testnet: 16210) + #[arg(long = "rpclisten")] + rpc_listen: Option, + + /// Logging level for all subsystems {off, error, warn, info, debug, trace} + /// -- You may also specify =,=,... 
to set the log level for individual subsystems + #[arg(short = 'd', long = "loglevel", default_value = "info")] + log_level: String, +} -use consensus::consensus::test_consensus::TestConsensus; -use consensus::params::MAINNET_PARAMS; -use consensus_core::blockhash; -use hashes::Hash; -use kaspa_core::core::Core; -use kaspa_core::*; - -use crate::emulator::ConsensusMonitor; +fn get_home_dir() -> PathBuf { + #[cfg(target_os = "windows")] + return dirs::data_local_dir().unwrap(); + #[cfg(not(target_os = "windows"))] + return dirs::home_dir().unwrap(); +} -mod emulator; +fn get_app_dir() -> PathBuf { + #[cfg(target_os = "windows")] + return get_home_dir().join("kaspa-rust"); + #[cfg(not(target_os = "windows"))] + return get_home_dir().join(".kaspa-rust"); +} pub fn main() { - let genesis: Hash = blockhash::new_unique(); - let bps = 8.0; - let delay = 2.0; - let target_blocks = 32000; - - trace!("Kaspad starting... (round-based simulation with BPS={} and D={})", bps, delay); - trace!("\n\n ------ NOTE: this code is just a placeholder for the actual kaspad code, for an actual simulation run the simpa binary ------\n\n"); - - // rayon::ThreadPoolBuilder::new() - // .num_threads(8) - // .build_global() - // .unwrap(); - - println!("Using rayon thread pool with {} threads", rayon::current_num_threads()); + // Get CLI arguments + let args = Args::parse(); + + // Initialize the logger + kaspa_core::log::init_logger(&args.log_level); + + info!("{} v{}", env!("CARGO_PKG_NAME"), env!("CARGO_PKG_VERSION")); + + // TODO: Refactor all this quick-and-dirty code + let app_dir = args + .app_dir + .unwrap_or_else(|| get_app_dir().as_path().to_str().unwrap().to_string()) + .replace('~', get_home_dir().as_path().to_str().unwrap()); + let app_dir = if app_dir.is_empty() { get_app_dir() } else { PathBuf::from(app_dir) }; + let db_dir = app_dir.join(DEFAULT_DATA_DIR); + assert!(!db_dir.to_str().unwrap().is_empty()); + info!("Application directory: {}", app_dir.as_display()); + info!("Data directory: {}", db_dir.as_display()); + fs::create_dir_all(db_dir.as_path()).unwrap(); + let grpc_server_addr = args.rpc_listen.unwrap_or_else(|| "127.0.0.1:16610".to_string()).parse().unwrap(); let core = Arc::new(Core::new()); // --- - let mut params = MAINNET_PARAMS.clone_with_skip_pow(); - params.genesis_hash = genesis; - params.genesis_timestamp = 0; - - // Make sure to create the DB first, so it cleans up last - let consensus = Arc::new(TestConsensus::create_from_temp_db(¶ms)); + let params = DEVNET_PARAMS; + let db = Arc::new(DB::open_default(db_dir.to_str().unwrap()).unwrap()); + let consensus = Arc::new(Consensus::new(db, ¶ms)); let monitor = Arc::new(ConsensusMonitor::new(consensus.processing_counters().clone())); - let emitter = Arc::new(emulator::RandomBlockEmitter::new( - consensus.clone(), - genesis, - params.max_block_parents.into(), - bps, - delay, - target_blocks, - )); - - // Bind the keyboard signal to the emitter. 
The emitter will then shutdown core - Arc::new(signals::Signals::new(&emitter)).init(); + + let notification_channel = ConsensusNotificationChannel::default(); + let rpc_core_server = Arc::new(RpcCoreServer::new(consensus.clone(), notification_channel.receiver())); + let grpc_server = Arc::new(GrpcServer::new(grpc_server_addr, rpc_core_server.service())); + + // Create an async runtime and register the top-level async services + let async_runtime = Arc::new(AsyncRuntime::new()); + async_runtime.register(rpc_core_server); + async_runtime.register(grpc_server); + + // Bind the keyboard signal to the core + Arc::new(Signals::new(&core)).init(); // Consensus must start first in order to init genesis in stores core.bind(consensus); - core.bind(emitter); core.bind(monitor); + core.bind(async_runtime); core.run(); diff --git a/kaspad/src/monitor.rs b/kaspad/src/monitor.rs new file mode 100644 index 000000000..bf3045d33 --- /dev/null +++ b/kaspad/src/monitor.rs @@ -0,0 +1,71 @@ +use consensus::pipeline::ProcessingCounters; +use kaspa_core::{core::Core, info, service::Service, trace}; +use num_format::{Locale, ToFormattedString}; +use std::{ + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, + thread::{self, spawn, JoinHandle}, + time::Duration, +}; + +pub struct ConsensusMonitor { + terminate: AtomicBool, + // Counters + counters: Arc, +} + +impl ConsensusMonitor { + pub fn new(counters: Arc) -> ConsensusMonitor { + ConsensusMonitor { terminate: AtomicBool::new(false), counters } + } + + pub fn worker(self: &Arc) { + let mut last_snapshot = self.counters.snapshot(); + let snapshot_interval = 10; + loop { + thread::sleep(Duration::from_secs(snapshot_interval)); + + if self.terminate.load(Ordering::SeqCst) { + break; + } + + let snapshot = self.counters.snapshot(); + + let send_rate = (snapshot.blocks_submitted - last_snapshot.blocks_submitted) as f64 / snapshot_interval as f64; + let header_rate = (snapshot.header_counts - last_snapshot.header_counts) as f64 / snapshot_interval as f64; + let deps_rate = (snapshot.dep_counts - last_snapshot.dep_counts) as f64 / snapshot_interval as f64; + let pending: i64 = i64::try_from(snapshot.blocks_submitted).unwrap() - i64::try_from(snapshot.header_counts).unwrap(); + + info!( + "sent: {}, processed: {}, pending: {}, -> send rate b/s: {:.2}, process rate b/s: {:.2}, deps rate e/s: {:.2}", + snapshot.blocks_submitted.to_formatted_string(&Locale::en), + snapshot.header_counts.to_formatted_string(&Locale::en), + pending.to_formatted_string(&Locale::en), + send_rate, + header_rate, + deps_rate, + ); + + last_snapshot = snapshot; + } + + trace!("monitor thread exiting"); + } +} + +// service trait implementation for Monitor +impl Service for ConsensusMonitor { + fn ident(self: Arc) -> &'static str { + "consensus-monitor" + } + + fn start(self: Arc, _core: Arc) -> Vec> { + vec![spawn(move || self.worker())] + } + + fn stop(self: Arc) { + self.terminate.store(true, Ordering::SeqCst); + } +} diff --git a/rpc/core/Cargo.toml b/rpc/core/Cargo.toml index b2b29ff7e..9d92a2cb0 100644 --- a/rpc/core/Cargo.toml +++ b/rpc/core/Cargo.toml @@ -8,6 +8,7 @@ license.workspace = true [dependencies] consensus-core.workspace = true +addresses.workspace = true hashes.workspace = true math.workspace = true kaspa-core.workspace = true @@ -18,6 +19,7 @@ derive_more.workspace = true thiserror.workspace = true borsh.workspace = true async-std.workspace = true +log.workspace = true async-trait = "0.1.57" ahash = "0.8.0" futures = { version = "0.3" } diff --git 
a/rpc/core/src/api/Extending RpcApi.md b/rpc/core/src/api/Extending RpcApi.md new file mode 100644 index 000000000..f27ebbfe2 --- /dev/null +++ b/rpc/core/src/api/Extending RpcApi.md @@ -0,0 +1,43 @@ +# HOWTO Extend the RPC Api by adding a new method + +As an illustration, let's pretend that we add a new `submit_block` method. + +## consensus-core + +1. If necessary, add a function into the ConsensusApi trait. + +## consensus + +1. Implement the function for Consensus + +## rpc-core + +1. Create an op variant in `rpc_core::api::ops::RpcApiOps` + (ie. `SubmitBlock`) +2. Create in `rpc_core::model::message` a pair of request and response structures + (ie. `SubmitBlockRequest` and `SubmitBlockResponse`). +3. Implement a constructor for the request. +4. If necessary, implement converters to handle consensus-core <-> rpc-core under `rpc_core::convert`. +5. Add a pair of new async functions to the `rpc_core::api::RpcApi` trait, one with detailed parameters + and one with a unique request message. + Implement the first as a call to the second. + (ie. `async fn submit_block(&self, block: RpcBlock, allow_non_daa_blocks: bool) -> RpcResult` and + `async fn submit_block_call(&self, request: SubmitBlockRequest) -> RpcResult;`) +6. Implement the function having a `_call` suffix into `rpc_core::server::service::RpcCoreService`. + +## rpc-grpc + +1. In file `rpc\grpc\proto\rpc.proto`, create a request message and a response message + (ie. `SubmitBlockRequestMessage` and `SubmitBlockResponseMessage`). +2. In file `rpc\grpc\proto\messages.proto`, add respectively a request and a response to the payload of `KaspadRequest` and `KaspadResponse`. + (ie. `SubmitBlockRequestMessage submitBlockRequest = 1003;` and `SubmitBlockResponseMessage submitBlockResponse = 1004;`) +3. In `rpc\grpc\src\convert\message.rs`, implement converters to handle rpc-core <-> rpc-grpc. +4. If appropriate, implement a matcher in `rpc_grpc::client::resolver::matcher`. +5. Complete the `Matcher` trait implementation for `kaspad_request::Payload`. +6. In `rpc\grpc\src\convert\kaspad.rs`, complete the `From` implementations for `RpcApiOps`. +7. In `rpc\grpc\src\convert\kaspad.rs`, add calls to `impl_into_kaspad_request!` and `impl_into_kaspad_response!` + (ie. `impl_into_kaspad_request!(rpc_core::SubmitBlockRequest, SubmitBlockRequestMessage, SubmitBlockRequest);` and + `impl_into_kaspad_response!(rpc_core::SubmitBlockResponse, SubmitBlockResponseMessage, SubmitBlockResponse);`). +8. Implement the function having a `_call` suffix into `rpc_grpc::client::RpcApiGrpc`. +9. In `rpc_grpc::server::service::RpcService::message_stream`, requests handler, add an arm and implement + a handler for the new method. 
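To make the rpc-core steps above concrete, the following condensed sketch shows what steps 1–3 and 5 of the rpc-core section produce for the illustrative `submit_block` method. It mirrors the definitions added elsewhere in this change (derive attributes, serde annotations, the referenced `RpcBlock`/`SubmitBlockReport`/`RpcResult` types and the remaining trait items are omitted for brevity); it is a sketch, not additional API.

```rust
use async_trait::async_trait;

// Step 1 (rpc_core::api::ops): add the op variant.
pub enum RpcApiOps {
    // ...existing ops...
    SubmitBlock,
}

// Steps 2-3 (rpc_core::model::message): a request/response pair plus a constructor for the request.
pub struct SubmitBlockRequest {
    pub block: RpcBlock,
    pub allow_non_daa_blocks: bool,
}

impl SubmitBlockRequest {
    pub fn new(block: RpcBlock, allow_non_daa_blocks: bool) -> Self {
        Self { block, allow_non_daa_blocks }
    }
}

pub struct SubmitBlockResponse {
    pub report: SubmitBlockReport,
}

// Step 5 (rpc_core::api::rpc): the detailed-parameter function is readily implemented
// as a call to the `_call` variant, which implementors (rpc-core server, gRPC client) provide.
#[async_trait]
pub trait RpcApi: Sync + Send {
    async fn submit_block(&self, block: RpcBlock, allow_non_daa_blocks: bool) -> RpcResult<SubmitBlockResponse> {
        self.submit_block_call(SubmitBlockRequest::new(block, allow_non_daa_blocks)).await
    }
    async fn submit_block_call(&self, request: SubmitBlockRequest) -> RpcResult<SubmitBlockResponse>;
}
```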
diff --git a/rpc/core/src/api/notifications.rs b/rpc/core/src/api/notifications.rs index 1191683dc..d5b499e03 100644 --- a/rpc/core/src/api/notifications.rs +++ b/rpc/core/src/api/notifications.rs @@ -53,7 +53,10 @@ impl Display for Notification { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Notification::BlockAdded(ref notification) => { - write!(f, "Notification BlockAdded with hash {}", notification.block.verbose_data.hash) + write!(f, "BlockAdded notification with hash {}", notification.block.header.hash) + } + Notification::NewBlockTemplate(_) => { + write!(f, "NewBlockTemplate notification") } _ => write!(f, "Notification type not implemented yet"), // Notification::VirtualSelectedParentChainChanged(_) => todo!(), @@ -63,7 +66,6 @@ impl Display for Notification { // Notification::VirtualSelectedParentBlueScoreChanged(_) => todo!(), // Notification::VirtualDaaScoreChanged(_) => todo!(), // Notification::PruningPointUTXOSetOverride(_) => todo!(), - // Notification::NewBlockTemplate(_) => todo!(), } } } diff --git a/rpc/core/src/api/ops.rs b/rpc/core/src/api/ops.rs index 58b893a32..9fb04a1c6 100644 --- a/rpc/core/src/api/ops.rs +++ b/rpc/core/src/api/ops.rs @@ -37,6 +37,7 @@ pub enum RpcApiOps { // Subscription commands for starting/stopping notifications NotifyBlockAdded, + NotifyNewBlockTemplate, // Server to client notification Notification, diff --git a/rpc/core/src/api/rpc.rs b/rpc/core/src/api/rpc.rs index 6377774d9..9a2f59c27 100644 --- a/rpc/core/src/api/rpc.rs +++ b/rpc/core/src/api/rpc.rs @@ -14,6 +14,11 @@ use crate::{ }; use async_trait::async_trait; +/// Client RPC Api +/// +/// The [`RpcApi`] trait defines RPC calls taking a request message as unique parameter. +/// +/// For each RPC call a matching readily implemented function taking detailed parameters is also provided. #[async_trait] pub trait RpcApi: Sync + Send { // async fn ping( @@ -25,16 +30,19 @@ pub trait RpcApi: Sync + Send { // &self // ) -> RpcResult; - // async fn submit_block( - // &self, - // block: RpcBlock, - // allow_non_daa_blocks : bool - // ) -> RpcResult; + /// Submit a block into the DAG. + /// Blocks are generally expected to have been generated using the get_block_template call. + async fn submit_block(&self, block: RpcBlock, allow_non_daa_blocks: bool) -> RpcResult { + self.submit_block_call(SubmitBlockRequest::new(block, allow_non_daa_blocks)).await + } + async fn submit_block_call(&self, request: SubmitBlockRequest) -> RpcResult; - // async fn get_block_template( - // &self, - // req: GetBlockTemplateRequest - // ) -> RpcResult; + /// Request a current block template. + /// Callers are expected to solve the block template and submit it using the submit_block call. 
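+ ///
+ /// A usage sketch (illustrative only, assuming any [`RpcApi`] implementation such as the gRPC client added in this change):
+ /// ```ignore
+ /// let template = rpc.get_block_template(pay_address, extra_data).await?;
+ /// // ... solve the proof-of-work for template.block ...
+ /// let report = rpc.submit_block(template.block, false).await?.report;
+ /// assert!(report.is_success());
+ /// ```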
+ async fn get_block_template(&self, pay_address: RpcAddress, extra_data: RpcExtraData) -> RpcResult { + self.get_block_template_call(GetBlockTemplateRequest::new(pay_address, extra_data)).await + } + async fn get_block_template_call(&self, request: GetBlockTemplateRequest) -> RpcResult; // async fn get_peer_addresses( // &self @@ -46,7 +54,7 @@ pub trait RpcApi: Sync + Send { // async fn get_mempool_entry( // &self, - // req: GetMempoolEntryRequest + // request: GetMempoolEntryRequest // ) -> RpcResult; // async fn get_mempool_entries( @@ -61,7 +69,7 @@ pub trait RpcApi: Sync + Send { // async fn add_peer( // &self, - // req: AddPeerRequest + // request: AddPeerRequest // ) -> RpcResult; // async fn submit_transaction( @@ -70,36 +78,40 @@ pub trait RpcApi: Sync + Send { // allow_orphan: bool, // ) -> RpcResult; - async fn get_block(&self, req: GetBlockRequest) -> RpcResult; + /// Requests information about a specific block. + async fn get_block(&self, hash: RpcHash, include_transactions: bool) -> RpcResult { + self.get_block_call(GetBlockRequest::new(hash, include_transactions)).await + } + async fn get_block_call(&self, request: GetBlockRequest) -> RpcResult; // async fn get_subnetwork( // &self, - // req: GetSubnetworkRequest + // request: GetSubnetworkRequest // ) -> RpcResult; // async fn get_virtual_selected_parent_chain_from_block( // &self, - // req: GetVirtualSelectedParentChainFromBlockRequest + // request: GetVirtualSelectedParentChainFromBlockRequest // ) -> RpcResult; // async fn get_blocks( // &self, - // req: GetBlocksRequest + // request: GetBlocksRequest // ) -> RpcResult; // async fn get_block_count( // &self, - // req: GetBlockCountRequest + // request: GetBlockCountRequest // ) -> RpcResult; // async fn get_block_dag_info( // &self, - // req: GetBlockDagInfoRequest + // request: GetBlockDagInfoRequest // ) -> RpcResult; // async fn resolve_finality_conflict( // &self, - // req: ResolveFinalityConflictRequest + // request: ResolveFinalityConflictRequest // ) -> RpcResult; // async fn shutdown( @@ -108,7 +120,7 @@ pub trait RpcApi: Sync + Send { // async fn get_headers( // &self, - // req: GetHeadersRequest + // request: GetHeadersRequest // ) -> RpcResult; // async fn get_utxos_by_address( @@ -132,24 +144,27 @@ pub trait RpcApi: Sync + Send { // async fn ban( // &self, - // req: BanRequest + // request: BanRequest // ) -> RpcResult; // async fn unban( // &self, - // req: UnbanRequest + // request: UnbanRequest // ) -> RpcResult; - async fn get_info(&self, req: GetInfoRequest) -> RpcResult; + async fn get_info_call(&self, request: GetInfoRequest) -> RpcResult; + async fn get_info(&self) -> RpcResult { + self.get_info_call(GetInfoRequest {}).await + } // async fn estimate_network_hashes_per_second( // &self, - // req: EstimateNetworkHashesPerSecondRequest + // request: EstimateNetworkHashesPerSecondRequest // ) -> RpcResult; // async fn get_mempool_entries_by_addresses( // &self, - // req: GetMempoolEntriesByAddressesRequest + // request: GetMempoolEntriesByAddressesRequest // ) -> RpcResult; // async fn get_coin_supply( diff --git a/rpc/core/src/convert/block.rs b/rpc/core/src/convert/block.rs index 16c8267d7..7d27918c2 100644 --- a/rpc/core/src/convert/block.rs +++ b/rpc/core/src/convert/block.rs @@ -1,7 +1,7 @@ use std::sync::Arc; -use crate::{RpcBlock, RpcBlockVerboseData, RpcError, RpcResult}; -use consensus_core::block::Block; +use crate::{GetBlockTemplateResponse, RpcBlock, RpcError, RpcResult, RpcTransaction}; +use consensus_core::block::{Block, BlockTemplate, 
MutableBlock}; // ---------------------------------------------------------------------------- // consensus_core to rpc_core @@ -9,25 +9,32 @@ use consensus_core::block::Block; impl From<&Block> for RpcBlock { fn from(item: &Block) -> Self { - Self { header: (&*item.header).into(), transactions: vec![], verbose_data: item.into() } + Self { + header: (&*item.header).into(), + transactions: item.transactions.iter().map(RpcTransaction::from).collect(), + // TODO: Implement a populating process inspired from kaspad\app\rpc\rpccontext\verbosedata.go + verbose_data: None, + } } } -impl From<&Block> for RpcBlockVerboseData { - fn from(item: &Block) -> Self { - // TODO: Fill all fields with real values. - // see kaspad\app\rpc\rpccontext\verbosedata.go PopulateBlockWithVerboseData +impl From<&MutableBlock> for RpcBlock { + fn from(item: &MutableBlock) -> Self { Self { - hash: item.hash(), - difficulty: 0.into(), - selected_parent_hash: 0.into(), - transaction_ids: vec![], - is_header_only: true, - blue_score: 0u64, - children_hashes: vec![], - merge_set_blues_hashes: vec![], - merge_set_reds_hashes: vec![], - is_chain_block: false, + header: (&item.header).into(), + transactions: item.transactions.iter().map(RpcTransaction::from).collect(), + verbose_data: None, + } + } +} + +impl From<&BlockTemplate> for GetBlockTemplateResponse { + fn from(item: &BlockTemplate) -> Self { + Self { + block: (&item.block).into(), + // TODO: either call some Block.is_synced() if/when available or implement + // a functional equivalent here based on item.selected_parent_timestamp + is_synced: true, } } } @@ -41,10 +48,12 @@ impl TryFrom<&RpcBlock> for Block { fn try_from(item: &RpcBlock) -> RpcResult { Ok(Self { header: Arc::new((&item.header).try_into()?), - - // TODO: Implement converters for all tx structs and fill transactions - // with real values. 
- transactions: Arc::new(vec![]), // FIXME + transactions: Arc::new( + item.transactions + .iter() + .map(consensus_core::tx::Transaction::try_from) + .collect::>>()?, + ), }) } } diff --git a/rpc/core/src/convert/header.rs b/rpc/core/src/convert/header.rs index f881e8366..37be3d856 100644 --- a/rpc/core/src/convert/header.rs +++ b/rpc/core/src/convert/header.rs @@ -1,19 +1,20 @@ -use crate::{RpcBlockHeader, RpcBlockLevelParents, RpcError, RpcResult}; +use crate::{RpcBlockLevelParents, RpcError, RpcHeader, RpcResult}; use consensus_core::header::Header; // ---------------------------------------------------------------------------- // consensus_core to rpc_core // ---------------------------------------------------------------------------- -impl From<&Header> for RpcBlockHeader { +impl From<&Header> for RpcHeader { fn from(item: &Header) -> Self { Self { - version: item.version.into(), + hash: item.hash, + version: item.version, parents: item.parents_by_level.iter().map(|x| RpcBlockLevelParents { parent_hashes: x.clone() }).collect(), hash_merkle_root: item.hash_merkle_root, accepted_id_merkle_root: item.accepted_id_merkle_root, utxo_commitment: item.utxo_commitment, - timestamp: item.timestamp.try_into().expect("time stamp is convertible from u64 to i64"), + timestamp: item.timestamp, bits: item.bits, nonce: item.nonce, daa_score: item.daa_score, @@ -28,22 +29,23 @@ impl From<&Header> for RpcBlockHeader { // rpc_core to consensus_core // ---------------------------------------------------------------------------- -impl TryFrom<&RpcBlockHeader> for Header { +impl TryFrom<&RpcHeader> for Header { type Error = RpcError; - fn try_from(item: &RpcBlockHeader) -> RpcResult { - Ok(Self::new( - item.version.try_into()?, + fn try_from(item: &RpcHeader) -> RpcResult { + let header = Self::new( + item.version, item.parents.iter().map(|x| x.parent_hashes.clone()).collect(), item.hash_merkle_root, item.accepted_id_merkle_root, item.utxo_commitment, - item.timestamp.try_into()?, + item.timestamp, item.bits, item.nonce, item.daa_score, item.blue_work.into(), item.blue_score, item.pruning_point, - )) + ); + Ok(header) } } diff --git a/rpc/core/src/convert/mod.rs b/rpc/core/src/convert/mod.rs index db9dcc041..4c9135549 100644 --- a/rpc/core/src/convert/mod.rs +++ b/rpc/core/src/convert/mod.rs @@ -1,3 +1,4 @@ pub mod block; pub mod header; pub mod notification; +pub mod tx; diff --git a/rpc/core/src/convert/notification.rs b/rpc/core/src/convert/notification.rs index 7e031ed89..050b0fab5 100644 --- a/rpc/core/src/convert/notification.rs +++ b/rpc/core/src/convert/notification.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use crate::{notify::collector::ArcConvert, BlockAddedNotification, Notification}; +use crate::{notify::collector::ArcConvert, BlockAddedNotification, NewBlockTemplateNotification, Notification}; use consensus_core::notify as consensus_notify; // ---------------------------------------------------------------------------- @@ -11,6 +11,7 @@ impl From<&consensus_notify::Notification> for Notification { fn from(item: &consensus_notify::Notification) -> Self { match item { consensus_notify::Notification::BlockAdded(msg) => Notification::BlockAdded(msg.into()), + consensus_notify::Notification::NewBlockTemplate(msg) => Notification::NewBlockTemplate(msg.into()), } } } @@ -21,6 +22,12 @@ impl From<&consensus_notify::BlockAddedNotification> for BlockAddedNotification } } +impl From<&consensus_notify::NewBlockTemplateNotification> for NewBlockTemplateNotification { + fn from(_: 
&consensus_notify::NewBlockTemplateNotification) -> Self { + Self {} + } +} + /// Pseudo conversion from Arc to Arc. /// This is basically a clone() op. impl From> for Arc { diff --git a/rpc/core/src/convert/tx.rs b/rpc/core/src/convert/tx.rs new file mode 100644 index 000000000..82007609a --- /dev/null +++ b/rpc/core/src/convert/tx.rs @@ -0,0 +1,131 @@ +use crate::{ + RpcError, RpcResult, RpcScriptPublicKey, RpcScriptVec, RpcTransaction, RpcTransactionInput, RpcTransactionOutput, RpcUtxoEntry, +}; +use consensus_core::tx::{ScriptPublicKey, ScriptVec, Transaction, TransactionInput, TransactionOutput, UtxoEntry}; + +// ---------------------------------------------------------------------------- +// consensus_core to rpc_core +// ---------------------------------------------------------------------------- + +impl From<&Transaction> for RpcTransaction { + fn from(item: &Transaction) -> Self { + Self { + version: item.version, + inputs: item.inputs.iter().map(RpcTransactionInput::from).collect(), + outputs: item.outputs.iter().map(RpcTransactionOutput::from).collect(), + lock_time: item.lock_time, + subnetwork_id: item.subnetwork_id.clone(), + gas: item.gas, + payload: (&item.payload).into(), + // TODO: Implement a populating process inspired from kaspad\app\rpc\rpccontext\verbosedata.go + verbose_data: None, + } + } +} + +impl From<&TransactionOutput> for RpcTransactionOutput { + fn from(item: &TransactionOutput) -> Self { + Self { + value: item.value, + script_public_key: (&item.script_public_key).into(), + // TODO: Implement a populating process inspired from kaspad\app\rpc\rpccontext\verbosedata.go + verbose_data: None, + } + } +} + +impl From<&TransactionInput> for RpcTransactionInput { + fn from(item: &TransactionInput) -> Self { + Self { + previous_outpoint: item.previous_outpoint, + signature_script: (&item.signature_script).into(), + sequence: item.sequence, + sig_op_count: item.sig_op_count, + // TODO: Implement a populating process inspired from kaspad\app\rpc\rpccontext\verbosedata.go + verbose_data: None, + } + } +} + +impl From<&UtxoEntry> for RpcUtxoEntry { + fn from(item: &UtxoEntry) -> Self { + Self { + amount: item.amount, + script_public_key: (&item.script_public_key).into(), + block_daa_score: item.block_daa_score, + is_coinbase: item.is_coinbase, + } + } +} + +impl From<&ScriptPublicKey> for RpcScriptPublicKey { + fn from(item: &ScriptPublicKey) -> Self { + Self { version: item.version(), script_public_key: item.script().into() } + } +} + +impl From<&ScriptVec> for RpcScriptVec { + fn from(item: &ScriptVec) -> Self { + (&item.clone().into_vec()).into() + } +} + +// ---------------------------------------------------------------------------- +// rpc_core to consensus_core +// ---------------------------------------------------------------------------- + +impl TryFrom<&RpcTransaction> for Transaction { + type Error = RpcError; + fn try_from(item: &RpcTransaction) -> RpcResult { + Ok(Transaction::new( + item.version, + item.inputs + .iter() + .map(consensus_core::tx::TransactionInput::try_from) + .collect::>>()?, + item.outputs + .iter() + .map(consensus_core::tx::TransactionOutput::try_from) + .collect::>>()?, + item.lock_time, + item.subnetwork_id.clone(), + item.gas, + item.payload.as_ref().clone(), + )) + } +} + +impl TryFrom<&RpcTransactionOutput> for TransactionOutput { + type Error = RpcError; + fn try_from(item: &RpcTransactionOutput) -> RpcResult { + Ok(Self::new(item.value, (&item.script_public_key).try_into()?)) + } +} + +impl TryFrom<&RpcTransactionInput> for 
TransactionInput { + type Error = RpcError; + fn try_from(item: &RpcTransactionInput) -> RpcResult { + Ok(Self::new(item.previous_outpoint, item.signature_script.as_ref().clone(), item.sequence, item.sig_op_count)) + } +} + +impl TryFrom<&RpcUtxoEntry> for UtxoEntry { + type Error = RpcError; + fn try_from(item: &RpcUtxoEntry) -> RpcResult { + Ok(Self::new(item.amount, (&item.script_public_key).try_into()?, item.block_daa_score, item.is_coinbase)) + } +} + +impl TryFrom<&RpcScriptPublicKey> for ScriptPublicKey { + type Error = RpcError; + fn try_from(item: &RpcScriptPublicKey) -> RpcResult { + Ok(Self::new(item.version, (&item.script_public_key).try_into()?)) + } +} + +impl TryFrom<&RpcScriptVec> for ScriptVec { + type Error = RpcError; + fn try_from(item: &RpcScriptVec) -> RpcResult { + Ok(ScriptVec::from_slice(item.as_ref().as_slice())) + } +} diff --git a/rpc/core/src/errors.rs b/rpc/core/src/errors.rs index 8486b46b4..8b3e0f08a 100644 --- a/rpc/core/src/errors.rs +++ b/rpc/core/src/errors.rs @@ -27,6 +27,12 @@ pub enum RpcError { #[error("Feature not supported")] UnsupportedFeature, + #[error("Primitive to enum conversion error")] + PrimitiveToEnumConversionError, + + #[error(transparent)] + AddressError(#[from] addresses::AddressError), + #[error("{0}")] General(String), } diff --git a/rpc/core/src/lib.rs b/rpc/core/src/lib.rs index f721794b3..56b4b9d45 100644 --- a/rpc/core/src/lib.rs +++ b/rpc/core/src/lib.rs @@ -11,6 +11,7 @@ pub mod stubs; pub mod prelude { pub use super::api::notifications::*; + pub use super::model::address::*; pub use super::model::block::*; pub use super::model::blue_work::*; pub use super::model::hash::*; @@ -25,6 +26,7 @@ pub mod prelude { pub use api::notifications::*; pub use convert::*; pub use errors::*; +pub use model::address::*; pub use model::block::*; pub use model::blue_work::*; pub use model::hash::*; diff --git a/rpc/core/src/model/address.rs b/rpc/core/src/model/address.rs new file mode 100644 index 000000000..53f42045e --- /dev/null +++ b/rpc/core/src/model/address.rs @@ -0,0 +1 @@ +pub type RpcAddress = addresses::Address; diff --git a/rpc/core/src/model/block.rs b/rpc/core/src/model/block.rs index 6085a2420..1301ff9d6 100644 --- a/rpc/core/src/model/block.rs +++ b/rpc/core/src/model/block.rs @@ -1,14 +1,14 @@ use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; use serde::{Deserialize, Serialize}; -use crate::prelude::{RpcBlockHeader, RpcHash, RpcTransaction}; +use crate::prelude::{RpcHash, RpcHeader, RpcTransaction}; #[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize, BorshSchema)] #[serde(rename_all = "camelCase")] pub struct RpcBlock { - pub header: RpcBlockHeader, + pub header: RpcHeader, pub transactions: Vec, - pub verbose_data: RpcBlockVerboseData, + pub verbose_data: Option, } #[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize, BorshSchema)] diff --git a/rpc/core/src/model/header.rs b/rpc/core/src/model/header.rs index 01700a9bf..801bff3aa 100644 --- a/rpc/core/src/model/header.rs +++ b/rpc/core/src/model/header.rs @@ -2,15 +2,18 @@ use crate::{prelude::RpcHash, RpcBlueWorkType}; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; use serde::{Deserialize, Serialize}; +// TODO: Make RpcHeader an alias of consensus-core::Header + #[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize, BorshSchema)] #[serde(rename_all = "camelCase")] -pub struct RpcBlockHeader { - pub version: u32, +pub struct RpcHeader { + pub hash: RpcHash, // Cached hash 
+ pub version: u16, pub parents: Vec, pub hash_merkle_root: RpcHash, pub accepted_id_merkle_root: RpcHash, pub utxo_commitment: RpcHash, - pub timestamp: i64, + pub timestamp: u64, pub bits: u32, pub nonce: u64, pub daa_score: u64, diff --git a/rpc/core/src/model/hex_data.rs b/rpc/core/src/model/hex_data.rs index b51eeb834..5b272f9e7 100644 --- a/rpc/core/src/model/hex_data.rs +++ b/rpc/core/src/model/hex_data.rs @@ -26,6 +26,12 @@ impl fmt::Display for RpcHexData { } } +impl From<&[u8]> for RpcHexData { + fn from(item: &[u8]) -> Self { + RpcHexData(item.into()) + } +} + impl From<&Vec> for RpcHexData { fn from(item: &Vec) -> RpcHexData { RpcHexData(item.clone()) diff --git a/rpc/core/src/model/message.rs b/rpc/core/src/model/message.rs index 7aa98a9a5..0c54c0a36 100644 --- a/rpc/core/src/model/message.rs +++ b/rpc/core/src/model/message.rs @@ -1,8 +1,97 @@ +use std::fmt::{Display, Formatter}; + use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; use serde::{Deserialize, Serialize}; -use crate::{api::ops::SubscribeCommand, RpcBlock, RpcHash}; +use crate::{api::ops::SubscribeCommand, RpcAddress, RpcBlock, RpcHash}; + +pub type RpcExtraData = Vec; + +/// SubmitBlockRequest requests to submit a block into the DAG. +/// Blocks are generally expected to have been generated using the getBlockTemplate call. +/// +/// See: [`GetBlockTemplateRequest`] +#[derive(Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize, BorshSchema)] +#[serde(rename_all = "camelCase")] +pub struct SubmitBlockRequest { + pub block: RpcBlock, + #[serde(alias = "allowNonDAABlocks")] + pub allow_non_daa_blocks: bool, +} +impl SubmitBlockRequest { + pub fn new(block: RpcBlock, allow_non_daa_blocks: bool) -> Self { + Self { block, allow_non_daa_blocks } + } +} + +#[derive(Eq, PartialEq, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize, BorshSchema)] +#[serde(rename_all = "camelCase")] +pub enum SubmitBlockRejectReason { + // None = 0, + BlockInvalid = 1, + IsInIBD = 2, +} +impl SubmitBlockRejectReason { + fn as_str(&self) -> &'static str { + // see app\appmessage\rpc_submit_block.go, line 35 + match self { + SubmitBlockRejectReason::BlockInvalid => "Block is invalid", + SubmitBlockRejectReason::IsInIBD => "Node is in IBD", + } + } +} +impl Display for SubmitBlockRejectReason { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.write_str(self.as_str()) + } +} + +#[derive(Eq, PartialEq, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize, BorshSchema)] +#[serde(rename_all = "camelCase")] +pub enum SubmitBlockReport { + Success, + Reject(SubmitBlockRejectReason), +} +impl SubmitBlockReport { + pub fn is_success(&self) -> bool { + *self == SubmitBlockReport::Success + } +} + +#[derive(Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize, BorshSchema)] +#[serde(rename_all = "camelCase")] +pub struct SubmitBlockResponse { + pub report: SubmitBlockReport, +} +/// GetBlockTemplateRequest requests a current block template. 
+/// Callers are expected to solve the block template and submit it using the submitBlock call. +/// +/// See: [`SubmitBlockRequest`] +#[derive(Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize, BorshSchema)] +#[serde(rename_all = "camelCase")] +pub struct GetBlockTemplateRequest { + /// Which kaspa address should the coinbase block reward transaction pay into + pub pay_address: RpcAddress, + pub extra_data: RpcExtraData, +} +impl GetBlockTemplateRequest { + pub fn new(pay_address: RpcAddress, extra_data: RpcExtraData) -> Self { + Self { pay_address, extra_data } + } +} + +#[derive(Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize, BorshSchema)] +#[serde(rename_all = "camelCase")] +pub struct GetBlockTemplateResponse { + pub block: RpcBlock, + + /// Whether kaspad thinks that it's synced. + /// Callers are discouraged (but not forbidden) from solving blocks when kaspad is not synced. + /// That is because when kaspad isn't in sync with the rest of the network there's a high + /// chance the block will never be accepted, thus the solving effort would have been wasted. + pub is_synced: bool, +} /// GetBlockRequest requests information about a specific block #[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize, BorshSchema)] #[serde(rename_all = "camelCase")] @@ -13,6 +102,11 @@ pub struct GetBlockRequest { /// Whether to include transaction data in the response pub include_transactions: bool, } +impl GetBlockRequest { + pub fn new(hash: RpcHash, include_transactions: bool) -> Self { + Self { hash, include_transactions } + } +} #[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize, BorshSchema)] #[serde(rename_all = "camelCase")] @@ -30,6 +124,11 @@ pub struct GetBlockResponse { pub struct NotifyBlockAddedRequest { pub command: SubscribeCommand, } +impl NotifyBlockAddedRequest { + pub fn new(command: SubscribeCommand) -> Self { + Self { command } + } +} #[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize, BorshSchema)] #[serde(rename_all = "camelCase")] @@ -60,3 +159,29 @@ pub struct GetInfoResponse { pub is_synced: bool, pub has_notify_command: bool, } + +/// NotifyNewBlockTemplateRequest registers this connection for newBlockTemplate notifications. +/// +/// See: [`NewBlockTemplateNotification`] +#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize, BorshSchema)] +#[serde(rename_all = "camelCase")] +pub struct NotifyNewBlockTemplateRequest { + pub command: SubscribeCommand, +} +impl NotifyNewBlockTemplateRequest { + pub fn new(command: SubscribeCommand) -> Self { + Self { command } + } +} + +#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize, BorshSchema)] +#[serde(rename_all = "camelCase")] +pub struct NotifyNewBlockTemplateResponse {} + +/// NewBlockTemplateNotification is sent whenever a new block template is available for miners.
+/// +/// See: [`NotifyNewBlockTemplateRequest`] +#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize, BorshSchema)] +#[serde(rename_all = "camelCase")] +pub struct NewBlockTemplateNotification {} diff --git a/rpc/core/src/model/mod.rs b/rpc/core/src/model/mod.rs index fb6c20a9f..20fe320c9 100644 --- a/rpc/core/src/model/mod.rs +++ b/rpc/core/src/model/mod.rs @@ -1,3 +1,4 @@ +pub mod address; pub mod block; pub mod blue_work; pub mod hash; @@ -8,6 +9,7 @@ pub mod script_class; pub mod subnets; pub mod tx; +pub use address::*; pub use block::*; pub use blue_work::*; pub use hash::*; diff --git a/rpc/core/src/model/tx.rs b/rpc/core/src/model/tx.rs index ce0fae341..0a4b077df 100644 --- a/rpc/core/src/model/tx.rs +++ b/rpc/core/src/model/tx.rs @@ -1,65 +1,95 @@ use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; -use consensus_core::tx::TransactionId; +use consensus_core::tx::{TransactionId, TransactionOutpoint}; use serde::{Deserialize, Serialize}; use crate::prelude::{RpcHash, RpcHexData, RpcScriptClass, RpcSubnetworkId}; +/// Represents the ID of a Kaspa transaction pub type RpcTransactionId = TransactionId; +pub type RpcScriptVec = RpcHexData; + +/// Represents a Kaspad ScriptPublicKey +/// +/// This should be an alias of [`consensus_core::tx::ScriptPublicKey`] but +/// is not because its script field of type [`consensus_core::tx::ScriptVec`] +/// is a `smallvec::SmallVec` which does not implement the borsh traits. #[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize, BorshSchema)] #[serde(rename_all = "camelCase")] -pub struct RpcTransaction { - pub version: u32, - pub inputs: Vec, - pub outputs: Vec, - pub lock_time: u64, - pub subnetwork_id: RpcSubnetworkId, - pub gas: u64, - pub payload: RpcHexData, - pub verbose_data: RpcTransactionVerboseData, +pub struct RpcScriptPublicKey { + pub version: u16, + pub script_public_key: RpcHexData, } +/// Holds details about an individual transaction output in a utxo +/// set such as whether or not it was contained in a coinbase tx, the daa +/// score of the block that accepts the tx, its public key script, and how +/// much it pays. +/// +/// This should be an alias of [`consensus_core::tx::UtxoEntry`] but is not +/// because of the indirect use of a `smallvec::SmallVec` by `script_public_key`.
+#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize, BorshSchema)] +#[serde(rename_all = "camelCase")] +pub struct RpcUtxoEntry { + pub amount: u64, + pub script_public_key: RpcScriptPublicKey, + pub block_daa_score: u64, + pub is_coinbase: bool, +} + +/// Represents a Kaspa transaction outpoint +pub type RpcTransactionOutpoint = TransactionOutpoint; + +/// Represents a Kaspa transaction input #[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize, BorshSchema)] #[serde(rename_all = "camelCase")] pub struct RpcTransactionInput { - pub previous_outpoint: RpcOutpoint, + pub previous_outpoint: RpcTransactionOutpoint, pub signature_script: RpcHexData, pub sequence: u64, - pub sig_op_count: u32, + pub sig_op_count: u8, pub verbose_data: Option, } +/// Represent Kaspa transaction input verbose data #[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize, BorshSchema)] #[serde(rename_all = "camelCase")] -pub struct RpcTransactionOutput { - pub amount: u64, - pub script_public_key: RpcScriptPublicKey, - pub verbose_data: RpcTransactionOutputVerboseData, -} +pub struct RpcTransactionInputVerboseData {} +/// Represents a Kaspad transaction output #[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize, BorshSchema)] #[serde(rename_all = "camelCase")] -pub struct RpcOutpoint { - pub transaction_id: RpcTransactionId, - pub index: u32, +pub struct RpcTransactionOutput { + pub value: u64, + pub script_public_key: RpcScriptPublicKey, + pub verbose_data: Option, } +/// Represent Kaspa transaction output verbose data #[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize, BorshSchema)] #[serde(rename_all = "camelCase")] -pub struct RpcUtxoEntry { - pub amount: u64, - pub script_public_key: RpcScriptPublicKey, - pub block_daa_score: u64, - pub is_coinbase: bool, +pub struct RpcTransactionOutputVerboseData { + pub script_public_key_type: RpcScriptClass, + + // TODO: change the type of this field for a better binary representation + pub script_public_key_address: String, } +/// Represents a Kaspa transaction #[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize, BorshSchema)] #[serde(rename_all = "camelCase")] -pub struct RpcScriptPublicKey { - pub script_public_key: RpcHexData, +pub struct RpcTransaction { pub version: u16, + pub inputs: Vec, + pub outputs: Vec, + pub lock_time: u64, + pub subnetwork_id: RpcSubnetworkId, + pub gas: u64, + pub payload: RpcHexData, + pub verbose_data: Option, } +/// Represent Kaspa transaction verbose data #[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize, BorshSchema)] #[serde(rename_all = "camelCase")] pub struct RpcTransactionVerboseData { @@ -69,16 +99,3 @@ pub struct RpcTransactionVerboseData { pub block_hash: RpcHash, pub block_time: u64, } - -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize, BorshSchema)] -#[serde(rename_all = "camelCase")] -pub struct RpcTransactionInputVerboseData {} - -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize, BorshSchema)] -#[serde(rename_all = "camelCase")] -pub struct RpcTransactionOutputVerboseData { - pub script_public_key_type: RpcScriptClass, - - // TODO: change the type of this field for a better binary representation - pub script_public_key_address: String, -} diff --git a/rpc/core/src/notify/notifier.rs b/rpc/core/src/notify/notifier.rs index 363610992..e9ba1d10e 100644 --- 
a/rpc/core/src/notify/notifier.rs +++ b/rpc/core/src/notify/notifier.rs @@ -59,7 +59,6 @@ impl Notifier { } pub fn start_notify(&self, id: ListenerID, notification_type: NotificationType) -> Result<()> { - trace!("[Notifier] start sending to listener {0} notifications of type {1:?}", id, notification_type); self.inner.clone().start_notify(id, notification_type) } @@ -68,7 +67,6 @@ impl Notifier { } pub fn stop_notify(&self, id: ListenerID, notification_type: NotificationType) -> Result<()> { - trace!("[Notifier] stop sending to listener {0} notifications of type {1:?}", id, notification_type); self.inner.clone().stop_notify(id, notification_type) } @@ -144,6 +142,9 @@ impl Inner { if let Some(ref collector) = self.collector.clone().as_ref() { collector.clone().start(notifier); } + trace!("[Notifier] started"); + } else { + trace!("[Notifier] start ignored since already started"); } } @@ -170,10 +171,10 @@ impl Inner { // This is necessary for the correct handling of repeating start/stop cycles. workflow_core::task::spawn(async move { - trace!("[Notifier] dispatch_task spawned"); + trace!("[Notifier] dispatcher_task starting for notification type {:?}", event); fn send_subscribe_message(send_subscriber: Sender, message: SubscribeMessage) { - trace!("[Notifier] dispatch_task send subscribe message: {:?}", message); + trace!("[Notifier] dispatcher_task send subscribe message: {:?}", message); match send_subscriber.try_send(message) { Ok(_) => {} Err(err) => { @@ -183,7 +184,7 @@ impl Inner { } // We will send subscribe messages for all dispatch messages if event is a filtered UtxosChanged. - // Otherwise, subscribe message is only sent when needed by the execution of the dispatche message. + // Otherwise, subscribe message is only sent when needed by the execution of the dispatch message. let report_all_changes = event == EventType::UtxosChanged && sending_changed_utxos == ListenerUtxoNotificationFilterSetting::FilteredByAddress; @@ -192,7 +193,7 @@ impl Inner { // If needed, send subscribe message based on listeners map being empty or not if need_subscribe && has_subscriber { if listeners.len() > 0 { - // TODO: handle actual utxo addresse set + // TODO: handle actual utxo address set send_subscribe_message(send_subscriber.as_ref().unwrap().clone(), SubscribeMessage::StartEvent(event.into())); } else { @@ -248,6 +249,7 @@ impl Inner { } } shutdown_trigger.trigger(); + trace!("[Notifier] dispatcher_task exiting for notification type {:?}", event); }); } @@ -295,7 +297,7 @@ impl Inner { let event: EventType = (¬ification_type).into(); let mut listeners = self.listeners.lock().unwrap(); if let Some(listener) = listeners.get_mut(&id) { - trace!("[Notifier] start notify to {0} about {1:?}", id, notification_type); + trace!("[Notifier] start notifying to {0} about {1:?}", id, notification_type); // Any mutation in the listener will trigger a dispatch of a brand new ListenerSenderSide // eventually creating or replacing this listener in the matching dispatcher. 
@@ -320,9 +322,8 @@ impl Inner { let event: EventType = (¬ification_type).into(); let mut listeners = self.listeners.lock().unwrap(); if let Some(listener) = listeners.get_mut(&id) { - trace!("[Notifier] stop notify to {0} about {1:?}", id, notification_type); - - if listener.toggle(notification_type, false) { + if listener.toggle(notification_type.clone(), false) { + trace!("[Notifier] stop notifying to {0} about {1:?}", id, notification_type); let msg = DispatchMessage::RemoveListener(listener.id()); self.clone().try_send_dispatch(event, msg)?; } @@ -363,8 +364,10 @@ impl Inner { subscriber.clone().stop().await?; } } else { + trace!("[Notifier] stop ignored since already stopped"); return Err(Error::AlreadyStoppedError); } + trace!("[Notifier] stopped"); Ok(()) } } diff --git a/rpc/core/src/server/mod.rs b/rpc/core/src/server/mod.rs index a9fef4229..628313aaa 100644 --- a/rpc/core/src/server/mod.rs +++ b/rpc/core/src/server/mod.rs @@ -1,2 +1,81 @@ +use std::sync::Arc; + +use consensus_core::api::DynConsensus; +use kaspa_core::{ + task::service::{AsyncService, AsyncServiceFuture}, + trace, +}; +use kaspa_utils::triggers::DuplexTrigger; + +use self::{collector::ConsensusNotificationReceiver, service::RpcCoreService}; + pub mod collector; pub mod service; + +const RPC_CORE_SERVICE: &str = "rpc-core-service"; + +/// [`RpcCoreServer`] encapsulates and exposes a [`RpcCoreService`] as an [`AsyncService`]. +pub struct RpcCoreServer { + service: Arc, + shutdown: DuplexTrigger, +} + +impl RpcCoreServer { + pub fn new(consensus: DynConsensus, consensus_recv: ConsensusNotificationReceiver) -> Self { + let service = Arc::new(RpcCoreService::new(consensus, consensus_recv)); + Self { service, shutdown: DuplexTrigger::default() } + } + + #[inline(always)] + pub fn service(&self) -> Arc { + self.service.clone() + } +} + +// It might be necessary to opt this out in the context of wasm32 + +impl AsyncService for RpcCoreServer { + fn ident(self: Arc) -> &'static str { + RPC_CORE_SERVICE + } + + fn start(self: Arc) -> AsyncServiceFuture { + trace!("{} starting", RPC_CORE_SERVICE); + let service = self.service.clone(); + + // Prepare a start shutdown signal receiver and a shutwdown ended signal sender + let shutdown_signal = self.shutdown.request.listener.clone(); + let shutdown_executed = self.shutdown.response.trigger.clone(); + + // Launch the service and wait for a shutdown signal + Box::pin(async move { + service.start(); + shutdown_signal.await; + shutdown_executed.trigger(); + }) + } + + fn signal_exit(self: Arc) { + trace!("sending an exit signal to {}", RPC_CORE_SERVICE); + self.shutdown.request.trigger.trigger(); + } + + fn stop(self: Arc) -> AsyncServiceFuture { + trace!("{} stopping", RPC_CORE_SERVICE); + let service = self.service.clone(); + let shutdown_executed_signal = self.shutdown.response.listener.clone(); + Box::pin(async move { + // Wait for the service start task to exit + shutdown_executed_signal.await; + + // Stop the service + match service.stop().await { + Ok(_) => {} + Err(err) => { + trace!("Error while stopping {}: {}", RPC_CORE_SERVICE, err); + } + } + trace!("{} exiting", RPC_CORE_SERVICE); + }) + } +} diff --git a/rpc/core/src/server/service.rs b/rpc/core/src/server/service.rs index 25bd3c0c8..9737d9a45 100644 --- a/rpc/core/src/server/service.rs +++ b/rpc/core/src/server/service.rs @@ -2,17 +2,24 @@ use super::collector::{ConsensusCollector, ConsensusNotificationReceiver}; use crate::{ - api::rpc, + api::rpc::RpcApi, model::*, notify::{ channel::NotificationChannel, 
listener::{ListenerID, ListenerReceiverSide, ListenerUtxoNotificationFilterSetting}, notifier::Notifier, }, - NotificationType, RpcError, RpcResult, + Notification, NotificationType, RpcError, RpcResult, }; use async_trait::async_trait; +use consensus_core::{ + api::DynConsensus, + block::Block, + coinbase::MinerData, + tx::{ScriptPublicKey, ScriptVec}, +}; use hashes::Hash; +use kaspa_core::trace; use std::{ str::FromStr, sync::Arc, @@ -23,7 +30,7 @@ use std::{ /// A service implementing the Rpc API at rpc_core level. /// /// Collects notifications from the consensus and forwards them to -/// actual protocol-featured services. Thanks to the subscribtion pattern, +/// actual protocol-featured services. Thanks to the subscription pattern, /// notifications are sent to the registered services only if the actually /// need them. /// @@ -37,13 +44,15 @@ use std::{ /// from this instance to registered services and backwards should occur /// by adding respectively to the registered service a Collector and a /// Subscriber. -#[derive(Debug)] pub struct RpcCoreService { + consensus: DynConsensus, notifier: Arc, } impl RpcCoreService { - pub fn new(consensus_recv: ConsensusNotificationReceiver) -> Arc { + pub fn new(consensus: DynConsensus, consensus_recv: ConsensusNotificationReceiver) -> Self { + // TODO: instead of getting directly a DynConsensus, rely on some Context equivalent + // See app\rpc\rpccontext\context.go // TODO: the channel receiver should be obtained by registering to a consensus notification service let collector = Arc::new(ConsensusCollector::new(consensus_recv)); @@ -51,7 +60,7 @@ impl RpcCoreService { // TODO: Some consensus-compatible subscriber could be provided here let notifier = Arc::new(Notifier::new(Some(collector), None, ListenerUtxoNotificationFilterSetting::All)); - Arc::new(Self { notifier }) + Self { consensus, notifier } } pub fn start(&self) { @@ -69,8 +78,52 @@ impl RpcCoreService { } #[async_trait] -impl rpc::RpcApi for RpcCoreService { - async fn get_block(&self, req: GetBlockRequest) -> RpcResult { +impl RpcApi for RpcCoreService { + async fn submit_block_call(&self, request: SubmitBlockRequest) -> RpcResult { + let try_block: RpcResult = (&request.block).try_into(); + if let Err(ref err) = try_block { + trace!("incoming SubmitBlockRequest with block conversion error: {}", err); + } + let block = try_block?; + trace!("incoming SubmitBlockRequest for block {}", block.header.hash); + + let result = match self.consensus.clone().validate_and_insert_block(block, true).await { + Ok(_) => Ok(SubmitBlockResponse { report: SubmitBlockReport::Success }), + Err(err) => { + trace!("submit block error: {}", err); + Ok(SubmitBlockResponse { report: SubmitBlockReport::Reject(SubmitBlockRejectReason::BlockInvalid) }) + } // TODO: handle also the IsInIBD reject reason + }; + + // Emit a NewBlockTemplate notification + self.notifier.clone().notify(Arc::new(Notification::NewBlockTemplate(NewBlockTemplateNotification {}))).unwrap(); + + result + } + + async fn get_block_template_call(&self, request: GetBlockTemplateRequest) -> RpcResult { + trace!("incoming GetBlockTemplate request"); + + // TODO: Replace this hack by a call to build the script (some txscript.PayToAddrScript(payAddress) equivalent). 
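+ // Note: the script assembled below is a plain pay-to-pubkey script laid out as
+ //   <payload length byte> <pay_address payload bytes> OP_CHECKSIG (0xAC = 172),
+ // which is what the reserved capacity of 34 bytes accounts for (1 + 32 + 1 for a standard public-key payload).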
+ // See app\rpc\rpchandlers\get_block_template.go HandleGetBlockTemplate + const ADDRESS_PUBLIC_KEY_SCRIPT_PUBLIC_KEY_VERSION: u16 = 0; + const OP_CHECK_SIG: u8 = 172; + let mut script_addr = request.pay_address.payload.clone(); + let mut pay_to_pub_key_script = Vec::with_capacity(34); + pay_to_pub_key_script.push(u8::try_from(script_addr.len()).unwrap()); + pay_to_pub_key_script.append(&mut script_addr); + pay_to_pub_key_script.push(OP_CHECK_SIG); + + let script = ScriptVec::from_vec(pay_to_pub_key_script); + + let script_public_key = ScriptPublicKey::new(ADDRESS_PUBLIC_KEY_SCRIPT_PUBLIC_KEY_VERSION, script); + let miner_data: MinerData = MinerData::new(script_public_key, request.extra_data); + let block_template = self.consensus.clone().build_block_template(miner_data, vec![]); + + Ok((&block_template).into()) + } + + async fn get_block_call(&self, req: GetBlockRequest) -> RpcResult { // TODO: Remove the following test when consensus is used to fetch data // This is a test to simulate a consensus error @@ -82,7 +135,7 @@ impl rpc::RpcApi for RpcCoreService { Ok(GetBlockResponse { block: create_dummy_rpc_block() }) } - async fn get_info(&self, _req: GetInfoRequest) -> RpcResult { + async fn get_info_call(&self, _req: GetInfoRequest) -> RpcResult { // TODO: query info from consensus and use it to build the response Ok(GetInfoResponse { p2p_id: "test".to_string(), @@ -127,13 +180,14 @@ impl rpc::RpcApi for RpcCoreService { fn create_dummy_rpc_block() -> RpcBlock { let sel_parent_hash = Hash::from_str("5963be67f12da63004ce1baceebd7733c4fb601b07e9b0cfb447a3c5f4f3c4f0").unwrap(); RpcBlock { - header: RpcBlockHeader { + header: RpcHeader { + hash: Hash::from_str("8270e63a0295d7257785b9c9b76c9a2efb7fb8d6ac0473a1bff1571c5030e995").unwrap(), version: 1, parents: vec![], hash_merkle_root: Hash::from_str("4b5a041951c4668ecc190c6961f66e54c1ce10866bef1cf1308e46d66adab270").unwrap(), accepted_id_merkle_root: Hash::from_str("1a1310d49d20eab15bf62c106714bdc81e946d761701e81fabf7f35e8c47b479").unwrap(), utxo_commitment: Hash::from_str("e7cdeaa3a8966f3fff04e967ed2481615c76b7240917c5d372ee4ed353a5cc15").unwrap(), - timestamp: SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_millis() as i64, + timestamp: SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_millis() as u64, bits: 1, nonce: 1234, daa_score: 123456, @@ -142,7 +196,7 @@ fn create_dummy_rpc_block() -> RpcBlock { blue_score: 12345678901, }, transactions: vec![], - verbose_data: RpcBlockVerboseData { + verbose_data: Some(RpcBlockVerboseData { hash: Hash::from_str("8270e63a0295d7257785b9c9b76c9a2efb7fb8d6ac0473a1bff1571c5030e995").unwrap(), difficulty: 5678.0, selected_parent_hash: sel_parent_hash, @@ -153,6 +207,6 @@ fn create_dummy_rpc_block() -> RpcBlock { merge_set_blues_hashes: vec![], merge_set_reds_hashes: vec![], is_chain_block: true, - }, + }), } } diff --git a/rpc/core/src/stubs.rs b/rpc/core/src/stubs.rs index 711d72ea8..0d9af80de 100644 --- a/rpc/core/src/stubs.rs +++ b/rpc/core/src/stubs.rs @@ -38,6 +38,3 @@ pub struct VirtualDaaScoreChangedNotification; #[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize, BorshSchema)] pub struct PruningPointUTXOSetOverrideNotification; - -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize, BorshSchema)] -pub struct NewBlockTemplateNotification; diff --git a/rpc/grpc/Cargo.toml b/rpc/grpc/Cargo.toml index 081a3f9bd..dc4e49344 100644 --- a/rpc/grpc/Cargo.toml +++ b/rpc/grpc/Cargo.toml @@ -13,6 +13,7 @@ kaspa-utils.workspace = true 
kaspa-core.workspace = true faster-hex.workspace = true async-std.workspace = true +log.workspace = true async-trait = "0.1.57" futures = { version = "0.3" } tonic = { version = "0.8", features = ["gzip"] } diff --git a/rpc/grpc/build.rs b/rpc/grpc/build.rs index e6c565480..f8166a9f8 100644 --- a/rpc/grpc/build.rs +++ b/rpc/grpc/build.rs @@ -1,11 +1,14 @@ fn main() { - let proto_file1 = "./proto/messages.proto"; - let proto_file2 = "./proto/kaspadrpc.proto"; - - println!("cargo:rerun-if-changed={}, {}", proto_file1, proto_file2); + let protowire_main_file = "./proto/messages.proto"; tonic_build::configure() .build_server(true) - .compile(&[proto_file1], &["./proto/", "."]) + .build_client(true) + + // In case we want protowire.rs to be explicitely integrated in the crate code, + // uncomment this line and reflect the change in src/lib.rs + //.out_dir("./src") + + .compile(&[protowire_main_file], &["./proto/", "."]) .unwrap_or_else(|e| panic!("protobuf compile error: {}", e)); } diff --git a/rpc/grpc/proto/messages.proto b/rpc/grpc/proto/messages.proto index 637df58c7..23e18d356 100644 --- a/rpc/grpc/proto/messages.proto +++ b/rpc/grpc/proto/messages.proto @@ -6,19 +6,26 @@ import "rpc.proto"; message KaspadRequest { oneof payload { GetCurrentNetworkRequestMessage getCurrentNetworkRequest = 1001; + SubmitBlockRequestMessage submitBlockRequest = 1003; + GetBlockTemplateRequestMessage getBlockTemplateRequest = 1005; NotifyBlockAddedRequestMessage notifyBlockAddedRequest = 1007; GetBlockRequestMessage getBlockRequest = 1025; GetInfoRequestMessage getInfoRequest = 1063; - } + NotifyNewBlockTemplateRequestMessage notifyNewBlockTemplateRequest = 1081; +} } message KaspadResponse { oneof payload { GetCurrentNetworkResponseMessage getCurrentNetworkResponse = 1002; + SubmitBlockResponseMessage submitBlockResponse = 1004; + GetBlockTemplateResponseMessage getBlockTemplateResponse = 1006; NotifyBlockAddedResponseMessage notifyBlockAddedResponse = 1008; BlockAddedNotificationMessage blockAddedNotification = 1009; GetBlockResponseMessage getBlockResponse = 1026; GetInfoResponseMessage getInfoResponse = 1064; + NotifyNewBlockTemplateResponseMessage notifyNewBlockTemplateResponse = 1082; + NewBlockTemplateNotificationMessage newBlockTemplateNotification = 1083; } } diff --git a/rpc/grpc/proto/rpc.proto b/rpc/grpc/proto/rpc.proto index 894c03616..3a0c76024 100644 --- a/rpc/grpc/proto/rpc.proto +++ b/rpc/grpc/proto/rpc.proto @@ -713,7 +713,6 @@ message NotifyNewBlockTemplateResponseMessage { // // See NotifyNewBlockTemplateRequestMessage message NewBlockTemplateNotificationMessage { - RpcNotifyCommand command = 101; } message MempoolEntryByAddress{ diff --git a/rpc/grpc/src/client/mod.rs b/rpc/grpc/src/client/mod.rs index 8bbb88d5c..3168e0666 100644 --- a/rpc/grpc/src/client/mod.rs +++ b/rpc/grpc/src/client/mod.rs @@ -13,7 +13,8 @@ use rpc_core::{ notifier::Notifier, subscriber::Subscriber, }, - GetBlockRequest, GetBlockResponse, GetInfoRequest, GetInfoResponse, NotificationType, RpcError, RpcResult, + GetBlockRequest, GetBlockResponse, GetBlockTemplateRequest, GetBlockTemplateResponse, GetInfoRequest, GetInfoResponse, + NotificationType, RpcError, RpcResult, SubmitBlockRequest, SubmitBlockResponse, }; mod errors; @@ -59,11 +60,19 @@ impl RpcApiGrpc { #[async_trait] impl RpcApi for RpcApiGrpc { - async fn get_block(&self, request: GetBlockRequest) -> RpcResult { + async fn submit_block_call(&self, request: SubmitBlockRequest) -> RpcResult { + self.inner.clone().call(RpcApiOps::SubmitBlock, 
request).await?.as_ref().try_into() + } + + async fn get_block_template_call(&self, request: GetBlockTemplateRequest) -> RpcResult { + self.inner.clone().call(RpcApiOps::GetBlockTemplate, request).await?.as_ref().try_into() + } + + async fn get_block_call(&self, request: GetBlockRequest) -> RpcResult { self.inner.clone().call(RpcApiOps::GetBlock, request).await?.as_ref().try_into() } - async fn get_info(&self, request: GetInfoRequest) -> RpcResult { + async fn get_info_call(&self, request: GetInfoRequest) -> RpcResult { self.inner.clone().call(RpcApiOps::GetInfo, request).await?.as_ref().try_into() } diff --git a/rpc/grpc/src/client/resolver/matcher.rs b/rpc/grpc/src/client/resolver/matcher.rs index 5801c018e..1a97c0a5d 100644 --- a/rpc/grpc/src/client/resolver/matcher.rs +++ b/rpc/grpc/src/client/resolver/matcher.rs @@ -32,10 +32,13 @@ impl Matcher<&kaspad_response::Payload> for GetBlockRequestMessage { impl Matcher<&kaspad_response::Payload> for kaspad_request::Payload { fn is_matching(&self, response: &kaspad_response::Payload) -> bool { match self { + kaspad_request::Payload::SubmitBlockRequest(_) => true, + kaspad_request::Payload::GetBlockTemplateRequest(_) => true, kaspad_request::Payload::GetBlockRequest(ref request) => request.is_matching(response), kaspad_request::Payload::GetCurrentNetworkRequest(_) => true, kaspad_request::Payload::NotifyBlockAddedRequest(_) => true, kaspad_request::Payload::GetInfoRequest(_) => true, + kaspad_request::Payload::NotifyNewBlockTemplateRequest(_) => true, } } } diff --git a/rpc/grpc/src/convert/block.rs b/rpc/grpc/src/convert/block.rs index a49beb097..abb0b6aa3 100644 --- a/rpc/grpc/src/convert/block.rs +++ b/rpc/grpc/src/convert/block.rs @@ -11,7 +11,7 @@ impl From<&rpc_core::RpcBlock> for protowire::RpcBlock { Self { header: Some(protowire::RpcBlockHeader::from(&item.header)), transactions: item.transactions.iter().map(protowire::RpcTransaction::from).collect(), - verbose_data: Some(protowire::RpcBlockVerboseData::from(&item.verbose_data)), + verbose_data: item.verbose_data.as_ref().map(|x| x.into()), } } } @@ -51,11 +51,7 @@ impl TryFrom<&protowire::RpcBlock> for rpc_core::RpcBlock { .iter() .map(rpc_core::RpcTransaction::try_from) .collect::>>()?, - verbose_data: item - .verbose_data - .as_ref() - .ok_or_else(|| RpcError::MissingRpcFieldError("RpcBlock".to_string(), "verbose_data".to_string()))? 
- .try_into()?, + verbose_data: item.verbose_data.as_ref().map(rpc_core::RpcBlockVerboseData::try_from).transpose()?, }) } } diff --git a/rpc/grpc/src/convert/header.rs b/rpc/grpc/src/convert/header.rs index 4fcddbfaf..8836ef305 100644 --- a/rpc/grpc/src/convert/header.rs +++ b/rpc/grpc/src/convert/header.rs @@ -6,15 +6,15 @@ use std::str::FromStr; // rpc_core to protowire // ---------------------------------------------------------------------------- -impl From<&rpc_core::RpcBlockHeader> for protowire::RpcBlockHeader { - fn from(item: &rpc_core::RpcBlockHeader) -> Self { +impl From<&rpc_core::RpcHeader> for protowire::RpcBlockHeader { + fn from(item: &rpc_core::RpcHeader) -> Self { Self { - version: item.version, + version: item.version.into(), parents: item.parents.iter().map(protowire::RpcBlockLevelParents::from).collect(), hash_merkle_root: item.hash_merkle_root.to_string(), accepted_id_merkle_root: item.accepted_id_merkle_root.to_string(), utxo_commitment: item.utxo_commitment.to_string(), - timestamp: item.timestamp, + timestamp: item.timestamp.try_into().expect("timestamp is always convertible to i64"), bits: item.bits, nonce: item.nonce, daa_score: item.daa_score, @@ -35,11 +35,14 @@ impl From<&rpc_core::RpcBlockLevelParents> for protowire::RpcBlockLevelParents { // protowire to rpc_core // ---------------------------------------------------------------------------- -impl TryFrom<&protowire::RpcBlockHeader> for rpc_core::RpcBlockHeader { +impl TryFrom<&protowire::RpcBlockHeader> for rpc_core::RpcHeader { type Error = RpcError; fn try_from(item: &protowire::RpcBlockHeader) -> RpcResult { + // TODO: determine if we need to calculate the hash here. + // If so, do a rpc-core to consensus-core conversion to get the hash. Ok(Self { - version: item.version, + hash: Default::default(), + version: item.version.try_into()?, parents: item .parents .iter() @@ -48,7 +51,7 @@ impl TryFrom<&protowire::RpcBlockHeader> for rpc_core::RpcBlockHeader { hash_merkle_root: RpcHash::from_str(&item.hash_merkle_root)?, accepted_id_merkle_root: RpcHash::from_str(&item.accepted_id_merkle_root)?, utxo_commitment: RpcHash::from_str(&item.utxo_commitment)?, - timestamp: item.timestamp, + timestamp: item.timestamp.try_into()?, bits: item.bits, nonce: item.nonce, daa_score: item.daa_score, diff --git a/rpc/grpc/src/convert/kaspad.rs b/rpc/grpc/src/convert/kaspad.rs index 37cc3e9d6..346bee6e5 100644 --- a/rpc/grpc/src/convert/kaspad.rs +++ b/rpc/grpc/src/convert/kaspad.rs @@ -4,12 +4,15 @@ use rpc_core::api::ops::RpcApiOps; impl From<&kaspad_request::Payload> for RpcApiOps { fn from(item: &kaspad_request::Payload) -> Self { match item { + kaspad_request::Payload::SubmitBlockRequest(_) => RpcApiOps::SubmitBlock, + kaspad_request::Payload::GetBlockTemplateRequest(_) => RpcApiOps::GetBlockTemplate, kaspad_request::Payload::GetCurrentNetworkRequest(_) => RpcApiOps::GetCurrentNetwork, kaspad_request::Payload::GetBlockRequest(_) => RpcApiOps::GetBlock, kaspad_request::Payload::GetInfoRequest(_) => RpcApiOps::GetInfo, // Subscription commands for starting/stopping notifications kaspad_request::Payload::NotifyBlockAddedRequest(_) => RpcApiOps::NotifyBlockAdded, + kaspad_request::Payload::NotifyNewBlockTemplateRequest(_) => RpcApiOps::NotifyNewBlockTemplate, } } } @@ -17,15 +20,19 @@ impl From<&kaspad_request::Payload> for RpcApiOps { impl From<&kaspad_response::Payload> for RpcApiOps { fn from(item: &kaspad_response::Payload) -> Self { match item { + kaspad_response::Payload::SubmitBlockResponse(_) => 
RpcApiOps::SubmitBlock, + kaspad_response::Payload::GetBlockTemplateResponse(_) => RpcApiOps::GetBlockTemplate, kaspad_response::Payload::GetCurrentNetworkResponse(_) => RpcApiOps::GetCurrentNetwork, kaspad_response::Payload::GetBlockResponse(_) => RpcApiOps::GetBlock, kaspad_response::Payload::GetInfoResponse(_) => RpcApiOps::GetInfo, // Subscription commands for starting/stopping notifications kaspad_response::Payload::NotifyBlockAddedResponse(_) => RpcApiOps::NotifyBlockAdded, + kaspad_response::Payload::NotifyNewBlockTemplateResponse(_) => RpcApiOps::NotifyNewBlockTemplate, // Notifications kaspad_response::Payload::BlockAddedNotification(_) => RpcApiOps::Notification, + kaspad_response::Payload::NewBlockTemplateNotification(_) => RpcApiOps::Notification, } } } @@ -52,9 +59,16 @@ pub mod kaspad_request_convert { use crate::protowire::*; use rpc_core::{RpcError, RpcResult}; + impl_into_kaspad_request!(rpc_core::SubmitBlockRequest, SubmitBlockRequestMessage, SubmitBlockRequest); + impl_into_kaspad_request!(rpc_core::GetBlockTemplateRequest, GetBlockTemplateRequestMessage, GetBlockTemplateRequest); impl_into_kaspad_request!(rpc_core::GetBlockRequest, GetBlockRequestMessage, GetBlockRequest); impl_into_kaspad_request!(rpc_core::NotifyBlockAddedRequest, NotifyBlockAddedRequestMessage, NotifyBlockAddedRequest); impl_into_kaspad_request!(rpc_core::GetInfoRequest, GetInfoRequestMessage, GetInfoRequest); + impl_into_kaspad_request!( + rpc_core::NotifyNewBlockTemplateRequest, + NotifyNewBlockTemplateRequestMessage, + NotifyNewBlockTemplateRequest + ); macro_rules! impl_into_kaspad_request { ($($core_struct:ident)::+, $($protowire_struct:ident)::+, $($variant:ident)::+) => { @@ -137,11 +151,23 @@ pub mod kaspad_response_convert { use crate::protowire::*; use rpc_core::{RpcError, RpcResult}; + impl_into_kaspad_response!(rpc_core::SubmitBlockResponse, SubmitBlockResponseMessage, SubmitBlockResponse); + impl_into_kaspad_response!(rpc_core::GetBlockTemplateResponse, GetBlockTemplateResponseMessage, GetBlockTemplateResponse); impl_into_kaspad_response!(rpc_core::GetBlockResponse, GetBlockResponseMessage, GetBlockResponse); impl_into_kaspad_response!(rpc_core::GetInfoResponse, GetInfoResponseMessage, GetInfoResponse); impl_into_kaspad_response!(rpc_core::NotifyBlockAddedResponse, NotifyBlockAddedResponseMessage, NotifyBlockAddedResponse); impl_into_kaspad_notify_response!(rpc_core::NotifyBlockAddedResponse, NotifyBlockAddedResponseMessage, NotifyBlockAddedResponse); + impl_into_kaspad_response!( + rpc_core::NotifyNewBlockTemplateResponse, + NotifyNewBlockTemplateResponseMessage, + NotifyNewBlockTemplateResponse + ); + impl_into_kaspad_notify_response!( + rpc_core::NotifyNewBlockTemplateResponse, + NotifyNewBlockTemplateResponseMessage, + NotifyNewBlockTemplateResponse + ); macro_rules! 
impl_into_kaspad_response { ($($core_struct:ident)::+, $($protowire_struct:ident)::+, $($variant:ident)::+) => { diff --git a/rpc/grpc/src/convert/message.rs b/rpc/grpc/src/convert/message.rs index 123c5dbf7..bd096a8b4 100644 --- a/rpc/grpc/src/convert/message.rs +++ b/rpc/grpc/src/convert/message.rs @@ -1,11 +1,54 @@ -use crate::protowire; -use rpc_core::{RpcError, RpcHash, RpcResult}; +use crate::protowire::{self, submit_block_response_message::RejectReason}; +use rpc_core::{RpcError, RpcExtraData, RpcHash, RpcResult}; use std::str::FromStr; // ---------------------------------------------------------------------------- // rpc_core to protowire // ---------------------------------------------------------------------------- +impl From<&rpc_core::SubmitBlockRequest> for protowire::SubmitBlockRequestMessage { + fn from(item: &rpc_core::SubmitBlockRequest) -> Self { + Self { block: Some((&item.block).into()), allow_non_daa_blocks: item.allow_non_daa_blocks } + } +} + +impl From<&rpc_core::SubmitBlockReport> for RejectReason { + fn from(item: &rpc_core::SubmitBlockReport) -> Self { + match item { + rpc_core::SubmitBlockReport::Success => RejectReason::None, + rpc_core::SubmitBlockReport::Reject(rpc_core::SubmitBlockRejectReason::BlockInvalid) => RejectReason::BlockInvalid, + rpc_core::SubmitBlockReport::Reject(rpc_core::SubmitBlockRejectReason::IsInIBD) => RejectReason::IsInIbd, + } + } +} + +impl From<RpcResult<&rpc_core::SubmitBlockResponse>> for protowire::SubmitBlockResponseMessage { + fn from(item: RpcResult<&rpc_core::SubmitBlockResponse>) -> Self { + Self { + reject_reason: item.as_ref().map(|x| RejectReason::from(&x.report)).unwrap_or(RejectReason::None) as i32, + error: item.map_err(protowire::RpcError::from).err(), + } + } +} + +impl From<&rpc_core::GetBlockTemplateRequest> for protowire::GetBlockTemplateRequestMessage { + fn from(item: &rpc_core::GetBlockTemplateRequest) -> Self { + Self { + pay_address: (&item.pay_address).into(), + extra_data: String::from_utf8(item.extra_data.clone()).expect("extra data has to be valid UTF-8"), + } + } +} + +impl From<RpcResult<&rpc_core::GetBlockTemplateResponse>> for protowire::GetBlockTemplateResponseMessage { + fn from(item: RpcResult<&rpc_core::GetBlockTemplateResponse>) -> Self { + match item { + Ok(response) => Self { block: Some((&response.block).into()), is_synced: response.is_synced, error: None }, + Err(err) => Self { block: None, is_synced: false, error: Some(err.into()) }, + } + } +} + impl From<&rpc_core::GetBlockRequest> for protowire::GetBlockRequestMessage { fn from(item: &rpc_core::GetBlockRequest) -> Self { Self { hash: item.hash.to_string(), include_transactions: item.include_transactions } @@ -64,10 +107,75 @@ impl From<RpcResult<&rpc_core::GetInfoResponse>> for protowire::GetInfoResponseM } } +impl From<&rpc_core::NotifyNewBlockTemplateRequest> for protowire::NotifyNewBlockTemplateRequestMessage { + fn from(item: &rpc_core::NotifyNewBlockTemplateRequest) -> Self { + Self { command: item.command.into() } + } +} + +impl From<RpcResult<&rpc_core::NotifyNewBlockTemplateResponse>> for protowire::NotifyNewBlockTemplateResponseMessage { + fn from(item: RpcResult<&rpc_core::NotifyNewBlockTemplateResponse>) -> Self { + Self { error: item.map_err(protowire::RpcError::from).err() } + } +} + // ---------------------------------------------------------------------------- // protowire to rpc_core // ---------------------------------------------------------------------------- +impl TryFrom<&protowire::SubmitBlockRequestMessage> for rpc_core::SubmitBlockRequest { + type Error = RpcError; + fn try_from(item: &protowire::SubmitBlockRequestMessage) -> RpcResult<Self> { + if item.block.is_none() { + return
Err(RpcError::MissingRpcFieldError("SubmitBlockRequestMessage".to_string(), "block".to_string())); + } + Ok(Self { block: item.block.as_ref().unwrap().try_into()?, allow_non_daa_blocks: item.allow_non_daa_blocks }) + } +} + +impl From<RejectReason> for rpc_core::SubmitBlockReport { + fn from(item: RejectReason) -> Self { + match item { + RejectReason::None => rpc_core::SubmitBlockReport::Success, + RejectReason::BlockInvalid => rpc_core::SubmitBlockReport::Reject(rpc_core::SubmitBlockRejectReason::BlockInvalid), + RejectReason::IsInIbd => rpc_core::SubmitBlockReport::Reject(rpc_core::SubmitBlockRejectReason::IsInIBD), + } + } +} + +impl TryFrom<&protowire::SubmitBlockResponseMessage> for rpc_core::SubmitBlockResponse { + type Error = RpcError; + fn try_from(item: &protowire::SubmitBlockResponseMessage) -> RpcResult<Self> { + Ok(Self { report: RejectReason::from_i32(item.reject_reason).ok_or(RpcError::PrimitiveToEnumConversionError)?.into() }) + } +} + +impl TryFrom<&protowire::GetBlockTemplateRequestMessage> for rpc_core::GetBlockTemplateRequest { + type Error = RpcError; + fn try_from(item: &protowire::GetBlockTemplateRequestMessage) -> RpcResult<Self> { + Ok(Self { pay_address: item.pay_address.clone().try_into()?, extra_data: RpcExtraData::from_iter(item.extra_data.bytes()) }) + } +} + +impl TryFrom<&protowire::GetBlockTemplateResponseMessage> for rpc_core::GetBlockTemplateResponse { + type Error = RpcError; + fn try_from(item: &protowire::GetBlockTemplateResponseMessage) -> RpcResult<Self> { + item.block + .as_ref() + .map_or_else( + || { + item.error + .as_ref() + .map_or(Err(RpcError::MissingRpcFieldError("GetBlockResponseMessage".to_string(), "error".to_string())), |x| { + Err(x.into()) + }) + }, + rpc_core::RpcBlock::try_from, + ) + .map(|x| rpc_core::GetBlockTemplateResponse { block: x, is_synced: item.is_synced }) + } +} + impl TryFrom<&protowire::GetBlockRequestMessage> for rpc_core::GetBlockRequest { type Error = RpcError; fn try_from(item: &protowire::GetBlockRequestMessage) -> RpcResult<Self> { @@ -110,7 +218,7 @@ impl TryFrom<&protowire::NotifyBlockAddedResponseMessage> for rpc_core::NotifyBl impl TryFrom<&protowire::GetInfoRequestMessage> for rpc_core::GetInfoRequest { type Error = RpcError; - fn try_from(_item: &protowire::GetInfoRequestMessage) -> RpcResult<Self> { + fn try_from(_: &protowire::GetInfoRequestMessage) -> RpcResult<Self> { Ok(Self {}) } } @@ -132,3 +240,23 @@ impl TryFrom<&protowire::GetInfoResponseMessage> for rpc_core::GetInfoResponse { } } } + +impl TryFrom<&protowire::NotifyNewBlockTemplateRequestMessage> for rpc_core::NotifyNewBlockTemplateRequest { + type Error = RpcError; + fn try_from(item: &protowire::NotifyNewBlockTemplateRequestMessage) -> RpcResult<Self> { + Ok(Self { command: item.command.into() }) + } +} + +impl TryFrom<&protowire::NotifyNewBlockTemplateResponseMessage> for rpc_core::NotifyNewBlockTemplateResponse { + type Error = RpcError; + fn try_from(item: &protowire::NotifyNewBlockTemplateResponseMessage) -> RpcResult<Self> { + item.error.as_ref().map_or(Ok(rpc_core::NotifyNewBlockTemplateResponse {}), |x| Err(x.into())) + } +} + +// ---------------------------------------------------------------------------- +// Unit tests +// ---------------------------------------------------------------------------- + +// TODO: tests diff --git a/rpc/grpc/src/convert/notification.rs b/rpc/grpc/src/convert/notification.rs index cd87004c7..fdb80f468 100644 --- a/rpc/grpc/src/convert/notification.rs +++ b/rpc/grpc/src/convert/notification.rs @@ -1,6 +1,8 @@ use rpc_core::{Notification, RpcError, RpcResult}; -use
crate::protowire::{kaspad_response::Payload, BlockAddedNotificationMessage, KaspadResponse, RpcNotifyCommand}; +use crate::protowire::{ + kaspad_response::Payload, BlockAddedNotificationMessage, KaspadResponse, NewBlockTemplateNotificationMessage, RpcNotifyCommand, +}; // ---------------------------------------------------------------------------- // rpc_core to protowire @@ -16,6 +18,7 @@ impl From<&rpc_core::Notification> for Payload { fn from(item: &rpc_core::Notification) -> Self { match item { Notification::BlockAdded(ref notif) => Payload::BlockAddedNotification(notif.into()), + Notification::NewBlockTemplate(ref notif) => Payload::NewBlockTemplateNotification(notif.into()), Notification::VirtualSelectedParentChainChanged(_) => todo!(), Notification::FinalityConflict(_) => todo!(), Notification::FinalityConflictResolved(_) => todo!(), @@ -23,7 +26,6 @@ impl From<&rpc_core::Notification> for Payload { Notification::VirtualSelectedParentBlueScoreChanged(_) => todo!(), Notification::VirtualDaaScoreChanged(_) => todo!(), Notification::PruningPointUTXOSetOverride(_) => todo!(), - Notification::NewBlockTemplate(_) => todo!(), } } } @@ -34,6 +36,12 @@ impl From<&rpc_core::BlockAddedNotification> for BlockAddedNotificationMessage { } } +impl From<&rpc_core::NewBlockTemplateNotification> for NewBlockTemplateNotificationMessage { + fn from(_: &rpc_core::NewBlockTemplateNotification) -> Self { + Self {} + } +} + impl From for RpcNotifyCommand { fn from(item: rpc_core::api::ops::SubscribeCommand) -> Self { match item { @@ -78,6 +86,13 @@ impl TryFrom<&BlockAddedNotificationMessage> for rpc_core::BlockAddedNotificatio } } +impl TryFrom<&NewBlockTemplateNotificationMessage> for rpc_core::NewBlockTemplateNotification { + type Error = RpcError; + fn try_from(_: &NewBlockTemplateNotificationMessage) -> RpcResult { + Ok(Self {}) + } +} + impl From for rpc_core::api::ops::SubscribeCommand { fn from(item: RpcNotifyCommand) -> Self { match item { diff --git a/rpc/grpc/src/convert/tx.rs b/rpc/grpc/src/convert/tx.rs index c9c80c33b..be4592ab8 100644 --- a/rpc/grpc/src/convert/tx.rs +++ b/rpc/grpc/src/convert/tx.rs @@ -9,14 +9,14 @@ use std::str::FromStr; impl From<&rpc_core::RpcTransaction> for protowire::RpcTransaction { fn from(item: &rpc_core::RpcTransaction) -> Self { Self { - version: item.version, + version: item.version.into(), inputs: item.inputs.iter().map(protowire::RpcTransactionInput::from).collect(), outputs: item.outputs.iter().map(protowire::RpcTransactionOutput::from).collect(), lock_time: item.lock_time, subnetwork_id: item.subnetwork_id.to_string(), gas: item.gas, payload: item.payload.to_string(), - verbose_data: Some((&item.verbose_data).into()), + verbose_data: item.verbose_data.as_ref().map(|x| x.into()), } } } @@ -27,7 +27,7 @@ impl From<&rpc_core::RpcTransactionInput> for protowire::RpcTransactionInput { previous_outpoint: Some((&item.previous_outpoint).into()), signature_script: item.signature_script.to_string(), sequence: item.sequence, - sig_op_count: item.sig_op_count, + sig_op_count: item.sig_op_count.into(), verbose_data: item.verbose_data.as_ref().map(|x| x.into()), } } @@ -36,15 +36,15 @@ impl From<&rpc_core::RpcTransactionInput> for protowire::RpcTransactionInput { impl From<&rpc_core::RpcTransactionOutput> for protowire::RpcTransactionOutput { fn from(item: &rpc_core::RpcTransactionOutput) -> Self { Self { - amount: item.amount, + amount: item.value, script_public_key: Some((&item.script_public_key).into()), - verbose_data: Some((&item.verbose_data).into()), + 
verbose_data: item.verbose_data.as_ref().map(|x| x.into()), } } } -impl From<&rpc_core::RpcOutpoint> for protowire::RpcOutpoint { - fn from(item: &rpc_core::RpcOutpoint) -> Self { +impl From<&rpc_core::RpcTransactionOutpoint> for protowire::RpcOutpoint { + fn from(item: &rpc_core::RpcTransactionOutpoint) -> Self { Self { transaction_id: item.transaction_id.to_string(), index: item.index } } } @@ -101,7 +101,7 @@ impl TryFrom<&protowire::RpcTransaction> for rpc_core::RpcTransaction { type Error = RpcError; fn try_from(item: &protowire::RpcTransaction) -> RpcResult { Ok(Self { - version: item.version, + version: item.version.try_into()?, inputs: item .inputs .iter() @@ -116,11 +116,7 @@ impl TryFrom<&protowire::RpcTransaction> for rpc_core::RpcTransaction { subnetwork_id: rpc_core::RpcSubnetworkId::from_str(&item.subnetwork_id)?, gas: item.gas, payload: RpcHexData::from_str(&item.payload)?, - verbose_data: item - .verbose_data - .as_ref() - .ok_or_else(|| RpcError::MissingRpcFieldError("RpcTransaction".to_string(), "verbose_data".to_string()))? - .try_into()?, + verbose_data: item.verbose_data.as_ref().map(rpc_core::RpcTransactionVerboseData::try_from).transpose()?, }) } } @@ -136,7 +132,7 @@ impl TryFrom<&protowire::RpcTransactionInput> for rpc_core::RpcTransactionInput .try_into()?, signature_script: RpcHexData::from_str(&item.signature_script)?, sequence: item.sequence, - sig_op_count: item.sig_op_count, + sig_op_count: item.sig_op_count.try_into()?, verbose_data: item.verbose_data.as_ref().map(rpc_core::RpcTransactionInputVerboseData::try_from).transpose()?, }) } @@ -146,22 +142,18 @@ impl TryFrom<&protowire::RpcTransactionOutput> for rpc_core::RpcTransactionOutpu type Error = RpcError; fn try_from(item: &protowire::RpcTransactionOutput) -> RpcResult { Ok(Self { - amount: item.amount, + value: item.amount, script_public_key: item .script_public_key .as_ref() .ok_or_else(|| RpcError::MissingRpcFieldError("RpcTransactionOutput".to_string(), "script_public_key".to_string()))? .try_into()?, - verbose_data: item - .verbose_data - .as_ref() - .ok_or_else(|| RpcError::MissingRpcFieldError("RpcTransactionOutput".to_string(), "verbose_data".to_string()))? 
- .try_into()?, + verbose_data: item.verbose_data.as_ref().map(rpc_core::RpcTransactionOutputVerboseData::try_from).transpose()?, }) } } -impl TryFrom<&protowire::RpcOutpoint> for rpc_core::RpcOutpoint { +impl TryFrom<&protowire::RpcOutpoint> for rpc_core::RpcTransactionOutpoint { type Error = RpcError; fn try_from(item: &protowire::RpcOutpoint) -> RpcResult { Ok(Self { transaction_id: RpcHash::from_str(&item.transaction_id)?, index: item.index }) diff --git a/rpc/grpc/src/ext/kaspad.rs b/rpc/grpc/src/ext/kaspad.rs index a1f6b98c9..e16915638 100644 --- a/rpc/grpc/src/ext/kaspad.rs +++ b/rpc/grpc/src/ext/kaspad.rs @@ -1,6 +1,9 @@ use rpc_core::{api::ops::SubscribeCommand, NotificationType}; -use crate::protowire::{kaspad_request, kaspad_response, KaspadRequest, KaspadResponse, NotifyBlockAddedRequestMessage}; +use crate::protowire::{ + kaspad_request, kaspad_response, KaspadRequest, KaspadResponse, NotifyBlockAddedRequestMessage, + NotifyNewBlockTemplateRequestMessage, +}; impl KaspadRequest { pub fn from_notification_type(notification_type: &NotificationType, command: SubscribeCommand) -> Self { @@ -13,7 +16,10 @@ impl kaspad_request::Payload { match notification_type { NotificationType::BlockAdded => { kaspad_request::Payload::NotifyBlockAddedRequest(NotifyBlockAddedRequestMessage { command: command.into() }) - } + }, + NotificationType::NewBlockTemplate => { + kaspad_request::Payload::NotifyNewBlockTemplateRequest(NotifyNewBlockTemplateRequestMessage { command: command.into() }) + }, // TODO: implement all other notifications _ => { diff --git a/rpc/grpc/src/lib.rs b/rpc/grpc/src/lib.rs index 613e78f4f..9c909b402 100644 --- a/rpc/grpc/src/lib.rs +++ b/rpc/grpc/src/lib.rs @@ -1,10 +1,12 @@ -#[allow(clippy::derive_partial_eq_without_eq)] -pub mod protowire { - tonic::include_proto!("protowire"); -} - pub mod client; pub mod server; pub mod convert; pub mod ext; + +// We have no control over the code generated by the tonic builder, +// so clippy is fully disabled here. 
+#[allow(clippy::all)] +pub mod protowire { + tonic::include_proto!("protowire"); +} diff --git a/rpc/grpc/src/server/connection.rs b/rpc/grpc/src/server/connection.rs index 987064e8f..d93796fee 100644 --- a/rpc/grpc/src/server/connection.rs +++ b/rpc/grpc/src/server/connection.rs @@ -64,7 +64,7 @@ impl GrpcConnection { notification = recv_channel.recv() => { match notification { Ok(notification) => { - trace!("[GrpcConnection] collect_task listener id {0}: notification", listener_id); + trace!("sending {} to listener id {}", notification, listener_id); match sender.send(Ok((&*notification).into())).await { Ok(_) => (), Err(err) => { diff --git a/rpc/grpc/src/server/mod.rs b/rpc/grpc/src/server/mod.rs index 9154fd45e..c6260e69c 100644 --- a/rpc/grpc/src/server/mod.rs +++ b/rpc/grpc/src/server/mod.rs @@ -1,26 +1,103 @@ use crate::protowire::rpc_server::RpcServer; -use kaspa_core::trace; +use kaspa_core::{ + task::service::{AsyncService, AsyncServiceFuture}, + trace, +}; +use kaspa_utils::triggers::DuplexTrigger; use rpc_core::server::service::RpcCoreService; use std::net::SocketAddr; use std::sync::Arc; -use tokio::task::JoinHandle; -use tonic::codec::CompressionEncoding; -use tonic::transport::{Error, Server}; +use tonic::{codec::CompressionEncoding, transport::Server}; pub mod connection; pub mod service; pub type StatusResult<T> = Result<T, tonic::Status>; -// TODO: use ctrl-c signaling infrastructure of kaspa-core +const GRPC_SERVER: &str = "grpc-server"; -pub fn run_server(address: SocketAddr, core_service: Arc<RpcCoreService>) -> JoinHandle<Result<(), Error>> { - trace!("KaspadRPCServer listening on: {}", address); +pub struct GrpcServer { + address: SocketAddr, + grpc_service: Arc<service::GrpcService>, + shutdown: DuplexTrigger, +} + +impl GrpcServer { + pub fn new(address: SocketAddr, core_service: Arc<RpcCoreService>) -> Self { + let grpc_service = Arc::new(service::GrpcService::new(core_service)); + Self { address, grpc_service, shutdown: DuplexTrigger::default() } + } +} + +impl AsyncService for GrpcServer { + fn ident(self: Arc<Self>) -> &'static str { + GRPC_SERVER + } + + fn start(self: Arc<Self>) -> AsyncServiceFuture { + trace!("{} starting", GRPC_SERVER); + + let grpc_service = self.grpc_service.clone(); + let address = self.address; + + // Prepare a start shutdown signal receiver and a shutdown ended signal sender + let shutdown_signal = self.shutdown.request.listener.clone(); + let shutdown_executed = self.shutdown.response.trigger.clone(); + + // Return a future launching the tonic server and waiting for it to shutdown + Box::pin(async move { + // Start the gRPC service + grpc_service.start(); + + // Create a protowire RPC server + let svc = RpcServer::new(self.grpc_service.clone()) + .send_compressed(CompressionEncoding::Gzip) + .accept_compressed(CompressionEncoding::Gzip); + + // Start the tonic gRPC server + trace!("gRPC server listening on: {}", address); + match Server::builder().add_service(svc).serve_with_shutdown(address, shutdown_signal).await { + Ok(_) => { + trace!("gRPC server exited gracefully"); + } + Err(err) => { + trace!("gRPC server exited with error {0}", err); + } + } + + // Send a signal telling the shutdown is done + shutdown_executed.trigger(); + }) + } - let grpc_service = service::RpcService::new(core_service); - grpc_service.start(); + fn signal_exit(self: Arc<Self>) { + trace!("sending an exit signal to {}", GRPC_SERVER); + self.shutdown.request.trigger.trigger(); + } - let svc = RpcServer::new(grpc_service).send_compressed(CompressionEncoding::Gzip).accept_compressed(CompressionEncoding::Gzip); + fn stop(self: Arc<Self>) -> AsyncServiceFuture {
trace!("{} stopping", GRPC_SERVER); + // Launch the shutdown process as a task + let shutdown_executed_signal = self.shutdown.response.listener.clone(); + let grpc_service = self.grpc_service.clone(); + Box::pin(async move { + // Wait for the tonic server to gracefully shutdown + shutdown_executed_signal.await; - tokio::spawn(async move { Server::builder().add_service(svc).serve(address).await }) + // Stop the gRPC service gracefully + match grpc_service.stop().await { + Ok(_) => {} + Err(err) => { + trace!("Error while stopping the gRPC service: {0}", err); + } + } + match grpc_service.finalize().await { + Ok(_) => {} + Err(err) => { + trace!("Error while finalizing the gRPC service: {0}", err); + } + } + trace!("{} exiting", GRPC_SERVER); + }) + } } diff --git a/rpc/grpc/src/server/service.rs b/rpc/grpc/src/server/service.rs index da8b27699..f9490f5f8 100644 --- a/rpc/grpc/src/server/service.rs +++ b/rpc/grpc/src/server/service.rs @@ -1,7 +1,8 @@ use super::connection::{GrpcConnectionManager, GrpcSender}; +use crate::protowire::NotifyNewBlockTemplateResponseMessage; use crate::protowire::{ - kaspad_request::Payload, rpc_server::Rpc, GetBlockResponseMessage, GetInfoResponseMessage, KaspadRequest, KaspadResponse, - NotifyBlockAddedResponseMessage, + kaspad_request::Payload, rpc_server::Rpc, GetBlockResponseMessage, GetBlockTemplateResponseMessage, GetInfoResponseMessage, + KaspadRequest, KaspadResponse, NotifyBlockAddedResponseMessage, SubmitBlockResponseMessage, }; use crate::server::StatusResult; use futures::Stream; @@ -18,6 +19,7 @@ use rpc_core::{ }; use std::{io::ErrorKind, net::SocketAddr, pin::Pin, sync::Arc}; use tokio::sync::{mpsc, RwLock}; +use tokio_stream::wrappers::ReceiverStream; use tonic::{Request, Response}; /// A protowire RPC service. @@ -44,12 +46,12 @@ use tonic::{Request, Response}; /// - stop /// - finalize /// -/// _Object is ready for being dropped. Any further usage of it is undefined behaviour._ +/// _Object is ready for being dropped. Any further usage of it is undefined behavior._ /// /// #### Further development /// -/// TODO: implement a queue of requests and a pool of workers preparing and sending back the reponses. -pub struct RpcService { +/// TODO: implement a queue of requests and a pool of workers preparing and sending back the responses. +pub struct GrpcService { core_service: Arc, core_channel: NotificationChannel, core_listener: Arc, @@ -57,7 +59,7 @@ pub struct RpcService { notifier: Arc, } -impl RpcService { +impl GrpcService { pub fn new(core_service: Arc) -> Self { // Prepare core objects let core_channel = NotificationChannel::default(); @@ -100,7 +102,6 @@ impl RpcService { Ok(()) } - // TODO: implement a proper server shutdown actually calling finalize. 
pub async fn finalize(&self) -> RpcResult<()> { self.core_service.unregister_listener(self.core_listener.id).await?; self.core_channel.receiver().close(); @@ -109,7 +110,7 @@ impl RpcService { } #[tonic::async_trait] -impl Rpc for RpcService { +impl Rpc for Arc<GrpcService> { type MessageStreamStream = Pin<Box<dyn Stream<Item = StatusResult<KaspadResponse>> + Send + Sync + 'static>>; async fn message_stream( @@ -122,11 +123,11 @@ impl Rpc for RpcService { trace!("MessageStream from {:?}", remote_addr); - // External sender and reciever + // External sender and receiver let (send_channel, mut recv_channel) = mpsc::channel::<StatusResult<KaspadResponse>>(128); let listener_id = self.register_connection(remote_addr, send_channel.clone()).await; - // Internal related sender and reciever + // Internal related sender and receiver let (stream_tx, stream_rx) = mpsc::channel::<StatusResult<KaspadResponse>>(10); // KaspadResponse forwarder @@ -148,20 +149,30 @@ impl Rpc for RpcService { let core_service = self.core_service.clone(); let connection_manager = self.connection_manager.clone(); let notifier = self.notifier.clone(); - let mut stream: tonic::Streaming<KaspadRequest> = request.into_inner(); + let mut request_stream: tonic::Streaming<KaspadRequest> = request.into_inner(); tokio::spawn(async move { loop { - match stream.message().await { + match request_stream.message().await { Ok(Some(request)) => { - trace!("Request is {:?}", request); + //trace!("Incoming {:?}", request); let response: KaspadResponse = match request.payload { + Some(Payload::SubmitBlockRequest(ref request)) => match request.try_into() { + Ok(request) => core_service.submit_block_call(request).await.into(), + Err(err) => SubmitBlockResponseMessage::from(err).into(), + }, + + Some(Payload::GetBlockTemplateRequest(ref request)) => match request.try_into() { + Ok(request) => core_service.get_block_template_call(request).await.into(), + Err(err) => GetBlockTemplateResponseMessage::from(err).into(), + }, + Some(Payload::GetBlockRequest(ref request)) => match request.try_into() { - Ok(request) => core_service.get_block(request).await.into(), + Ok(request) => core_service.get_block_call(request).await.into(), Err(err) => GetBlockResponseMessage::from(err).into(), }, Some(Payload::GetInfoRequest(ref request)) => match request.try_into() { - Ok(request) => core_service.get_info(request).await.into(), + Ok(request) => core_service.get_info_call(request).await.into(), Err(err) => GetInfoResponseMessage::from(err).into(), }, @@ -175,12 +186,25 @@ impl Rpc for RpcService { }) .into(), + Some(Payload::NotifyNewBlockTemplateRequest(ref request)) => { + NotifyNewBlockTemplateResponseMessage::from({ + let request = rpc_core::NotifyNewBlockTemplateRequest::try_from(request).unwrap(); + notifier.clone().execute_subscribe_command( + listener_id, + rpc_core::NotificationType::NewBlockTemplate, + request.command, + ) + }) + .into() + } + // TODO: This must be replaced by actual handling of all request variants _ => GetBlockResponseMessage::from(rpc_core::RpcError::General( "Server-side API Not implemented".to_string(), )) .into(), }; + //trace!("Outgoing {:?}", response); match send_channel.send(Ok(response)).await { Ok(_) => {} @@ -199,14 +223,14 @@ impl Rpc for RpcService { if io_err.kind() == ErrorKind::BrokenPipe { // here you can handle special case when client // disconnected in unexpected way - eprintln!("\tRequest handler stream {0} error: client disconnected, broken pipe", remote_addr); + trace!("\tRequest handler stream {0} error: client disconnected, broken pipe", remote_addr); break; } } match send_channel.send(Err(err)).await { Ok(_) => (), - Err(_err) => break, // response was
droped + Err(_err) => break, // response was dropped } } } @@ -216,8 +240,8 @@ impl Rpc for RpcService { }); // Return connection stream - - Ok(Response::new(Box::pin(tokio_stream::wrappers::ReceiverStream::new(stream_rx)))) + let response_stream = ReceiverStream::new(stream_rx); + Ok(Response::new(Box::pin(response_stream))) } } diff --git a/simpa/src/main.rs b/simpa/src/main.rs index bafac783b..21d905bf1 100644 --- a/simpa/src/main.rs +++ b/simpa/src/main.rs @@ -8,12 +8,11 @@ use consensus::{ ghostdag::{GhostdagStoreReader, KType}, headers::HeaderStoreReader, relations::RelationsStoreReader, - statuses::BlockStatus, }, params::{Params, DEVNET_PARAMS}, processes::ghostdag::ordering::SortableBlock, }; -use consensus_core::{block::Block, header::Header, BlockHashSet, HashMapCustomHasher}; +use consensus_core::{block::Block, blockstatus::BlockStatus, header::Header, BlockHashSet, HashMapCustomHasher}; use futures::{future::join_all, Future}; use hashes::Hash; use itertools::Itertools; @@ -63,6 +62,11 @@ struct Args { /// Defaults to the number of logical CPU cores. #[arg(short, long)] virtual_threads: Option, + + /// Logging level for all subsystems {off, error, warn, info, debug, trace} + /// -- You may also specify =,=,... to set the log level for individual subsystems + #[arg(long = "loglevel", default_value = "info")] + log_level: String, } /// Calculates the k parameter of the GHOSTDAG protocol such that anticones lager than k will be created @@ -86,7 +90,15 @@ fn calculate_ghostdag_k(x: f64, delta: f64) -> u64 { fn main() { let args = Args::parse(); + kaspa_core::log::init_logger(&args.log_level); assert!(args.bps * args.delay < 250.0, "The delay times bps product is larger than 250"); + if args.miners > 1 { + println!( + "Warning: number of miners was configured to {}. Currently each miner added doubles the simulation + memory and runtime footprint, while a single miner is sufficient for most simulation purposes (delay is simulated anyway).", + args.miners + ); + } let mut params = DEVNET_PARAMS.clone_with_skip_pow(); let mut perf_params = PERF_PARAMS; adjust_consensus_params(&args, &mut params); diff --git a/simpa/src/simulator/miner.rs b/simpa/src/simulator/miner.rs index 9ed904111..587333c5f 100644 --- a/simpa/src/simulator/miner.rs +++ b/simpa/src/simulator/miner.rs @@ -1,10 +1,10 @@ use super::infra::{Environment, Process, Resumption, Suspension}; use consensus::consensus::Consensus; use consensus::errors::{BlockProcessResult, RuleError}; -use consensus::model::stores::statuses::BlockStatus; use consensus::model::stores::virtual_state::VirtualStateStoreReader; use consensus::params::Params; use consensus_core::block::Block; +use consensus_core::blockstatus::BlockStatus; use consensus_core::coinbase::MinerData; use consensus_core::sign::sign; use consensus_core::subnets::SUBNETWORK_ID_NATIVE; @@ -98,7 +98,7 @@ impl Miner { let txs = self.build_txs(); let nonce = self.id; - let mut block_template = self.consensus.build_block_template(self.miner_data.clone(), txs); + let mut block_template = self.consensus.as_ref().build_block_template(self.miner_data.clone(), txs); block_template.block.header.timestamp = timestamp; // Use simulation time rather than real time block_template.block.header.nonce = nonce; block_template.block.header.finalize();
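A note on the conversion pattern used repeatedly in the tx.rs and block.rs hunks above: the old "missing verbose_data field" errors are replaced by optional fields handled through .map(T::try_from).transpose()?. Below is a minimal, std-only illustration of that idiom; the parse_opt helper is hypothetical and exists only for demonstration, it is not part of the patch.

use std::num::ParseIntError;

// Option<&str> -> Option<Result<i32, _>> -> Result<Option<i32>, _>,
// mirroring how an optional protowire field is mapped into an optional
// rpc_core field while still propagating conversion errors with `?`.
fn parse_opt(input: Option<&str>) -> Result<Option<i32>, ParseIntError> {
    input.map(str::parse::<i32>).transpose()
}

fn main() -> Result<(), ParseIntError> {
    assert_eq!(parse_opt(Some("42"))?, Some(42));
    assert_eq!(parse_opt(None)?, None);
    assert!(parse_opt(Some("not a number")).is_err());
    Ok(())
}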
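The message.rs hunk above leaves a "// TODO: tests" placeholder. A sketch of what a round-trip test for the new SubmitBlock report conversions could look like, assuming the rpc_core types derive PartialEq and Debug (the module layout and assertions are illustrative, not part of the change):

#[cfg(test)]
mod tests {
    use crate::protowire::submit_block_response_message::RejectReason;
    use rpc_core::{SubmitBlockRejectReason, SubmitBlockReport};

    #[test]
    fn submit_block_report_round_trip() {
        let reports = [
            SubmitBlockReport::Success,
            SubmitBlockReport::Reject(SubmitBlockRejectReason::BlockInvalid),
            SubmitBlockReport::Reject(SubmitBlockRejectReason::IsInIBD),
        ];
        for report in reports.iter() {
            // rpc_core -> protowire (i32-backed prost enum) -> rpc_core
            let wire: RejectReason = report.into();
            let roundtrip: SubmitBlockReport = wire.into();
            assert_eq!(*report, roundtrip);
            // The i32 encoding stored in SubmitBlockResponseMessage must survive from_i32 as well
            assert_eq!(RejectReason::from_i32(wire as i32), Some(wire));
        }
    }
}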
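The ext/kaspad.rs helper extended above gives clients a single call for building a subscription message covering the new notification type. A usage sketch, assuming SubscribeCommand exposes a Start variant and that the rpc/grpc crate is imported as rpc_grpc (neither name is shown in the diff):

use rpc_core::{api::ops::SubscribeCommand, NotificationType};
use rpc_grpc::protowire::KaspadRequest;

/// Builds the gRPC message a client would send over the message stream
/// to start receiving NewBlockTemplate notifications.
fn subscribe_new_block_template() -> KaspadRequest {
    KaspadRequest::from_notification_type(&NotificationType::NewBlockTemplate, SubscribeCommand::Start)
}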