diff --git a/Cargo.lock b/Cargo.lock index a0db54630..a951993e9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2265,7 +2265,7 @@ dependencies = [ [[package]] name = "kaspa-addresses" -version = "0.15.2" +version = "0.15.3" dependencies = [ "borsh", "criterion", @@ -2282,7 +2282,7 @@ dependencies = [ [[package]] name = "kaspa-addressmanager" -version = "0.15.2" +version = "0.15.3" dependencies = [ "borsh", "igd-next", @@ -2304,14 +2304,14 @@ dependencies = [ [[package]] name = "kaspa-alloc" -version = "0.15.2" +version = "0.15.3" dependencies = [ "mimalloc", ] [[package]] name = "kaspa-bip32" -version = "0.15.2" +version = "0.15.3" dependencies = [ "borsh", "bs58", @@ -2338,7 +2338,7 @@ dependencies = [ [[package]] name = "kaspa-cli" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-trait", "borsh", @@ -2385,7 +2385,7 @@ dependencies = [ [[package]] name = "kaspa-connectionmanager" -version = "0.15.2" +version = "0.15.3" dependencies = [ "duration-string", "futures-util", @@ -2402,7 +2402,7 @@ dependencies = [ [[package]] name = "kaspa-consensus" -version = "0.15.2" +version = "0.15.3" dependencies = [ "arc-swap", "async-channel 2.3.1", @@ -2446,7 +2446,7 @@ dependencies = [ [[package]] name = "kaspa-consensus-client" -version = "0.15.2" +version = "0.15.3" dependencies = [ "ahash", "cfg-if 1.0.0", @@ -2474,7 +2474,7 @@ dependencies = [ [[package]] name = "kaspa-consensus-core" -version = "0.15.2" +version = "0.15.3" dependencies = [ "arc-swap", "async-trait", @@ -2513,7 +2513,7 @@ dependencies = [ [[package]] name = "kaspa-consensus-notify" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-channel 2.3.1", "cfg-if 1.0.0", @@ -2532,7 +2532,7 @@ dependencies = [ [[package]] name = "kaspa-consensus-wasm" -version = "0.15.2" +version = "0.15.3" dependencies = [ "cfg-if 1.0.0", "faster-hex", @@ -2556,7 +2556,7 @@ dependencies = [ [[package]] name = "kaspa-consensusmanager" -version = "0.15.2" +version = "0.15.3" dependencies = [ "duration-string", "futures", @@ -2574,7 +2574,7 @@ dependencies = [ [[package]] name = "kaspa-core" -version = "0.15.2" +version = "0.15.3" dependencies = [ "cfg-if 1.0.0", "ctrlc", @@ -2592,7 +2592,7 @@ dependencies = [ [[package]] name = "kaspa-daemon" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-trait", "borsh", @@ -2614,7 +2614,7 @@ dependencies = [ [[package]] name = "kaspa-database" -version = "0.15.2" +version = "0.15.3" dependencies = [ "bincode", "enum-primitive-derive", @@ -2636,7 +2636,7 @@ dependencies = [ [[package]] name = "kaspa-grpc-client" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-channel 2.3.1", "async-stream", @@ -2668,7 +2668,7 @@ dependencies = [ [[package]] name = "kaspa-grpc-core" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-channel 2.3.1", "async-stream", @@ -2697,7 +2697,7 @@ dependencies = [ [[package]] name = "kaspa-grpc-server" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-channel 2.3.1", "async-stream", @@ -2733,7 +2733,7 @@ dependencies = [ [[package]] name = "kaspa-hashes" -version = "0.15.2" +version = "0.15.3" dependencies = [ "blake2b_simd", "borsh", @@ -2754,7 +2754,7 @@ dependencies = [ [[package]] name = "kaspa-index-core" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-channel 2.3.1", "async-trait", @@ -2773,7 +2773,7 @@ dependencies = [ [[package]] name = "kaspa-index-processor" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-channel 2.3.1", "async-trait", @@ -2801,7 +2801,7 @@ dependencies = [ 
[[package]] name = "kaspa-math" -version = "0.15.2" +version = "0.15.3" dependencies = [ "borsh", "criterion", @@ -2822,14 +2822,14 @@ dependencies = [ [[package]] name = "kaspa-merkle" -version = "0.15.2" +version = "0.15.3" dependencies = [ "kaspa-hashes", ] [[package]] name = "kaspa-metrics-core" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-trait", "borsh", @@ -2845,7 +2845,7 @@ dependencies = [ [[package]] name = "kaspa-mining" -version = "0.15.2" +version = "0.15.3" dependencies = [ "criterion", "futures-util", @@ -2872,7 +2872,7 @@ dependencies = [ [[package]] name = "kaspa-mining-errors" -version = "0.15.2" +version = "0.15.3" dependencies = [ "kaspa-consensus-core", "thiserror", @@ -2880,7 +2880,7 @@ dependencies = [ [[package]] name = "kaspa-muhash" -version = "0.15.2" +version = "0.15.3" dependencies = [ "criterion", "kaspa-hashes", @@ -2893,7 +2893,7 @@ dependencies = [ [[package]] name = "kaspa-notify" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-channel 2.3.1", "async-trait", @@ -2929,7 +2929,7 @@ dependencies = [ [[package]] name = "kaspa-p2p-flows" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-trait", "chrono", @@ -2960,7 +2960,7 @@ dependencies = [ [[package]] name = "kaspa-p2p-lib" -version = "0.15.2" +version = "0.15.3" dependencies = [ "borsh", "ctrlc", @@ -2991,7 +2991,7 @@ dependencies = [ [[package]] name = "kaspa-perf-monitor" -version = "0.15.2" +version = "0.15.3" dependencies = [ "kaspa-core", "log", @@ -3003,7 +3003,7 @@ dependencies = [ [[package]] name = "kaspa-pow" -version = "0.15.2" +version = "0.15.3" dependencies = [ "criterion", "js-sys", @@ -3019,7 +3019,7 @@ dependencies = [ [[package]] name = "kaspa-rpc-core" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-channel 2.3.1", "async-trait", @@ -3061,7 +3061,7 @@ dependencies = [ [[package]] name = "kaspa-rpc-macros" -version = "0.15.2" +version = "0.15.3" dependencies = [ "convert_case 0.6.0", "proc-macro-error", @@ -3073,7 +3073,7 @@ dependencies = [ [[package]] name = "kaspa-rpc-service" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-trait", "kaspa-addresses", @@ -3102,7 +3102,7 @@ dependencies = [ [[package]] name = "kaspa-testing-integration" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-channel 2.3.1", "async-trait", @@ -3162,7 +3162,7 @@ dependencies = [ [[package]] name = "kaspa-txscript" -version = "0.15.2" +version = "0.15.3" dependencies = [ "blake2b_simd", "borsh", @@ -3194,7 +3194,7 @@ dependencies = [ [[package]] name = "kaspa-txscript-errors" -version = "0.15.2" +version = "0.15.3" dependencies = [ "secp256k1", "thiserror", @@ -3202,7 +3202,7 @@ dependencies = [ [[package]] name = "kaspa-utils" -version = "0.15.2" +version = "0.15.3" dependencies = [ "arc-swap", "async-channel 2.3.1", @@ -3238,7 +3238,7 @@ dependencies = [ [[package]] name = "kaspa-utils-tower" -version = "0.15.2" +version = "0.15.3" dependencies = [ "bytes", "cfg-if 1.0.0", @@ -3254,7 +3254,7 @@ dependencies = [ [[package]] name = "kaspa-utxoindex" -version = "0.15.2" +version = "0.15.3" dependencies = [ "futures", "kaspa-consensus", @@ -3275,7 +3275,7 @@ dependencies = [ [[package]] name = "kaspa-wallet" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-std", "async-trait", @@ -3287,7 +3287,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-cli-wasm" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-trait", "js-sys", @@ -3301,7 +3301,7 @@ dependencies = [ [[package]] name = 
"kaspa-wallet-core" -version = "0.15.2" +version = "0.15.3" dependencies = [ "aes", "ahash", @@ -3382,7 +3382,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-keys" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-trait", "borsh", @@ -3415,7 +3415,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-macros" -version = "0.15.2" +version = "0.15.3" dependencies = [ "convert_case 0.5.0", "proc-macro-error", @@ -3428,7 +3428,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-pskt" -version = "0.15.2" +version = "0.15.3" dependencies = [ "bincode", "derive_builder", @@ -3455,7 +3455,7 @@ dependencies = [ [[package]] name = "kaspa-wasm" -version = "0.15.2" +version = "0.15.3" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -3483,7 +3483,7 @@ dependencies = [ [[package]] name = "kaspa-wasm-core" -version = "0.15.2" +version = "0.15.3" dependencies = [ "faster-hex", "hexplay", @@ -3494,7 +3494,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-client" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-std", "async-trait", @@ -3530,7 +3530,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-example-subscriber" -version = "0.15.2" +version = "0.15.3" dependencies = [ "ctrlc", "futures", @@ -3545,7 +3545,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-proxy" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-trait", "clap 4.5.19", @@ -3564,7 +3564,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-server" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-trait", "borsh", @@ -3592,7 +3592,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-simple-client-example" -version = "0.15.2" +version = "0.15.3" dependencies = [ "futures", "kaspa-rpc-core", @@ -3602,7 +3602,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-wasm" -version = "0.15.2" +version = "0.15.3" dependencies = [ "ahash", "async-std", @@ -3632,7 +3632,7 @@ dependencies = [ [[package]] name = "kaspad" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-channel 2.3.1", "cfg-if 1.0.0", @@ -3640,6 +3640,7 @@ dependencies = [ "dhat", "dirs", "futures-util", + "itertools 0.13.0", "kaspa-addresses", "kaspa-addressmanager", "kaspa-alloc", @@ -3667,6 +3668,7 @@ dependencies = [ "num_cpus", "rand", "rayon", + "rocksdb", "serde", "serde_with", "tempfile", @@ -4972,7 +4974,7 @@ dependencies = [ [[package]] name = "rothschild" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-channel 2.3.1", "clap 4.5.19", @@ -5385,7 +5387,7 @@ dependencies = [ [[package]] name = "simpa" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-channel 2.3.1", "cfg-if 1.0.0", diff --git a/Cargo.toml b/Cargo.toml index dd5eb3132..7141101f9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -63,7 +63,7 @@ members = [ [workspace.package] rust-version = "1.81.0" -version = "0.15.2" +version = "0.15.3" authors = ["Kaspa developers"] license = "ISC" repository = "https://github.com/kaspanet/rusty-kaspa" @@ -80,61 +80,61 @@ include = [ ] [workspace.dependencies] -# kaspa-testing-integration = { version = "0.15.2", path = "testing/integration" } -kaspa-addresses = { version = "0.15.2", path = "crypto/addresses" } -kaspa-addressmanager = { version = "0.15.2", path = "components/addressmanager" } -kaspa-bip32 = { version = "0.15.2", path = "wallet/bip32" } -kaspa-cli = { version = "0.15.2", path = "cli" } -kaspa-connectionmanager = { version = "0.15.2", path = "components/connectionmanager" } -kaspa-consensus = { version = "0.15.2", path = "consensus" } -kaspa-consensus-core = 
{ version = "0.15.2", path = "consensus/core" } -kaspa-consensus-client = { version = "0.15.2", path = "consensus/client" } -kaspa-consensus-notify = { version = "0.15.2", path = "consensus/notify" } -kaspa-consensus-wasm = { version = "0.15.2", path = "consensus/wasm" } -kaspa-consensusmanager = { version = "0.15.2", path = "components/consensusmanager" } -kaspa-core = { version = "0.15.2", path = "core" } -kaspa-daemon = { version = "0.15.2", path = "daemon" } -kaspa-database = { version = "0.15.2", path = "database" } -kaspa-grpc-client = { version = "0.15.2", path = "rpc/grpc/client" } -kaspa-grpc-core = { version = "0.15.2", path = "rpc/grpc/core" } -kaspa-grpc-server = { version = "0.15.2", path = "rpc/grpc/server" } -kaspa-hashes = { version = "0.15.2", path = "crypto/hashes" } -kaspa-index-core = { version = "0.15.2", path = "indexes/core" } -kaspa-index-processor = { version = "0.15.2", path = "indexes/processor" } -kaspa-math = { version = "0.15.2", path = "math" } -kaspa-merkle = { version = "0.15.2", path = "crypto/merkle" } -kaspa-metrics-core = { version = "0.15.2", path = "metrics/core" } -kaspa-mining = { version = "0.15.2", path = "mining" } -kaspa-mining-errors = { version = "0.15.2", path = "mining/errors" } -kaspa-muhash = { version = "0.15.2", path = "crypto/muhash" } -kaspa-notify = { version = "0.15.2", path = "notify" } -kaspa-p2p-flows = { version = "0.15.2", path = "protocol/flows" } -kaspa-p2p-lib = { version = "0.15.2", path = "protocol/p2p" } -kaspa-perf-monitor = { version = "0.15.2", path = "metrics/perf_monitor" } -kaspa-pow = { version = "0.15.2", path = "consensus/pow" } -kaspa-rpc-core = { version = "0.15.2", path = "rpc/core" } -kaspa-rpc-macros = { version = "0.15.2", path = "rpc/macros" } -kaspa-rpc-service = { version = "0.15.2", path = "rpc/service" } -kaspa-txscript = { version = "0.15.2", path = "crypto/txscript" } -kaspa-txscript-errors = { version = "0.15.2", path = "crypto/txscript/errors" } -kaspa-utils = { version = "0.15.2", path = "utils" } -kaspa-utils-tower = { version = "0.15.2", path = "utils/tower" } -kaspa-utxoindex = { version = "0.15.2", path = "indexes/utxoindex" } -kaspa-wallet = { version = "0.15.2", path = "wallet/native" } -kaspa-wallet-cli-wasm = { version = "0.15.2", path = "wallet/wasm" } -kaspa-wallet-keys = { version = "0.15.2", path = "wallet/keys" } -kaspa-wallet-pskt = { version = "0.15.2", path = "wallet/pskt" } -kaspa-wallet-core = { version = "0.15.2", path = "wallet/core" } -kaspa-wallet-macros = { version = "0.15.2", path = "wallet/macros" } -kaspa-wasm = { version = "0.15.2", path = "wasm" } -kaspa-wasm-core = { version = "0.15.2", path = "wasm/core" } -kaspa-wrpc-client = { version = "0.15.2", path = "rpc/wrpc/client" } -kaspa-wrpc-proxy = { version = "0.15.2", path = "rpc/wrpc/proxy" } -kaspa-wrpc-server = { version = "0.15.2", path = "rpc/wrpc/server" } -kaspa-wrpc-wasm = { version = "0.15.2", path = "rpc/wrpc/wasm" } -kaspa-wrpc-example-subscriber = { version = "0.15.2", path = "rpc/wrpc/examples/subscriber" } -kaspad = { version = "0.15.2", path = "kaspad" } -kaspa-alloc = { version = "0.15.2", path = "utils/alloc" } +# kaspa-testing-integration = { version = "0.15.3", path = "testing/integration" } +kaspa-addresses = { version = "0.15.3", path = "crypto/addresses" } +kaspa-addressmanager = { version = "0.15.3", path = "components/addressmanager" } +kaspa-bip32 = { version = "0.15.3", path = "wallet/bip32" } +kaspa-cli = { version = "0.15.3", path = "cli" } +kaspa-connectionmanager = { version = "0.15.3", path 
= "components/connectionmanager" } +kaspa-consensus = { version = "0.15.3", path = "consensus" } +kaspa-consensus-core = { version = "0.15.3", path = "consensus/core" } +kaspa-consensus-client = { version = "0.15.3", path = "consensus/client" } +kaspa-consensus-notify = { version = "0.15.3", path = "consensus/notify" } +kaspa-consensus-wasm = { version = "0.15.3", path = "consensus/wasm" } +kaspa-consensusmanager = { version = "0.15.3", path = "components/consensusmanager" } +kaspa-core = { version = "0.15.3", path = "core" } +kaspa-daemon = { version = "0.15.3", path = "daemon" } +kaspa-database = { version = "0.15.3", path = "database" } +kaspa-grpc-client = { version = "0.15.3", path = "rpc/grpc/client" } +kaspa-grpc-core = { version = "0.15.3", path = "rpc/grpc/core" } +kaspa-grpc-server = { version = "0.15.3", path = "rpc/grpc/server" } +kaspa-hashes = { version = "0.15.3", path = "crypto/hashes" } +kaspa-index-core = { version = "0.15.3", path = "indexes/core" } +kaspa-index-processor = { version = "0.15.3", path = "indexes/processor" } +kaspa-math = { version = "0.15.3", path = "math" } +kaspa-merkle = { version = "0.15.3", path = "crypto/merkle" } +kaspa-metrics-core = { version = "0.15.3", path = "metrics/core" } +kaspa-mining = { version = "0.15.3", path = "mining" } +kaspa-mining-errors = { version = "0.15.3", path = "mining/errors" } +kaspa-muhash = { version = "0.15.3", path = "crypto/muhash" } +kaspa-notify = { version = "0.15.3", path = "notify" } +kaspa-p2p-flows = { version = "0.15.3", path = "protocol/flows" } +kaspa-p2p-lib = { version = "0.15.3", path = "protocol/p2p" } +kaspa-perf-monitor = { version = "0.15.3", path = "metrics/perf_monitor" } +kaspa-pow = { version = "0.15.3", path = "consensus/pow" } +kaspa-rpc-core = { version = "0.15.3", path = "rpc/core" } +kaspa-rpc-macros = { version = "0.15.3", path = "rpc/macros" } +kaspa-rpc-service = { version = "0.15.3", path = "rpc/service" } +kaspa-txscript = { version = "0.15.3", path = "crypto/txscript" } +kaspa-txscript-errors = { version = "0.15.3", path = "crypto/txscript/errors" } +kaspa-utils = { version = "0.15.3", path = "utils" } +kaspa-utils-tower = { version = "0.15.3", path = "utils/tower" } +kaspa-utxoindex = { version = "0.15.3", path = "indexes/utxoindex" } +kaspa-wallet = { version = "0.15.3", path = "wallet/native" } +kaspa-wallet-cli-wasm = { version = "0.15.3", path = "wallet/wasm" } +kaspa-wallet-keys = { version = "0.15.3", path = "wallet/keys" } +kaspa-wallet-pskt = { version = "0.15.3", path = "wallet/pskt" } +kaspa-wallet-core = { version = "0.15.3", path = "wallet/core" } +kaspa-wallet-macros = { version = "0.15.3", path = "wallet/macros" } +kaspa-wasm = { version = "0.15.3", path = "wasm" } +kaspa-wasm-core = { version = "0.15.3", path = "wasm/core" } +kaspa-wrpc-client = { version = "0.15.3", path = "rpc/wrpc/client" } +kaspa-wrpc-proxy = { version = "0.15.3", path = "rpc/wrpc/proxy" } +kaspa-wrpc-server = { version = "0.15.3", path = "rpc/wrpc/server" } +kaspa-wrpc-wasm = { version = "0.15.3", path = "rpc/wrpc/wasm" } +kaspa-wrpc-example-subscriber = { version = "0.15.3", path = "rpc/wrpc/examples/subscriber" } +kaspad = { version = "0.15.3", path = "kaspad" } +kaspa-alloc = { version = "0.15.3", path = "utils/alloc" } # external aes = "0.8.3" diff --git a/consensus/src/consensus/factory.rs b/consensus/src/consensus/factory.rs index f3ee51d9c..f8af5fb5a 100644 --- a/consensus/src/consensus/factory.rs +++ b/consensus/src/consensus/factory.rs @@ -59,7 +59,7 @@ pub struct MultiConsensusMetadata { 
version: u32, } -const LATEST_DB_VERSION: u32 = 3; +const LATEST_DB_VERSION: u32 = 4; impl Default for MultiConsensusMetadata { fn default() -> Self { Self { @@ -219,6 +219,23 @@ impl MultiConsensusManagementStore { } } + /// Returns the current version of this database + pub fn version(&self) -> StoreResult { + match self.metadata.read() { + Ok(data) => Ok(data.version), + Err(err) => Err(err), + } + } + + /// Set the database version to a different one + pub fn set_version(&mut self, version: u32) -> StoreResult<()> { + self.metadata.update(DirectDbWriter::new(&self.db), |mut data| { + data.version = version; + data + })?; + Ok(()) + } + pub fn should_upgrade(&self) -> StoreResult { match self.metadata.read() { Ok(data) => Ok(data.version != LATEST_DB_VERSION), diff --git a/consensus/src/consensus/mod.rs b/consensus/src/consensus/mod.rs index d9e4ac7d1..6909562aa 100644 --- a/consensus/src/consensus/mod.rs +++ b/consensus/src/consensus/mod.rs @@ -243,7 +243,7 @@ impl Consensus { block_processors_pool, db.clone(), storage.statuses_store.clone(), - storage.ghostdag_primary_store.clone(), + storage.ghostdag_store.clone(), storage.headers_store.clone(), storage.block_transactions_store.clone(), storage.body_tips_store.clone(), @@ -500,7 +500,7 @@ impl ConsensusApi for Consensus { fn get_virtual_merge_depth_blue_work_threshold(&self) -> BlueWorkType { // PRUNE SAFETY: merge depth root is never close to being pruned (in terms of block depth) - self.get_virtual_merge_depth_root().map_or(BlueWorkType::ZERO, |root| self.ghostdag_primary_store.get_blue_work(root).unwrap()) + self.get_virtual_merge_depth_root().map_or(BlueWorkType::ZERO, |root| self.ghostdag_store.get_blue_work(root).unwrap()) } fn get_sink(&self) -> Hash { @@ -533,7 +533,7 @@ impl ConsensusApi for Consensus { for child in initial_children { if visited.insert(child) { - let blue_work = self.ghostdag_primary_store.get_blue_work(child).unwrap(); + let blue_work = self.ghostdag_store.get_blue_work(child).unwrap(); heap.push(Reverse(SortableBlock::new(child, blue_work))); } } @@ -560,7 +560,7 @@ impl ConsensusApi for Consensus { for child in children { if visited.insert(child) { - let blue_work = self.ghostdag_primary_store.get_blue_work(child).unwrap(); + let blue_work = self.ghostdag_store.get_blue_work(child).unwrap(); heap.push(Reverse(SortableBlock::new(child, blue_work))); } } @@ -909,7 +909,7 @@ impl ConsensusApi for Consensus { Some(BlockStatus::StatusInvalid) => return Err(ConsensusError::InvalidBlock(hash)), _ => {} }; - let ghostdag = self.ghostdag_primary_store.get_data(hash).unwrap_option().ok_or(ConsensusError::MissingData(hash))?; + let ghostdag = self.ghostdag_store.get_data(hash).unwrap_option().ok_or(ConsensusError::MissingData(hash))?; Ok((&*ghostdag).into()) } @@ -985,7 +985,7 @@ impl ConsensusApi for Consensus { Ok(self .services .window_manager - .block_window(&self.ghostdag_primary_store.get_data(hash).unwrap(), WindowType::SampledDifficultyWindow) + .block_window(&self.ghostdag_store.get_data(hash).unwrap(), WindowType::SampledDifficultyWindow) .unwrap() .deref() .iter() @@ -1024,7 +1024,7 @@ impl ConsensusApi for Consensus { match start_hash { Some(hash) => { self.validate_block_exists(hash)?; - let ghostdag_data = self.ghostdag_primary_store.get_data(hash).unwrap(); + let ghostdag_data = self.ghostdag_store.get_data(hash).unwrap(); // The selected parent header is used within to check for sampling activation, so we verify its existence first if !self.headers_store.has(ghostdag_data.selected_parent).unwrap() { 
return Err(ConsensusError::DifficultyError(DifficultyError::InsufficientWindowData(0))); diff --git a/consensus/src/consensus/services.rs b/consensus/src/consensus/services.rs index 38e283a14..97c6d0b76 100644 --- a/consensus/src/consensus/services.rs +++ b/consensus/src/consensus/services.rs @@ -53,8 +53,7 @@ pub struct ConsensusServices { pub reachability_service: MTReachabilityService, pub window_manager: DbWindowManager, pub dag_traversal_manager: DbDagTraversalManager, - pub ghostdag_managers: Arc>, - pub ghostdag_primary_manager: DbGhostdagManager, + pub ghostdag_manager: DbGhostdagManager, pub coinbase_manager: CoinbaseManager, pub pruning_point_manager: DbPruningPointManager, pub pruning_proof_manager: Arc, @@ -82,13 +81,13 @@ impl ConsensusServices { let reachability_service = MTReachabilityService::new(storage.reachability_store.clone()); let dag_traversal_manager = DagTraversalManager::new( params.genesis.hash, - storage.ghostdag_primary_store.clone(), + storage.ghostdag_store.clone(), relations_service.clone(), reachability_service.clone(), ); let window_manager = DualWindowManager::new( ¶ms.genesis, - storage.ghostdag_primary_store.clone(), + storage.ghostdag_store.clone(), storage.headers_store.clone(), storage.daa_excluded_store.clone(), storage.block_window_cache_for_difficulty.clone(), @@ -110,27 +109,17 @@ impl ConsensusServices { params.genesis.hash, storage.depth_store.clone(), reachability_service.clone(), - storage.ghostdag_primary_store.clone(), + storage.ghostdag_store.clone(), ); - let ghostdag_managers = Arc::new( - storage - .ghostdag_stores - .iter() - .cloned() - .enumerate() - .map(|(level, ghostdag_store)| { - GhostdagManager::new( - params.genesis.hash, - params.ghostdag_k, - ghostdag_store, - relations_services[level].clone(), - storage.headers_store.clone(), - reachability_service.clone(), - ) - }) - .collect_vec(), + let ghostdag_manager = GhostdagManager::new( + params.genesis.hash, + params.ghostdag_k, + storage.ghostdag_store.clone(), + relations_services[0].clone(), + storage.headers_store.clone(), + reachability_service.clone(), + false, ); - let ghostdag_primary_manager = ghostdag_managers[0].clone(); let coinbase_manager = CoinbaseManager::new( params.coinbase_payload_script_public_key_max_len, @@ -165,7 +154,7 @@ impl ConsensusServices { params.finality_depth, params.genesis.hash, reachability_service.clone(), - storage.ghostdag_primary_store.clone(), + storage.ghostdag_store.clone(), storage.headers_store.clone(), storage.past_pruning_points_store.clone(), storage.headers_selected_tip_store.clone(), @@ -184,7 +173,7 @@ impl ConsensusServices { &storage, parents_manager.clone(), reachability_service.clone(), - ghostdag_managers.clone(), + ghostdag_manager.clone(), dag_traversal_manager.clone(), window_manager.clone(), params.max_block_level, @@ -199,7 +188,7 @@ impl ConsensusServices { params.mergeset_size_limit as usize, reachability_service.clone(), dag_traversal_manager.clone(), - storage.ghostdag_primary_store.clone(), + storage.ghostdag_store.clone(), storage.selected_chain_store.clone(), storage.headers_selected_tip_store.clone(), storage.pruning_point_store.clone(), @@ -213,8 +202,7 @@ impl ConsensusServices { reachability_service, window_manager, dag_traversal_manager, - ghostdag_managers, - ghostdag_primary_manager, + ghostdag_manager, coinbase_manager, pruning_point_manager, pruning_proof_manager, diff --git a/consensus/src/consensus/storage.rs b/consensus/src/consensus/storage.rs index 89a0f5e26..ad3b95d1b 100644 --- 
a/consensus/src/consensus/storage.rs +++ b/consensus/src/consensus/storage.rs @@ -50,8 +50,7 @@ pub struct ConsensusStorage { pub selected_chain_store: Arc>, // Append-only stores - pub ghostdag_stores: Arc>>, - pub ghostdag_primary_store: Arc, + pub ghostdag_store: Arc, pub headers_store: Arc, pub block_transactions_store: Arc, pub past_pruning_points_store: Arc, @@ -193,19 +192,12 @@ impl ConsensusStorage { children_builder.build(), ))); - let ghostdag_stores = Arc::new( - (0..=params.max_block_level) - .map(|level| { - Arc::new(DbGhostdagStore::new( - db.clone(), - level, - ghostdag_builder.downscale(level).build(), - ghostdag_compact_builder.downscale(level).build(), - )) - }) - .collect_vec(), - ); - let ghostdag_primary_store = ghostdag_stores[0].clone(); + let ghostdag_store = Arc::new(DbGhostdagStore::new( + db.clone(), + 0, + ghostdag_builder.downscale(0).build(), + ghostdag_compact_builder.downscale(0).build(), + )); let daa_excluded_store = Arc::new(DbDaaStore::new(db.clone(), daa_excluded_builder.build())); let headers_store = Arc::new(DbHeadersStore::new(db.clone(), headers_builder.build(), headers_compact_builder.build())); let depth_store = Arc::new(DbDepthStore::new(db.clone(), header_data_builder.build())); @@ -245,8 +237,7 @@ impl ConsensusStorage { relations_stores, reachability_relations_store, reachability_store, - ghostdag_stores, - ghostdag_primary_store, + ghostdag_store, pruning_point_store, headers_selected_tip_store, body_tips_store, diff --git a/consensus/src/consensus/test_consensus.rs b/consensus/src/consensus/test_consensus.rs index a705d9ecc..472bdbd83 100644 --- a/consensus/src/consensus/test_consensus.rs +++ b/consensus/src/consensus/test_consensus.rs @@ -118,7 +118,7 @@ impl TestConsensus { pub fn build_header_with_parents(&self, hash: Hash, parents: Vec) -> Header { let mut header = header_from_precomputed_hash(hash, parents); - let ghostdag_data = self.consensus.services.ghostdag_primary_manager.ghostdag(header.direct_parents()); + let ghostdag_data = self.consensus.services.ghostdag_manager.ghostdag(header.direct_parents()); header.pruning_point = self .consensus .services @@ -201,7 +201,7 @@ impl TestConsensus { } pub fn ghostdag_store(&self) -> &Arc { - &self.consensus.ghostdag_primary_store + &self.consensus.ghostdag_store } pub fn reachability_store(&self) -> &Arc> { @@ -233,7 +233,7 @@ impl TestConsensus { } pub fn ghostdag_manager(&self) -> &DbGhostdagManager { - &self.consensus.services.ghostdag_primary_manager + &self.consensus.services.ghostdag_manager } } diff --git a/consensus/src/model/stores/ghostdag.rs b/consensus/src/model/stores/ghostdag.rs index bcf860b3a..fd2600a1c 100644 --- a/consensus/src/model/stores/ghostdag.rs +++ b/consensus/src/model/stores/ghostdag.rs @@ -270,6 +270,27 @@ impl DbGhostdagStore { } } + pub fn new_temp( + db: Arc, + level: BlockLevel, + cache_policy: CachePolicy, + compact_cache_policy: CachePolicy, + temp_index: u8, + ) -> Self { + assert_ne!(SEPARATOR, level, "level {} is reserved for the separator", level); + let lvl_bytes = level.to_le_bytes(); + let temp_index_bytes = temp_index.to_le_bytes(); + let prefix = DatabaseStorePrefixes::TempGhostdag.into_iter().chain(lvl_bytes).chain(temp_index_bytes).collect_vec(); + let compact_prefix = + DatabaseStorePrefixes::TempGhostdagCompact.into_iter().chain(lvl_bytes).chain(temp_index_bytes).collect_vec(); + Self { + db: Arc::clone(&db), + level, + access: CachedDbAccess::new(db.clone(), cache_policy, prefix), + compact_access: CachedDbAccess::new(db, 
compact_cache_policy, compact_prefix), + } + } + pub fn clone_with_new_cache(&self, cache_policy: CachePolicy, compact_cache_policy: CachePolicy) -> Self { Self::new(Arc::clone(&self.db), self.level, cache_policy, compact_cache_policy) } diff --git a/consensus/src/pipeline/header_processor/processor.rs b/consensus/src/pipeline/header_processor/processor.rs index 6c93b91d9..4ecc761af 100644 --- a/consensus/src/pipeline/header_processor/processor.rs +++ b/consensus/src/pipeline/header_processor/processor.rs @@ -55,7 +55,7 @@ pub struct HeaderProcessingContext { pub known_parents: Vec, // Staging data - pub ghostdag_data: Option>>, + pub ghostdag_data: Option>, pub block_window_for_difficulty: Option>, pub block_window_for_past_median_time: Option>, pub mergeset_non_daa: Option, @@ -99,7 +99,7 @@ impl HeaderProcessingContext { /// Returns the primary (level 0) GHOSTDAG data of this header. /// NOTE: is expected to be called only after GHOSTDAG computation was pushed into the context pub fn ghostdag_data(&self) -> &Arc { - &self.ghostdag_data.as_ref().unwrap()[0] + self.ghostdag_data.as_ref().unwrap() } } @@ -127,7 +127,7 @@ pub struct HeaderProcessor { pub(super) relations_stores: Arc>>, pub(super) reachability_store: Arc>, pub(super) reachability_relations_store: Arc>, - pub(super) ghostdag_stores: Arc>>, + pub(super) ghostdag_store: Arc, pub(super) statuses_store: Arc>, pub(super) pruning_point_store: Arc>, pub(super) block_window_cache_for_difficulty: Arc, @@ -138,7 +138,7 @@ pub struct HeaderProcessor { pub(super) depth_store: Arc, // Managers and services - pub(super) ghostdag_managers: Arc>, + pub(super) ghostdag_manager: DbGhostdagManager, pub(super) dag_traversal_manager: DbDagTraversalManager, pub(super) window_manager: DbWindowManager, pub(super) depth_manager: DbBlockDepthManager, @@ -178,7 +178,7 @@ impl HeaderProcessor { relations_stores: storage.relations_stores.clone(), reachability_store: storage.reachability_store.clone(), reachability_relations_store: storage.reachability_relations_store.clone(), - ghostdag_stores: storage.ghostdag_stores.clone(), + ghostdag_store: storage.ghostdag_store.clone(), statuses_store: storage.statuses_store.clone(), pruning_point_store: storage.pruning_point_store.clone(), daa_excluded_store: storage.daa_excluded_store.clone(), @@ -188,7 +188,7 @@ impl HeaderProcessor { block_window_cache_for_difficulty: storage.block_window_cache_for_difficulty.clone(), block_window_cache_for_past_median_time: storage.block_window_cache_for_past_median_time.clone(), - ghostdag_managers: services.ghostdag_managers.clone(), + ghostdag_manager: services.ghostdag_manager.clone(), dag_traversal_manager: services.dag_traversal_manager.clone(), window_manager: services.window_manager.clone(), reachability_service: services.reachability_service.clone(), @@ -344,18 +344,14 @@ impl HeaderProcessor { .collect_vec() } - /// Runs the GHOSTDAG algorithm for all block levels and writes the data into the context (if hasn't run already) + /// Runs the GHOSTDAG algorithm and writes the data into the context (if hasn't run already) fn ghostdag(&self, ctx: &mut HeaderProcessingContext) { - let ghostdag_data = (0..=ctx.block_level as usize) - .map(|level| { - self.ghostdag_stores[level] - .get_data(ctx.hash) - .unwrap_option() - .unwrap_or_else(|| Arc::new(self.ghostdag_managers[level].ghostdag(&ctx.known_parents[level]))) - }) - .collect_vec(); - - self.counters.mergeset_counts.fetch_add(ghostdag_data[0].mergeset_size() as u64, Ordering::Relaxed); + let ghostdag_data = self + 
.ghostdag_store + .get_data(ctx.hash) + .unwrap_option() + .unwrap_or_else(|| Arc::new(self.ghostdag_manager.ghostdag(&ctx.known_parents[0]))); + self.counters.mergeset_counts.fetch_add(ghostdag_data.mergeset_size() as u64, Ordering::Relaxed); ctx.ghostdag_data = Some(ghostdag_data); } @@ -369,10 +365,8 @@ impl HeaderProcessor { // // Append-only stores: these require no lock and hence done first in order to reduce locking time // + self.ghostdag_store.insert_batch(&mut batch, ctx.hash, ghostdag_data).unwrap(); - for (level, datum) in ghostdag_data.iter().enumerate() { - self.ghostdag_stores[level].insert_batch(&mut batch, ctx.hash, datum).unwrap(); - } if let Some(window) = ctx.block_window_for_difficulty { self.block_window_cache_for_difficulty.insert(ctx.hash, window); } @@ -393,8 +387,8 @@ impl HeaderProcessor { // time, and thus serializing this part will do no harm. However this should be benchmarked. The // alternative is to create a separate ReachabilityProcessor and to manage things more tightly. let mut staging = StagingReachabilityStore::new(self.reachability_store.upgradable_read()); - let selected_parent = ghostdag_data[0].selected_parent; - let mut reachability_mergeset = ghostdag_data[0].unordered_mergeset_without_selected_parent(); + let selected_parent = ghostdag_data.selected_parent; + let mut reachability_mergeset = ghostdag_data.unordered_mergeset_without_selected_parent(); reachability::add_block(&mut staging, ctx.hash, selected_parent, &mut reachability_mergeset).unwrap(); // Non-append only stores need to use write locks. @@ -448,10 +442,8 @@ impl HeaderProcessor { // Create a DB batch writer let mut batch = WriteBatch::default(); - for (level, datum) in ghostdag_data.iter().enumerate() { - // This data might have been already written when applying the pruning proof. - self.ghostdag_stores[level].insert_batch(&mut batch, ctx.hash, datum).unwrap_or_exists(); - } + // This data might have been already written when applying the pruning proof. 
+ self.ghostdag_store.insert_batch(&mut batch, ctx.hash, ghostdag_data).unwrap_or_exists(); let mut relations_write = self.relations_stores.write(); ctx.known_parents.into_iter().enumerate().for_each(|(level, parents_by_level)| { @@ -491,8 +483,7 @@ impl HeaderProcessor { PruningPointInfo::from_genesis(self.genesis.hash), (0..=self.max_block_level).map(|_| BlockHashes::new(vec![ORIGIN])).collect(), ); - ctx.ghostdag_data = - Some(self.ghostdag_managers.iter().map(|manager_by_level| Arc::new(manager_by_level.genesis_ghostdag_data())).collect()); + ctx.ghostdag_data = Some(Arc::new(self.ghostdag_manager.genesis_ghostdag_data())); ctx.mergeset_non_daa = Some(Default::default()); ctx.merge_depth_root = Some(ORIGIN); ctx.finality_point = Some(ORIGIN); diff --git a/consensus/src/pipeline/pruning_processor/processor.rs b/consensus/src/pipeline/pruning_processor/processor.rs index 35dc211d5..2de19c265 100644 --- a/consensus/src/pipeline/pruning_processor/processor.rs +++ b/consensus/src/pipeline/pruning_processor/processor.rs @@ -2,7 +2,7 @@ use crate::{ consensus::{ - services::{ConsensusServices, DbGhostdagManager, DbParentsManager, DbPruningPointManager}, + services::{ConsensusServices, DbParentsManager, DbPruningPointManager}, storage::ConsensusStorage, }, model::{ @@ -69,7 +69,6 @@ pub struct PruningProcessor { // Managers and Services reachability_service: MTReachabilityService, - ghostdag_managers: Arc>, pruning_point_manager: DbPruningPointManager, pruning_proof_manager: Arc, parents_manager: DbParentsManager, @@ -107,7 +106,6 @@ impl PruningProcessor { db, storage: storage.clone(), reachability_service: services.reachability_service.clone(), - ghostdag_managers: services.ghostdag_managers.clone(), pruning_point_manager: services.pruning_point_manager.clone(), pruning_proof_manager: services.pruning_proof_manager.clone(), parents_manager: services.parents_manager.clone(), @@ -284,7 +282,7 @@ impl PruningProcessor { let mut batch = WriteBatch::default(); // At this point keep_relations only holds level-0 relations which is the correct filtering criteria for primary GHOSTDAG for kept in keep_relations.keys().copied() { - let Some(ghostdag) = self.ghostdag_primary_store.get_data(kept).unwrap_option() else { + let Some(ghostdag) = self.ghostdag_store.get_data(kept).unwrap_option() else { continue; }; if ghostdag.unordered_mergeset().any(|h| !keep_relations.contains_key(&h)) { @@ -296,7 +294,7 @@ impl PruningProcessor { mutable_ghostdag.selected_parent = ORIGIN; } counter += 1; - self.ghostdag_primary_store.update_batch(&mut batch, kept, &Arc::new(mutable_ghostdag.into())).unwrap(); + self.ghostdag_store.update_batch(&mut batch, kept, &Arc::new(mutable_ghostdag.into())).unwrap(); } } self.db.write(batch).unwrap(); @@ -444,7 +442,10 @@ impl PruningProcessor { let mut staging_level_relations = StagingRelationsStore::new(&mut level_relations_write[lower_level]); relations::delete_level_relations(MemoryWriter, &mut staging_level_relations, current).unwrap_option(); staging_level_relations.commit(&mut batch).unwrap(); - self.ghostdag_stores[lower_level].delete_batch(&mut batch, current).unwrap_option(); + + if lower_level == 0 { + self.ghostdag_store.delete_batch(&mut batch, current).unwrap_option(); + } } } else { // Count only blocks which get fully pruned including DAG relations @@ -463,9 +464,10 @@ impl PruningProcessor { let mut staging_level_relations = StagingRelationsStore::new(&mut level_relations_write[level]); relations::delete_level_relations(MemoryWriter, &mut staging_level_relations, 
current).unwrap_option(); staging_level_relations.commit(&mut batch).unwrap(); - self.ghostdag_stores[level].delete_batch(&mut batch, current).unwrap_option(); }); + self.ghostdag_store.delete_batch(&mut batch, current).unwrap_option(); + // Remove additional header related data self.daa_excluded_store.delete_batch(&mut batch, current).unwrap(); self.depth_store.delete_batch(&mut batch, current).unwrap(); diff --git a/consensus/src/pipeline/virtual_processor/processor.rs b/consensus/src/pipeline/virtual_processor/processor.rs index 9af6879c7..4b571dddc 100644 --- a/consensus/src/pipeline/virtual_processor/processor.rs +++ b/consensus/src/pipeline/virtual_processor/processor.rs @@ -116,7 +116,7 @@ pub struct VirtualStateProcessor { // Stores pub(super) statuses_store: Arc>, - pub(super) ghostdag_primary_store: Arc, + pub(super) ghostdag_store: Arc, pub(super) headers_store: Arc, pub(super) daa_excluded_store: Arc, pub(super) block_transactions_store: Arc, @@ -191,7 +191,7 @@ impl VirtualStateProcessor { db, statuses_store: storage.statuses_store.clone(), headers_store: storage.headers_store.clone(), - ghostdag_primary_store: storage.ghostdag_primary_store.clone(), + ghostdag_store: storage.ghostdag_store.clone(), daa_excluded_store: storage.daa_excluded_store.clone(), block_transactions_store: storage.block_transactions_store.clone(), pruning_point_store: storage.pruning_point_store.clone(), @@ -206,7 +206,7 @@ impl VirtualStateProcessor { pruning_utxoset_stores: storage.pruning_utxoset_stores.clone(), lkg_virtual_state: storage.lkg_virtual_state.clone(), - ghostdag_manager: services.ghostdag_primary_manager.clone(), + ghostdag_manager: services.ghostdag_manager.clone(), reachability_service: services.reachability_service.clone(), relations_service: services.relations_service.clone(), dag_traversal_manager: services.dag_traversal_manager.clone(), @@ -303,7 +303,7 @@ impl VirtualStateProcessor { .expect("all possible rule errors are unexpected here"); // Update the pruning processor about the virtual state change - let sink_ghostdag_data = self.ghostdag_primary_store.get_compact_data(new_sink).unwrap(); + let sink_ghostdag_data = self.ghostdag_store.get_compact_data(new_sink).unwrap(); // Empty the channel before sending the new message. If pruning processor is busy, this step makes sure // the internal channel does not grow with no need (since we only care about the most recent message) let _consume = self.pruning_receiver.try_iter().count(); @@ -404,7 +404,7 @@ impl VirtualStateProcessor { } let header = self.headers_store.get_header(current).unwrap(); - let mergeset_data = self.ghostdag_primary_store.get_data(current).unwrap(); + let mergeset_data = self.ghostdag_store.get_data(current).unwrap(); let pov_daa_score = header.daa_score; let selected_parent_multiset_hash = self.utxo_multisets_store.get(selected_parent).unwrap(); @@ -569,7 +569,7 @@ impl VirtualStateProcessor { let mut heap = tips .into_iter() - .map(|block| SortableBlock { hash: block, blue_work: self.ghostdag_primary_store.get_blue_work(block).unwrap() }) + .map(|block| SortableBlock { hash: block, blue_work: self.ghostdag_store.get_blue_work(block).unwrap() }) .collect::>(); // The initial diff point is the previous sink @@ -591,7 +591,7 @@ impl VirtualStateProcessor { // 2. will be removed eventually by the bounded merge check. // Hence as an optimization we prefer removing such blocks in advance to allow valid tips to be considered. 
let filtering_root = self.depth_store.merge_depth_root(candidate).unwrap(); - let filtering_blue_work = self.ghostdag_primary_store.get_blue_work(filtering_root).unwrap_or_default(); + let filtering_blue_work = self.ghostdag_store.get_blue_work(filtering_root).unwrap_or_default(); return ( candidate, heap.into_sorted_iter().take_while(|s| s.blue_work >= filtering_blue_work).map(|s| s.hash).collect(), @@ -609,7 +609,7 @@ impl VirtualStateProcessor { if self.reachability_service.is_dag_ancestor_of(finality_point, parent) && !self.reachability_service.is_dag_ancestor_of_any(parent, &mut heap.iter().map(|sb| sb.hash)) { - heap.push(SortableBlock { hash: parent, blue_work: self.ghostdag_primary_store.get_blue_work(parent).unwrap() }); + heap.push(SortableBlock { hash: parent, blue_work: self.ghostdag_store.get_blue_work(parent).unwrap() }); } } drop(prune_guard); @@ -1147,7 +1147,7 @@ impl VirtualStateProcessor { // in depth of 2*finality_depth, and can give false negatives for smaller finality violations. let current_pp = self.pruning_point_store.read().pruning_point().unwrap(); let vf = self.virtual_finality_point(&self.lkg_virtual_state.load().ghostdag_data, current_pp); - let vff = self.depth_manager.calc_finality_point(&self.ghostdag_primary_store.get_data(vf).unwrap(), current_pp); + let vff = self.depth_manager.calc_finality_point(&self.ghostdag_store.get_data(vf).unwrap(), current_pp); let last_known_pp = pp_list.iter().rev().find(|pp| match self.statuses_store.read().get(pp.hash).unwrap_option() { Some(status) => status.is_valid(), diff --git a/consensus/src/pipeline/virtual_processor/utxo_validation.rs b/consensus/src/pipeline/virtual_processor/utxo_validation.rs index 651688818..6daa3deb8 100644 --- a/consensus/src/pipeline/virtual_processor/utxo_validation.rs +++ b/consensus/src/pipeline/virtual_processor/utxo_validation.rs @@ -82,7 +82,7 @@ impl VirtualStateProcessor { for (i, (merged_block, txs)) in once((ctx.selected_parent(), selected_parent_transactions)) .chain( ctx.ghostdag_data - .consensus_ordered_mergeset_without_selected_parent(self.ghostdag_primary_store.deref()) + .consensus_ordered_mergeset_without_selected_parent(self.ghostdag_store.deref()) .map(|b| (b, self.block_transactions_store.get(b).unwrap())), ) .enumerate() diff --git a/consensus/src/processes/ghostdag/protocol.rs b/consensus/src/processes/ghostdag/protocol.rs index 8dfe4e793..997c4eecb 100644 --- a/consensus/src/processes/ghostdag/protocol.rs +++ b/consensus/src/processes/ghostdag/protocol.rs @@ -29,6 +29,7 @@ pub struct GhostdagManager, pub(super) reachability_service: U, + use_score_as_work: bool, } impl GhostdagManager { @@ -39,8 +40,9 @@ impl, reachability_service: U, + use_score_as_work: bool, ) -> Self { - Self { genesis_hash, k, ghostdag_store, relations_store, reachability_service, headers_store } + Self { genesis_hash, k, ghostdag_store, relations_store, reachability_service, headers_store, use_score_as_work } } pub fn genesis_ghostdag_data(&self) -> GhostdagData { @@ -115,14 +117,19 @@ impl = std::result::Result; struct CachedPruningPointData { pruning_point: Hash, @@ -88,6 +100,49 @@ impl Clone for CachedPruningPointData { } } +struct TempProofContext { + headers_store: Arc, + ghostdag_stores: Vec>, + relations_stores: Vec, + reachability_stores: Vec>>, + ghostdag_managers: + Vec, DbHeadersStore>>, + db_lifetime: DbLifetime, +} + +#[derive(Clone)] +struct RelationsStoreInFutureOfRoot { + relations_store: T, + reachability_service: U, + root: Hash, +} + +impl RelationsStoreReader for 
RelationsStoreInFutureOfRoot { + fn get_parents(&self, hash: Hash) -> Result { + self.relations_store.get_parents(hash).map(|hashes| { + Arc::new(hashes.iter().copied().filter(|h| self.reachability_service.is_dag_ancestor_of(self.root, *h)).collect_vec()) + }) + } + + fn get_children(&self, hash: Hash) -> StoreResult> { + // We assume hash is in future of root + assert!(self.reachability_service.is_dag_ancestor_of(self.root, hash)); + self.relations_store.get_children(hash) + } + + fn has(&self, hash: Hash) -> Result { + if self.reachability_service.is_dag_ancestor_of(self.root, hash) { + Ok(false) + } else { + self.relations_store.has(hash) + } + } + + fn counts(&self) -> Result<(usize, usize), kaspa_database::prelude::StoreError> { + unimplemented!() + } +} + pub struct PruningProofManager { db: Arc, @@ -95,8 +150,9 @@ pub struct PruningProofManager { reachability_store: Arc>, reachability_relations_store: Arc>, reachability_service: MTReachabilityService, - ghostdag_stores: Arc>>, + ghostdag_store: Arc, relations_stores: Arc>>, + level_relations_services: Vec>, pruning_point_store: Arc>, past_pruning_points_store: Arc, virtual_stores: Arc>, @@ -105,7 +161,7 @@ pub struct PruningProofManager { depth_store: Arc, selected_chain_store: Arc>, - ghostdag_managers: Arc>, + ghostdag_manager: DbGhostdagManager, traversal_manager: DbDagTraversalManager, window_manager: DbWindowManager, parents_manager: DbParentsManager, @@ -129,7 +185,7 @@ impl PruningProofManager { storage: &Arc, parents_manager: DbParentsManager, reachability_service: MTReachabilityService, - ghostdag_managers: Arc>, + ghostdag_manager: DbGhostdagManager, traversal_manager: DbDagTraversalManager, window_manager: DbWindowManager, max_block_level: BlockLevel, @@ -145,7 +201,7 @@ impl PruningProofManager { reachability_store: storage.reachability_store.clone(), reachability_relations_store: storage.reachability_relations_store.clone(), reachability_service, - ghostdag_stores: storage.ghostdag_stores.clone(), + ghostdag_store: storage.ghostdag_store.clone(), relations_stores: storage.relations_stores.clone(), pruning_point_store: storage.pruning_point_store.clone(), past_pruning_points_store: storage.past_pruning_points_store.clone(), @@ -155,7 +211,6 @@ impl PruningProofManager { selected_chain_store: storage.selected_chain_store.clone(), depth_store: storage.depth_store.clone(), - ghostdag_managers, traversal_manager, window_manager, parents_manager, @@ -168,8 +223,13 @@ impl PruningProofManager { pruning_proof_m, anticone_finalization_depth, ghostdag_k, + ghostdag_manager, is_consensus_exiting, + + level_relations_services: (0..=max_block_level) + .map(|level| MTRelationsService::new(storage.relations_stores.clone().clone(), level)) + .collect_vec(), } } @@ -203,18 +263,30 @@ impl PruningProofManager { let pruning_point_header = proof[0].last().unwrap().clone(); let pruning_point = pruning_point_header.hash; - let proof_zero_set = BlockHashSet::from_iter(proof[0].iter().map(|header| header.hash)); + // Create a copy of the proof, since we're going to be mutating the proof passed to us + let proof_sets = (0..=self.max_block_level) + .map(|level| BlockHashSet::from_iter(proof[level as usize].iter().map(|header| header.hash))) + .collect_vec(); + let mut trusted_gd_map: BlockHashMap = BlockHashMap::new(); for tb in trusted_set.iter() { trusted_gd_map.insert(tb.block.hash(), tb.ghostdag.clone().into()); - if proof_zero_set.contains(&tb.block.hash()) { - continue; - } + let tb_block_level = calc_block_level(&tb.block.header, 
self.max_block_level); + + (0..=tb_block_level).for_each(|current_proof_level| { + // If this block was in the original proof, ignore it + if proof_sets[current_proof_level as usize].contains(&tb.block.hash()) { + return; + } - proof[0].push(tb.block.header.clone()); + proof[current_proof_level as usize].push(tb.block.header.clone()); + }); } - proof[0].sort_by(|a, b| a.blue_work.cmp(&b.blue_work)); + proof.iter_mut().for_each(|level_proof| { + level_proof.sort_by(|a, b| a.blue_work.cmp(&b.blue_work)); + }); + self.populate_reachability_and_headers(&proof); { @@ -229,47 +301,48 @@ impl PruningProofManager { for (level, headers) in proof.iter().enumerate() { trace!("Applying level {} from the pruning point proof", level); - self.ghostdag_stores[level].insert(ORIGIN, self.ghostdag_managers[level].origin_ghostdag_data()).unwrap(); + let mut level_ancestors: HashSet = HashSet::new(); + level_ancestors.insert(ORIGIN); + for header in headers.iter() { let parents = Arc::new( self.parents_manager .parents_at_level(header, level as BlockLevel) .iter() .copied() - .filter(|parent| self.ghostdag_stores[level].has(*parent).unwrap()) + .filter(|parent| level_ancestors.contains(parent)) .collect_vec() .push_if_empty(ORIGIN), ); self.relations_stores.write()[level].insert(header.hash, parents.clone()).unwrap(); - let gd = if header.hash == self.genesis_hash { - self.ghostdag_managers[level].genesis_ghostdag_data() - } else if level == 0 { - if let Some(gd) = trusted_gd_map.get(&header.hash) { + + if level == 0 { + let gd = if let Some(gd) = trusted_gd_map.get(&header.hash) { gd.clone() } else { - let calculated_gd = self.ghostdag_managers[level].ghostdag(&parents); + let calculated_gd = self.ghostdag_manager.ghostdag(&parents); // Override the ghostdag data with the real blue score and blue work GhostdagData { blue_score: header.blue_score, blue_work: header.blue_work, selected_parent: calculated_gd.selected_parent, - mergeset_blues: calculated_gd.mergeset_blues.clone(), - mergeset_reds: calculated_gd.mergeset_reds.clone(), - blues_anticone_sizes: calculated_gd.blues_anticone_sizes.clone(), + mergeset_blues: calculated_gd.mergeset_blues, + mergeset_reds: calculated_gd.mergeset_reds, + blues_anticone_sizes: calculated_gd.blues_anticone_sizes, } - } - } else { - self.ghostdag_managers[level].ghostdag(&parents) - }; - self.ghostdag_stores[level].insert(header.hash, Arc::new(gd)).unwrap(); + }; + self.ghostdag_store.insert(header.hash, Arc::new(gd)).unwrap(); + } + + level_ancestors.insert(header.hash); } } let virtual_parents = vec![pruning_point]; let virtual_state = Arc::new(VirtualState { parents: virtual_parents.clone(), - ghostdag_data: self.ghostdag_managers[0].ghostdag(&virtual_parents), + ghostdag_data: self.ghostdag_manager.ghostdag(&virtual_parents), ..VirtualState::default() }); self.virtual_stores.write().state.set(virtual_state).unwrap(); @@ -387,18 +460,16 @@ impl PruningProofManager { } } - pub fn validate_pruning_point_proof(&self, proof: &PruningPointProof) -> PruningImportResult<()> { - if proof.len() != self.max_block_level as usize + 1 { - return Err(PruningImportError::ProofNotEnoughLevels(self.max_block_level as usize + 1)); - } + fn init_validate_pruning_point_proof_stores_and_processes( + &self, + proof: &PruningPointProof, + ) -> PruningImportResult { if proof[0].is_empty() { return Err(PruningImportError::PruningProofNotEnoughHeaders); } let headers_estimate = self.estimate_proof_unique_size(proof); - let proof_pp_header = proof[0].last().expect("checked if empty"); - let proof_pp 
= proof_pp_header.hash; - let proof_pp_level = calc_block_level(proof_pp_header, self.max_block_level); + let (db_lifetime, db) = kaspa_database::create_temp_db!(ConnBuilder::default().with_files_limit(10)); let cache_policy = CachePolicy::Count(2 * self.pruning_proof_m as usize); let headers_store = @@ -428,6 +499,7 @@ impl PruningProofManager { relations_stores[level].clone(), headers_store.clone(), reachability_services[level].clone(), + level != 0, ) }) .collect_vec(); @@ -438,12 +510,30 @@ impl PruningProofManager { let level = level as usize; reachability::init(reachability_stores[level].write().deref_mut()).unwrap(); relations_stores[level].insert_batch(&mut batch, ORIGIN, BlockHashes::new(vec![])).unwrap(); - ghostdag_stores[level].insert(ORIGIN, self.ghostdag_managers[level].origin_ghostdag_data()).unwrap(); + ghostdag_stores[level].insert(ORIGIN, ghostdag_managers[level].origin_ghostdag_data()).unwrap(); } db.write(batch).unwrap(); } + Ok(TempProofContext { db_lifetime, headers_store, ghostdag_stores, relations_stores, reachability_stores, ghostdag_managers }) + } + + fn populate_stores_for_validate_pruning_point_proof( + &self, + proof: &PruningPointProof, + ctx: &mut TempProofContext, + log_validating: bool, + ) -> PruningImportResult> { + let headers_store = &ctx.headers_store; + let ghostdag_stores = &ctx.ghostdag_stores; + let mut relations_stores = ctx.relations_stores.clone(); + let reachability_stores = &ctx.reachability_stores; + let ghostdag_managers = &ctx.ghostdag_managers; + + let proof_pp_header = proof[0].last().expect("checked if empty"); + let proof_pp = proof_pp_header.hash; + let mut selected_tip_by_level = vec![None; self.max_block_level as usize + 1]; for level in (0..=self.max_block_level).rev() { // Before processing this level, check if the process is exiting so we can end early @@ -451,7 +541,9 @@ impl PruningProofManager { return Err(PruningImportError::PruningValidationInterrupted); } - info!("Validating level {level} from the pruning point proof ({} headers)", proof[level as usize].len()); + if log_validating { + info!("Validating level {level} from the pruning point proof ({} headers)", proof[level as usize].len()); + } let level_idx = level as usize; let mut selected_tip = None; for (i, header) in proof[level as usize].iter().enumerate() { @@ -533,49 +625,125 @@ impl PruningProofManager { selected_tip_by_level[level_idx] = selected_tip; } + Ok(selected_tip_by_level.into_iter().map(|selected_tip| selected_tip.unwrap()).collect()) + } + + fn validate_proof_selected_tip( + &self, + proof_selected_tip: Hash, + level: BlockLevel, + proof_pp_level: BlockLevel, + proof_pp: Hash, + proof_pp_header: &Header, + ) -> PruningImportResult<()> { + // A proof selected tip of some level has to be the proof suggested prunint point itself if its level + // is lower or equal to the pruning point level, or a parent of the pruning point on the relevant level + // otherwise. + if level <= proof_pp_level { + if proof_selected_tip != proof_pp { + return Err(PruningImportError::PruningProofSelectedTipIsNotThePruningPoint(proof_selected_tip, level)); + } + } else if !self.parents_manager.parents_at_level(proof_pp_header, level).contains(&proof_selected_tip) { + return Err(PruningImportError::PruningProofSelectedTipNotParentOfPruningPoint(proof_selected_tip, level)); + } + + Ok(()) + } + + // find_proof_and_consensus_common_chain_ancestor_ghostdag_data returns an option of a tuple + // that contains the ghostdag data of the proof and current consensus common ancestor. 
If no + // such ancestor exists, it returns None. + fn find_proof_and_consensus_common_ancestor_ghostdag_data( + &self, + proof_ghostdag_stores: &[Arc], + current_consensus_ghostdag_stores: &[Arc], + proof_selected_tip: Hash, + level: BlockLevel, + proof_selected_tip_gd: CompactGhostdagData, + ) -> Option<(CompactGhostdagData, CompactGhostdagData)> { + let mut proof_current = proof_selected_tip; + let mut proof_current_gd = proof_selected_tip_gd; + loop { + match current_consensus_ghostdag_stores[level as usize].get_compact_data(proof_current).unwrap_option() { + Some(current_gd) => { + break Some((proof_current_gd, current_gd)); + } + None => { + proof_current = proof_current_gd.selected_parent; + if proof_current.is_origin() { + break None; + } + proof_current_gd = proof_ghostdag_stores[level as usize].get_compact_data(proof_current).unwrap(); + } + }; + } + } + + pub fn validate_pruning_point_proof(&self, proof: &PruningPointProof) -> PruningImportResult<()> { + if proof.len() != self.max_block_level as usize + 1 { + return Err(PruningImportError::ProofNotEnoughLevels(self.max_block_level as usize + 1)); + } + + // Initialize the stores for the proof + let mut proof_stores_and_processes = self.init_validate_pruning_point_proof_stores_and_processes(proof)?; + let proof_pp_header = proof[0].last().expect("checked if empty"); + let proof_pp = proof_pp_header.hash; + let proof_pp_level = calc_block_level(proof_pp_header, self.max_block_level); + let proof_selected_tip_by_level = + self.populate_stores_for_validate_pruning_point_proof(proof, &mut proof_stores_and_processes, true)?; + let proof_ghostdag_stores = proof_stores_and_processes.ghostdag_stores; + + // Get the proof for the current consensus and recreate the stores for it + // This is expected to be fast because if a proof exists, it will be cached. + // If no proof exists, this is empty + let mut current_consensus_proof = self.get_pruning_point_proof(); + if current_consensus_proof.is_empty() { + // An empty proof can only happen if we're at genesis. 
We're going to create a proof for this case that contains the genesis header only + let genesis_header = self.headers_store.get_header(self.genesis_hash).unwrap(); + current_consensus_proof = Arc::new((0..=self.max_block_level).map(|_| vec![genesis_header.clone()]).collect_vec()); + } + let mut current_consensus_stores_and_processes = + self.init_validate_pruning_point_proof_stores_and_processes(¤t_consensus_proof)?; + let _ = self.populate_stores_for_validate_pruning_point_proof( + ¤t_consensus_proof, + &mut current_consensus_stores_and_processes, + false, + )?; + let current_consensus_ghostdag_stores = current_consensus_stores_and_processes.ghostdag_stores; + let pruning_read = self.pruning_point_store.read(); let relations_read = self.relations_stores.read(); let current_pp = pruning_read.get().unwrap().pruning_point; let current_pp_header = self.headers_store.get_header(current_pp).unwrap(); - for (level_idx, selected_tip) in selected_tip_by_level.into_iter().enumerate() { + for (level_idx, selected_tip) in proof_selected_tip_by_level.iter().copied().enumerate() { let level = level_idx as BlockLevel; - let selected_tip = selected_tip.unwrap(); - if level <= proof_pp_level { - if selected_tip != proof_pp { - return Err(PruningImportError::PruningProofSelectedTipIsNotThePruningPoint(selected_tip, level)); - } - } else if !self.parents_manager.parents_at_level(proof_pp_header, level).contains(&selected_tip) { - return Err(PruningImportError::PruningProofSelectedTipNotParentOfPruningPoint(selected_tip, level)); - } + self.validate_proof_selected_tip(selected_tip, level, proof_pp_level, proof_pp, proof_pp_header)?; - let proof_selected_tip_gd = ghostdag_stores[level_idx].get_compact_data(selected_tip).unwrap(); + let proof_selected_tip_gd = proof_ghostdag_stores[level_idx].get_compact_data(selected_tip).unwrap(); + + // Next check is to see if this proof is "better" than what's in the current consensus + // Step 1 - look at only levels that have a full proof (least 2m blocks in the proof) if proof_selected_tip_gd.blue_score < 2 * self.pruning_proof_m { continue; } - let mut proof_current = selected_tip; - let mut proof_current_gd = proof_selected_tip_gd; - let common_ancestor_data = loop { - match self.ghostdag_stores[level_idx].get_compact_data(proof_current).unwrap_option() { - Some(current_gd) => { - break Some((proof_current_gd, current_gd)); - } - None => { - proof_current = proof_current_gd.selected_parent; - if proof_current.is_origin() { - break None; - } - proof_current_gd = ghostdag_stores[level_idx].get_compact_data(proof_current).unwrap(); - } - }; - }; - - if let Some((proof_common_ancestor_gd, common_ancestor_gd)) = common_ancestor_data { + // Step 2 - if we can find a common ancestor between the proof and current consensus + // we can determine if the proof is better. The proof is better if the blue work* difference between the + // old current consensus's tips and the common ancestor is less than the blue work difference between the + // proof's tip and the common ancestor. 
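
// [Editor's note: illustrative sketch, not part of the patch.] The "better proof"
// check just described (implemented in the following hunk lines with SignedInteger),
// restated over plain integers: the incoming proof wins at a level only if its
// blue-work gain over the common ancestor exceeds the gain of every parent of the
// current pruning point at that level. i128 stands in for the crate's signed
// big-integer wrapper, and u64 blue works are a simplification.
fn proof_is_better_at_level(
    proof_tip_blue_work: u64,
    proof_common_ancestor_blue_work: u64,
    consensus_common_ancestor_blue_work: u64,
    consensus_pp_parents_blue_work: &[u64],
) -> bool {
    let proof_diff = proof_tip_blue_work as i128 - proof_common_ancestor_blue_work as i128;
    consensus_pp_parents_blue_work.iter().all(|&parent_blue_work| {
        let parent_diff = parent_blue_work as i128 - consensus_common_ancestor_blue_work as i128;
        parent_diff < proof_diff
    })
}
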
+ // *Note: blue work is the same as blue score on levels higher than 0 + if let Some((proof_common_ancestor_gd, common_ancestor_gd)) = self.find_proof_and_consensus_common_ancestor_ghostdag_data( + &proof_ghostdag_stores, + ¤t_consensus_ghostdag_stores, + selected_tip, + level, + proof_selected_tip_gd, + ) { let selected_tip_blue_work_diff = SignedInteger::from(proof_selected_tip_gd.blue_work) - SignedInteger::from(proof_common_ancestor_gd.blue_work); for parent in self.parents_manager.parents_at_level(¤t_pp_header, level).iter().copied() { - let parent_blue_work = self.ghostdag_stores[level_idx].get_blue_work(parent).unwrap(); + let parent_blue_work = current_consensus_ghostdag_stores[level_idx].get_blue_work(parent).unwrap(); let parent_blue_work_diff = SignedInteger::from(parent_blue_work) - SignedInteger::from(common_ancestor_gd.blue_work); if parent_blue_work_diff >= selected_tip_blue_work_diff { @@ -593,15 +761,24 @@ impl PruningProofManager { return Ok(()); } + // If we got here it means there's no level with shared blocks + // between the proof and the current consensus. In this case we + // consider the proof to be better if it has at least one level + // with 2*self.pruning_proof_m blue blocks where consensus doesn't. for level in (0..=self.max_block_level).rev() { let level_idx = level as usize; + + let proof_selected_tip = proof_selected_tip_by_level[level_idx]; + let proof_selected_tip_gd = proof_ghostdag_stores[level_idx].get_compact_data(proof_selected_tip).unwrap(); + if proof_selected_tip_gd.blue_score < 2 * self.pruning_proof_m { + continue; + } + match relations_read[level_idx].get_parents(current_pp).unwrap_option() { Some(parents) => { - if parents - .iter() - .copied() - .any(|parent| self.ghostdag_stores[level_idx].get_blue_score(parent).unwrap() < 2 * self.pruning_proof_m) - { + if parents.iter().copied().any(|parent| { + current_consensus_ghostdag_stores[level_idx].get_blue_score(parent).unwrap() < 2 * self.pruning_proof_m + }) { return Ok(()); } } @@ -614,45 +791,234 @@ impl PruningProofManager { drop(pruning_read); drop(relations_read); - drop(db_lifetime); + drop(proof_stores_and_processes.db_lifetime); + drop(current_consensus_stores_and_processes.db_lifetime); Err(PruningImportError::PruningProofNotEnoughHeaders) } + // The "current dag level" is the level right before the level whose parents are + // not the same as our header's direct parents + // + // Find the current DAG level by going through all the parents at each level, + // starting from the bottom level and see which is the first level that has + // parents that are NOT our current pp_header's direct parents. 
+ fn find_current_dag_level(&self, pp_header: &Header) -> BlockLevel { + let direct_parents = BlockHashSet::from_iter(pp_header.direct_parents().iter().copied()); + pp_header + .parents_by_level + .iter() + .enumerate() + .skip(1) + .find_map(|(level, parents)| { + if BlockHashSet::from_iter(parents.iter().copied()) == direct_parents { + None + } else { + Some((level - 1) as BlockLevel) + } + }) + .unwrap_or(self.max_block_level) + } + + fn estimated_blue_depth_at_level_0(&self, level: BlockLevel, level_depth: u64, current_dag_level: BlockLevel) -> u64 { + level_depth.checked_shl(level.saturating_sub(current_dag_level) as u32).unwrap_or(level_depth) + } + + /// selected parent at level = the parent of the header at the level + /// with the highest blue_work + fn find_selected_parent_header_at_level( + &self, + header: &Header, + level: BlockLevel, + ) -> PruningProofManagerInternalResult> { + // Parents manager parents_at_level may return parents that aren't in relations_service, so it's important + // to filter to include only parents that are in relations_service. + let sp = self + .parents_manager + .parents_at_level(header, level) + .iter() + .copied() + .filter(|p| self.level_relations_services[level as usize].has(*p).unwrap()) + .filter_map(|p| self.headers_store.get_header(p).unwrap_option().map(|h| SortableBlock::new(p, h.blue_work))) + .max() + .ok_or(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof("no parents with header".to_string()))?; + Ok(self.headers_store.get_header(sp.hash).expect("unwrapped above")) + } + + /// Find a sufficient root at a given level by going through the headers store and looking + /// for a deep enough level block + /// For each root candidate, fill in the ghostdag data to see if it actually is deep enough. + /// If the root is deep enough, it will satisfy these conditions + /// 1. block at depth 2m at this level ∈ Future(root) + /// 2. block at depth m at the next level ∈ Future(root) + /// + /// Returns: the filled ghostdag store from root to tip, the selected tip and the root + fn find_sufficient_root( + &self, + pp_header: &HeaderWithBlockLevel, + level: BlockLevel, + current_dag_level: BlockLevel, + required_block: Option, + temp_db: Arc, + ) -> PruningProofManagerInternalResult<(Arc, Hash, Hash)> { + // Step 1: Determine which selected tip to use + let selected_tip = if pp_header.block_level >= level { + pp_header.header.hash + } else { + self.find_selected_parent_header_at_level(&pp_header.header, level)?.hash + }; + + let cache_policy = CachePolicy::Count(2 * self.pruning_proof_m as usize); + let required_level_depth = 2 * self.pruning_proof_m; + + // We only have the headers store (which has level 0 blue_scores) to assemble the proof data from. 
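
// [Editor's note: illustrative sketch, not part of the patch.] The depth estimate
// used by find_sufficient_root: since only level-0 blue scores are available in the
// headers store, the depth to walk back at the base level roughly doubles for every
// level above the current DAG level (the shift below), as the comment continuing in
// the next hunk lines explains. The numbers in the example are only for illustration.
fn estimated_base_depth(level: u8, level_depth: u64, current_dag_level: u8) -> u64 {
    level_depth
        .checked_shl(level.saturating_sub(current_dag_level) as u32)
        .unwrap_or(level_depth)
}
// e.g. with a required depth of 2m + margin = 2100 and current_dag_level = 3:
// level 3 -> 2100, level 4 -> 4200, level 5 -> 8400, and so on.
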
+ // We need to look deeper at higher levels (2x deeper every level) to find 2M (plus margin) blocks at that level + let mut required_base_level_depth = self.estimated_blue_depth_at_level_0( + level, + required_level_depth + 100, // We take a safety margin + current_dag_level, + ); + + let mut is_last_level_header; + let mut tries = 0; + + let block_at_depth_m_at_next_level = required_block.unwrap_or(selected_tip); + + loop { + // Step 2 - Find a deep enough root candidate + let block_at_depth_2m = match self.level_block_at_base_depth(level, selected_tip, required_base_level_depth) { + Ok((header, is_last_header)) => { + is_last_level_header = is_last_header; + header + } + Err(e) => return Err(e), + }; + + let root = if self.reachability_service.is_dag_ancestor_of(block_at_depth_2m, block_at_depth_m_at_next_level) { + block_at_depth_2m + } else if self.reachability_service.is_dag_ancestor_of(block_at_depth_m_at_next_level, block_at_depth_2m) { + block_at_depth_m_at_next_level + } else { + // find common ancestor of block_at_depth_m_at_next_level and block_at_depth_2m in chain of block_at_depth_m_at_next_level + let mut common_ancestor = self.headers_store.get_header(block_at_depth_m_at_next_level).unwrap(); + + while !self.reachability_service.is_dag_ancestor_of(common_ancestor.hash, block_at_depth_2m) { + common_ancestor = match self.find_selected_parent_header_at_level(&common_ancestor, level) { + Ok(header) => header, + // Try to give this last header a chance at being root + Err(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof(_)) => break, + Err(e) => return Err(e), + }; + } + + common_ancestor.hash + }; + + if level == 0 { + return Ok((self.ghostdag_store.clone(), selected_tip, root)); + } + + // Step 3 - Fill the ghostdag data from root to tip + let ghostdag_store = Arc::new(DbGhostdagStore::new_temp(temp_db.clone(), level, cache_policy, cache_policy, tries)); + let has_required_block = self.fill_level_proof_ghostdag_data( + root, + pp_header.header.hash, + &ghostdag_store, + Some(block_at_depth_m_at_next_level), + level, + ); + + // Step 4 - Check if we actually have enough depth. + // Need to ensure this does the same 2M+1 depth that block_at_depth does + if has_required_block + && (root == self.genesis_hash || ghostdag_store.get_blue_score(selected_tip).unwrap() >= required_level_depth) + { + break Ok((ghostdag_store, selected_tip, root)); + } + + tries += 1; + if is_last_level_header { + if has_required_block { + // Normally this scenario doesn't occur when syncing with nodes that already have the safety margin change in place. + // However, when syncing with an older node version that doesn't have a safety margin for the proof, it's possible to + // try to find 2500 depth worth of headers at a level, but the proof only contains about 2000 headers. To be able to sync + // with such an older node. As long as we found the required block, we can still proceed. + debug!("Failed to find sufficient root for level {level} after {tries} tries. Headers below the current depth of {required_base_level_depth} are already pruned. Required block found so trying anyway."); + break Ok((ghostdag_store, selected_tip, root)); + } else { + panic!("Failed to find sufficient root for level {level} after {tries} tries. 
Headers below the current depth of {required_base_level_depth} are already pruned"); + } + } + + // If we don't have enough depth now, we need to look deeper + required_base_level_depth = (required_base_level_depth as f64 * 1.1) as u64; + debug!("Failed to find sufficient root for level {level} after {tries} tries. Retrying again to find with depth {required_base_level_depth}"); + } + } + + fn calc_gd_for_all_levels( + &self, + pp_header: &HeaderWithBlockLevel, + temp_db: Arc, + ) -> (Vec>, Vec, Vec) { + let current_dag_level = self.find_current_dag_level(&pp_header.header); + let mut ghostdag_stores: Vec>> = vec![None; self.max_block_level as usize + 1]; + let mut selected_tip_by_level = vec![None; self.max_block_level as usize + 1]; + let mut root_by_level = vec![None; self.max_block_level as usize + 1]; + for level in (0..=self.max_block_level).rev() { + let level_usize = level as usize; + let required_block = if level != self.max_block_level { + let next_level_store = ghostdag_stores[level_usize + 1].as_ref().unwrap().clone(); + let block_at_depth_m_at_next_level = self + .block_at_depth(&*next_level_store, selected_tip_by_level[level_usize + 1].unwrap(), self.pruning_proof_m) + .map_err(|err| format!("level + 1: {}, err: {}", level + 1, err)) + .unwrap(); + Some(block_at_depth_m_at_next_level) + } else { + None + }; + let (store, selected_tip, root) = self + .find_sufficient_root(pp_header, level, current_dag_level, required_block, temp_db.clone()) + .unwrap_or_else(|_| panic!("find_sufficient_root failed for level {level}")); + ghostdag_stores[level_usize] = Some(store); + selected_tip_by_level[level_usize] = Some(selected_tip); + root_by_level[level_usize] = Some(root); + } + + ( + ghostdag_stores.into_iter().map(Option::unwrap).collect_vec(), + selected_tip_by_level.into_iter().map(Option::unwrap).collect_vec(), + root_by_level.into_iter().map(Option::unwrap).collect_vec(), + ) + } + pub(crate) fn build_pruning_point_proof(&self, pp: Hash) -> PruningPointProof { if pp == self.genesis_hash { return vec![]; } + let (_db_lifetime, temp_db) = kaspa_database::create_temp_db!(ConnBuilder::default().with_files_limit(10)); let pp_header = self.headers_store.get_header_with_block_level(pp).unwrap(); - let selected_tip_by_level = (0..=self.max_block_level) - .map(|level| { - if level <= pp_header.block_level { - pp - } else { - self.ghostdag_managers[level as usize].find_selected_parent( - self.parents_manager - .parents_at_level(&pp_header.header, level) - .iter() - .filter(|parent| self.ghostdag_stores[level as usize].has(**parent).unwrap()) - .cloned(), - ) - } - }) - .collect_vec(); + let (ghostdag_stores, selected_tip_by_level, roots_by_level) = self.calc_gd_for_all_levels(&pp_header, temp_db); (0..=self.max_block_level) .map(|level| { let level = level as usize; let selected_tip = selected_tip_by_level[level]; let block_at_depth_2m = self - .block_at_depth(&*self.ghostdag_stores[level], selected_tip, 2 * self.pruning_proof_m) + .block_at_depth(&*ghostdag_stores[level], selected_tip, 2 * self.pruning_proof_m) .map_err(|err| format!("level: {}, err: {}", level, err)) .unwrap(); - let root = if level != self.max_block_level as usize { + // TODO (relaxed): remove the assertion below + // (New Logic) This is the root we calculated by going through block relations + let root = roots_by_level[level]; + // (Old Logic) This is the root we can calculate given that the GD records are already filled + // The root calc logic below is the original logic before the on-demand higher level GD 
calculation + // We only need old_root to sanity check the new logic + let old_root = if level != self.max_block_level as usize { let block_at_depth_m_at_next_level = self - .block_at_depth(&*self.ghostdag_stores[level + 1], selected_tip_by_level[level + 1], self.pruning_proof_m) + .block_at_depth(&*ghostdag_stores[level + 1], selected_tip_by_level[level + 1], self.pruning_proof_m) .map_err(|err| format!("level + 1: {}, err: {}", level + 1, err)) .unwrap(); if self.reachability_service.is_dag_ancestor_of(block_at_depth_m_at_next_level, block_at_depth_2m) { @@ -661,7 +1027,7 @@ impl PruningProofManager { block_at_depth_2m } else { self.find_common_ancestor_in_chain_of_a( - &*self.ghostdag_stores[level], + &*ghostdag_stores[level], block_at_depth_m_at_next_level, block_at_depth_2m, ) @@ -672,30 +1038,39 @@ impl PruningProofManager { block_at_depth_2m }; + // new root is expected to be always an ancestor of old_root because new root takes a safety margin + assert!(self.reachability_service.is_dag_ancestor_of(root, old_root)); + let mut headers = Vec::with_capacity(2 * self.pruning_proof_m as usize); let mut queue = BinaryHeap::>::new(); let mut visited = BlockHashSet::new(); - queue.push(Reverse(SortableBlock::new(root, self.ghostdag_stores[level].get_blue_work(root).unwrap()))); + queue.push(Reverse(SortableBlock::new(root, self.headers_store.get_header(root).unwrap().blue_work))); while let Some(current) = queue.pop() { let current = current.0.hash; if !visited.insert(current) { continue; } - if !self.reachability_service.is_dag_ancestor_of(current, selected_tip) { + // The second condition is always expected to be true (ghostdag store will have the entry) + // because we are traversing the exact diamond (future(root) ⋂ past(tip)) for which we calculated + // GD for (see fill_level_proof_ghostdag_data). 
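
// [Editor's note: illustrative sketch, not part of the patch.] The traversals above
// use a BinaryHeap of Reverse(SortableBlock) keyed by blue work: popping in ascending
// blue-work order guarantees a parent is visited before any of its descendants,
// because a block's blue work is never smaller than its selected parent's, which in
// turn is the maximum over all its parents. The heap therefore acts as an on-the-fly
// topological order. Plain (blue_work, hash) tuples stand in for SortableBlock here.
use std::cmp::Reverse;
use std::collections::BinaryHeap;

fn pop_in_topological_order(blocks: Vec<(u64 /* blue_work */, u64 /* hash */)>) -> Vec<u64> {
    let mut heap: BinaryHeap<Reverse<(u64, u64)>> = blocks.into_iter().map(Reverse).collect();
    let mut ordered = Vec::new();
    while let Some(Reverse((_blue_work, hash))) = heap.pop() {
        ordered.push(hash);
    }
    ordered
}
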
TODO (relaxed): remove the condition or turn into assertion + if !self.reachability_service.is_dag_ancestor_of(current, selected_tip) + || !ghostdag_stores[level].has(current).is_ok_and(|found| found) + { continue; } headers.push(self.headers_store.get_header(current).unwrap()); for child in self.relations_stores.read()[level].get_children(current).unwrap().read().iter().copied() { - queue.push(Reverse(SortableBlock::new(child, self.ghostdag_stores[level].get_blue_work(child).unwrap()))); + queue.push(Reverse(SortableBlock::new(child, self.headers_store.get_header(child).unwrap().blue_work))); } } + // TODO (relaxed): remove the assertion below // Temp assertion for verifying a bug fix: assert that the full 2M chain is actually contained in the composed level proof let set = BlockHashSet::from_iter(headers.iter().map(|h| h.hash)); let chain_2m = self - .chain_up_to_depth(&*self.ghostdag_stores[level], selected_tip, 2 * self.pruning_proof_m) + .chain_up_to_depth(&*ghostdag_stores[level], selected_tip, 2 * self.pruning_proof_m) .map_err(|err| { dbg!(level, selected_tip, block_at_depth_2m, root); format!("Assert 2M chain -- level: {}, err: {}", level, err) @@ -706,13 +1081,13 @@ impl PruningProofManager { if !set.contains(&chain_hash) { let next_level_tip = selected_tip_by_level[level + 1]; let next_level_chain_m = - self.chain_up_to_depth(&*self.ghostdag_stores[level + 1], next_level_tip, self.pruning_proof_m).unwrap(); + self.chain_up_to_depth(&*ghostdag_stores[level + 1], next_level_tip, self.pruning_proof_m).unwrap(); let next_level_block_m = next_level_chain_m.last().copied().unwrap(); dbg!(next_level_chain_m.len()); - dbg!(self.ghostdag_stores[level + 1].get_compact_data(next_level_tip).unwrap().blue_score); - dbg!(self.ghostdag_stores[level + 1].get_compact_data(next_level_block_m).unwrap().blue_score); - dbg!(self.ghostdag_stores[level].get_compact_data(selected_tip).unwrap().blue_score); - dbg!(self.ghostdag_stores[level].get_compact_data(block_at_depth_2m).unwrap().blue_score); + dbg!(ghostdag_stores[level + 1].get_compact_data(next_level_tip).unwrap().blue_score); + dbg!(ghostdag_stores[level + 1].get_compact_data(next_level_block_m).unwrap().blue_score); + dbg!(ghostdag_stores[level].get_compact_data(selected_tip).unwrap().blue_score); + dbg!(ghostdag_stores[level].get_compact_data(block_at_depth_2m).unwrap().blue_score); dbg!(level, selected_tip, block_at_depth_2m, root); panic!("Assert 2M chain -- missing block {} at index {} out of {} chain blocks", chain_hash, i, chain_2m_len); } @@ -723,6 +1098,80 @@ impl PruningProofManager { .collect_vec() } + /// BFS forward iterates from root until selected tip, ignoring blocks in the antipast of selected_tip. 
+ /// For each block along the way, insert that hash into the ghostdag_store + /// If we have a required_block to find, this will return true if that block was found along the way + fn fill_level_proof_ghostdag_data( + &self, + root: Hash, + selected_tip: Hash, + ghostdag_store: &Arc, + required_block: Option, + level: BlockLevel, + ) -> bool { + let relations_service = RelationsStoreInFutureOfRoot { + relations_store: self.level_relations_services[level as usize].clone(), + reachability_service: self.reachability_service.clone(), + root, + }; + let gd_manager = GhostdagManager::new( + root, + self.ghostdag_k, + ghostdag_store.clone(), + relations_service.clone(), + self.headers_store.clone(), + self.reachability_service.clone(), + level != 0, + ); + + ghostdag_store.insert(root, Arc::new(gd_manager.genesis_ghostdag_data())).unwrap(); + ghostdag_store.insert(ORIGIN, gd_manager.origin_ghostdag_data()).unwrap(); + + let mut topological_heap: BinaryHeap<_> = Default::default(); + let mut visited = BlockHashSet::new(); + for child in relations_service.get_children(root).unwrap().read().iter().copied() { + topological_heap.push(Reverse(SortableBlock { + hash: child, + // It's important to use here blue work and not score so we can iterate the heap in a way that respects the topology + blue_work: self.headers_store.get_header(child).unwrap().blue_work, + })); + } + + let mut has_required_block = required_block.is_some_and(|required_block| root == required_block); + loop { + let Some(current) = topological_heap.pop() else { + break; + }; + let current_hash = current.0.hash; + if !visited.insert(current_hash) { + continue; + } + + if !self.reachability_service.is_dag_ancestor_of(current_hash, selected_tip) { + // We don't care about blocks in the antipast of the selected tip + continue; + } + + if !has_required_block && required_block.is_some_and(|required_block| current_hash == required_block) { + has_required_block = true; + } + + let current_gd = gd_manager.ghostdag(&relations_service.get_parents(current_hash).unwrap()); + + ghostdag_store.insert(current_hash, Arc::new(current_gd)).unwrap_or_exists(); + + for child in relations_service.get_children(current_hash).unwrap().read().iter().copied() { + topological_heap.push(Reverse(SortableBlock { + hash: child, + // It's important to use here blue work and not score so we can iterate the heap in a way that respects the topology + blue_work: self.headers_store.get_header(child).unwrap().blue_work, + })); + } + } + + has_required_block + } + /// Copy of `block_at_depth` which returns the full chain up to depth. Temporarily used for assertion purposes. fn chain_up_to_depth( &self, @@ -780,6 +1229,42 @@ impl PruningProofManager { Ok(current) } + /// Finds the block on a given level that is at base_depth deep from it. 
+ /// Also returns if the block was the last one in the level + /// base_depth = the blue score depth at level 0 + fn level_block_at_base_depth( + &self, + level: BlockLevel, + high: Hash, + base_depth: u64, + ) -> PruningProofManagerInternalResult<(Hash, bool)> { + let high_header = self + .headers_store + .get_header(high) + .map_err(|err| PruningProofManagerInternalError::BlockAtDepth(format!("high: {high}, depth: {base_depth}, {err}")))?; + let high_header_score = high_header.blue_score; + let mut current_header = high_header; + + let mut is_last_header = false; + + while current_header.blue_score + base_depth >= high_header_score { + if current_header.direct_parents().is_empty() { + break; + } + + current_header = match self.find_selected_parent_header_at_level(¤t_header, level) { + Ok(header) => header, + Err(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof(_)) => { + // We want to give this root a shot if all its past is pruned + is_last_header = true; + break; + } + Err(e) => return Err(e), + }; + } + Ok((current_header.hash, is_last_header)) + } + fn find_common_ancestor_in_chain_of_a( &self, ghostdag_store: &impl GhostdagStoreReader, @@ -816,7 +1301,7 @@ impl PruningProofManager { let mut current = hash; for _ in 0..=self.ghostdag_k { hashes.push(current); - let Some(parent) = self.ghostdag_stores[0].get_selected_parent(current).unwrap_option() else { + let Some(parent) = self.ghostdag_store.get_selected_parent(current).unwrap_option() else { break; }; if parent == self.genesis_hash || parent == blockhash::ORIGIN { @@ -836,7 +1321,7 @@ impl PruningProofManager { .traversal_manager .anticone(pruning_point, virtual_parents, None) .expect("no error is expected when max_traversal_allowed is None"); - let mut anticone = self.ghostdag_managers[0].sort_blocks(anticone); + let mut anticone = self.ghostdag_manager.sort_blocks(anticone); anticone.insert(0, pruning_point); let mut daa_window_blocks = BlockHashMap::new(); @@ -847,14 +1332,14 @@ impl PruningProofManager { for anticone_block in anticone.iter().copied() { let window = self .window_manager - .block_window(&self.ghostdag_stores[0].get_data(anticone_block).unwrap(), WindowType::FullDifficultyWindow) + .block_window(&self.ghostdag_store.get_data(anticone_block).unwrap(), WindowType::FullDifficultyWindow) .unwrap(); for hash in window.deref().iter().map(|block| block.0.hash) { if let Entry::Vacant(e) = daa_window_blocks.entry(hash) { e.insert(TrustedHeader { header: self.headers_store.get_header(hash).unwrap(), - ghostdag: (&*self.ghostdag_stores[0].get_data(hash).unwrap()).into(), + ghostdag: (&*self.ghostdag_store.get_data(hash).unwrap()).into(), }); } } @@ -862,7 +1347,7 @@ impl PruningProofManager { let ghostdag_chain = self.get_ghostdag_chain_k_depth(anticone_block); for hash in ghostdag_chain { if let Entry::Vacant(e) = ghostdag_blocks.entry(hash) { - let ghostdag = self.ghostdag_stores[0].get_data(hash).unwrap(); + let ghostdag = self.ghostdag_store.get_data(hash).unwrap(); e.insert((&*ghostdag).into()); // We fill `ghostdag_blocks` only for kaspad-go legacy reasons, but the real set we @@ -894,7 +1379,7 @@ impl PruningProofManager { if header.blue_work < min_blue_work { continue; } - let ghostdag = (&*self.ghostdag_stores[0].get_data(current).unwrap()).into(); + let ghostdag = (&*self.ghostdag_store.get_data(current).unwrap()).into(); e.insert(TrustedHeader { header, ghostdag }); } let parents = self.relations_stores.read()[0].get_parents(current).unwrap(); diff --git a/database/src/key.rs b/database/src/key.rs 
index e8aeff091..83fa8ebb2 100644 --- a/database/src/key.rs +++ b/database/src/key.rs @@ -73,6 +73,8 @@ impl Display for DbKey { match prefix { Ghostdag | GhostdagCompact + | TempGhostdag + | TempGhostdagCompact | RelationsParents | RelationsChildren | Reachability diff --git a/database/src/registry.rs b/database/src/registry.rs index 752efb97b..36a728ebe 100644 --- a/database/src/registry.rs +++ b/database/src/registry.rs @@ -41,6 +41,10 @@ pub enum DatabaseStorePrefixes { ReachabilityTreeChildren = 30, ReachabilityFutureCoveringSet = 31, + // ---- Ghostdag Proof + TempGhostdag = 40, + TempGhostdagCompact = 41, + // ---- Metadata ---- MultiConsensusMetadata = 124, ConsensusEntries = 125, diff --git a/kaspad/Cargo.toml b/kaspad/Cargo.toml index 15a408dad..3507339f2 100644 --- a/kaspad/Cargo.toml +++ b/kaspad/Cargo.toml @@ -46,10 +46,12 @@ clap.workspace = true dhat = { workspace = true, optional = true } dirs.workspace = true futures-util.workspace = true +itertools.workspace = true log.workspace = true num_cpus.workspace = true rand.workspace = true rayon.workspace = true +rocksdb.workspace = true serde.workspace = true tempfile.workspace = true thiserror.workspace = true diff --git a/kaspad/src/daemon.rs b/kaspad/src/daemon.rs index 4175206eb..db9f32c16 100644 --- a/kaspad/src/daemon.rs +++ b/kaspad/src/daemon.rs @@ -6,9 +6,12 @@ use kaspa_consensus_core::{ errors::config::{ConfigError, ConfigResult}, }; use kaspa_consensus_notify::{root::ConsensusNotificationRoot, service::NotifyService}; -use kaspa_core::{core::Core, debug, info}; +use kaspa_core::{core::Core, debug, info, trace}; use kaspa_core::{kaspad_env::version, task::tick::TickService}; -use kaspa_database::prelude::CachePolicy; +use kaspa_database::{ + prelude::{CachePolicy, DbWriter, DirectDbWriter}, + registry::DatabaseStorePrefixes, +}; use kaspa_grpc_server::service::GrpcService; use kaspa_notify::{address::tracker::Tracker, subscription::context::SubscriptionContext}; use kaspa_rpc_service::service::RpcCoreService; @@ -33,6 +36,7 @@ use kaspa_mining::{ }; use kaspa_p2p_flows::{flow_context::FlowContext, service::P2pService}; +use itertools::Itertools; use kaspa_perf_monitor::{builder::Builder as PerfMonitorBuilder, counters::CountersSnapshot}; use kaspa_utxoindex::{api::UtxoIndexProxy, UtxoIndex}; use kaspa_wrpc_server::service::{Options as WrpcServerOptions, WebSocketCounters as WrpcServerCounters, WrpcEncoding, WrpcService}; @@ -316,13 +320,106 @@ do you confirm? (answer y/n or pass --yes to the Kaspad command line to confirm && (meta_db.get_pinned(b"multi-consensus-metadata-key").is_ok_and(|r| r.is_some()) || MultiConsensusManagementStore::new(meta_db.clone()).should_upgrade().unwrap()) { - let msg = - "Node database is from a different Kaspad *DB* version and needs to be fully deleted, do you confirm the delete? (y/n)"; - get_user_approval_or_exit(msg, args.yes); + let mut mcms = MultiConsensusManagementStore::new(meta_db.clone()); + let version = mcms.version().unwrap(); + + // TODO: Update this entire section to a more robust implementation that allows applying multiple upgrade strategies. 
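
// [Editor's note: illustrative sketch, not part of the patch.] One possible shape for
// the more robust upgrade flow this TODO (continued just below) asks for: an ordered
// list of migration steps, where steps[v] migrates the database from version v to
// v + 1, applied in sequence until the latest version is reached. Types are heavily
// simplified; this is only a sketch of the pattern, not the project's actual design.
type MigrationStep = fn() -> Result<(), String>;

fn upgrade_to_latest(mut version: u32, latest: u32, steps: &[MigrationStep]) -> Result<(), String> {
    while version < latest {
        let step = steps
            .get(version as usize)
            .ok_or_else(|| format!("no migration step registered for version {version}"))?;
        step()?;
        version += 1;
    }
    Ok(())
}
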
+ // If I'm at version 3 and latest version is 7, I need to be able to upgrade to that version following the intermediate + // steps without having to delete the DB + if version == 3 { + let active_consensus_dir_name = mcms.active_consensus_dir_name().unwrap(); + + match active_consensus_dir_name { + Some(current_consensus_db) => { + // Apply soft upgrade logic: delete GD data from higher levels + // and then update DB version to 4 + let consensus_db = kaspa_database::prelude::ConnBuilder::default() + .with_db_path(consensus_db_dir.clone().join(current_consensus_db)) + .with_files_limit(1) + .build() + .unwrap(); + info!("Scanning for deprecated records to cleanup"); + + let mut gd_record_count: u32 = 0; + let mut compact_record_count: u32 = 0; + + let start_level: u8 = 1; + let start_level_bytes = start_level.to_le_bytes(); + let ghostdag_prefix_vec = DatabaseStorePrefixes::Ghostdag.into_iter().chain(start_level_bytes).collect_vec(); + let ghostdag_prefix = ghostdag_prefix_vec.as_slice(); + + // This section is used to count the records to be deleted. It's not used for the actual delete. + for result in consensus_db.iterator(rocksdb::IteratorMode::From(ghostdag_prefix, rocksdb::Direction::Forward)) { + let (key, _) = result.unwrap(); + if !key.starts_with(&[DatabaseStorePrefixes::Ghostdag.into()]) { + break; + } + + gd_record_count += 1; + } + + let compact_prefix_vec = DatabaseStorePrefixes::GhostdagCompact.into_iter().chain(start_level_bytes).collect_vec(); + let compact_prefix = compact_prefix_vec.as_slice(); + + for result in consensus_db.iterator(rocksdb::IteratorMode::From(compact_prefix, rocksdb::Direction::Forward)) { + let (key, _) = result.unwrap(); + if !key.starts_with(&[DatabaseStorePrefixes::GhostdagCompact.into()]) { + break; + } + + compact_record_count += 1; + } + + trace!("Number of Ghostdag records to cleanup: {}", gd_record_count); + trace!("Number of GhostdagCompact records to cleanup: {}", compact_record_count); + info!("Number of deprecated records to cleanup: {}", gd_record_count + compact_record_count); + + let msg = + "Node database currently at version 3. Upgrade process to version 4 needs to be applied. Continue? 
(y/n)"; + get_user_approval_or_exit(msg, args.yes); + + // Actual delete only happens after user consents to the upgrade: + let mut writer = DirectDbWriter::new(&consensus_db); + + let end_level: u8 = config.max_block_level + 1; + let end_level_bytes = end_level.to_le_bytes(); - info!("Deleting databases from previous Kaspad version"); + let start_ghostdag_prefix_vec = DatabaseStorePrefixes::Ghostdag.into_iter().chain(start_level_bytes).collect_vec(); + let end_ghostdag_prefix_vec = DatabaseStorePrefixes::Ghostdag.into_iter().chain(end_level_bytes).collect_vec(); - is_db_reset_needed = true; + let start_compact_prefix_vec = + DatabaseStorePrefixes::GhostdagCompact.into_iter().chain(start_level_bytes).collect_vec(); + let end_compact_prefix_vec = + DatabaseStorePrefixes::GhostdagCompact.into_iter().chain(end_level_bytes).collect_vec(); + + // Apply delete of range from level 1 to max (+1) for Ghostdag and GhostdagCompact: + writer.delete_range(start_ghostdag_prefix_vec.clone(), end_ghostdag_prefix_vec.clone()).unwrap(); + writer.delete_range(start_compact_prefix_vec.clone(), end_compact_prefix_vec.clone()).unwrap(); + + // Compact the deleted rangeto apply the delete immediately + consensus_db.compact_range(Some(start_ghostdag_prefix_vec.as_slice()), Some(end_ghostdag_prefix_vec.as_slice())); + consensus_db.compact_range(Some(start_compact_prefix_vec.as_slice()), Some(end_compact_prefix_vec.as_slice())); + + // Also update the version to one higher: + mcms.set_version(version + 1).unwrap(); + } + None => { + let msg = + "Node database is from a different Kaspad *DB* version and needs to be fully deleted, do you confirm the delete? (y/n)"; + get_user_approval_or_exit(msg, args.yes); + + is_db_reset_needed = true; + } + } + } else { + let msg = + "Node database is from a different Kaspad *DB* version and needs to be fully deleted, do you confirm the delete? (y/n)"; + get_user_approval_or_exit(msg, args.yes); + + info!("Deleting databases from previous Kaspad version"); + + is_db_reset_needed = true; + } } // Will be true if any of the other condition above except args.reset_db diff --git a/simpa/src/main.rs b/simpa/src/main.rs index c66656be3..2994b0a09 100644 --- a/simpa/src/main.rs +++ b/simpa/src/main.rs @@ -425,12 +425,12 @@ fn topologically_ordered_hashes(src_consensus: &Consensus, genesis_hash: Hash) - } fn print_stats(src_consensus: &Consensus, hashes: &[Hash], delay: f64, bps: f64, k: KType) -> usize { - let blues_mean = - hashes.iter().map(|&h| src_consensus.ghostdag_primary_store.get_data(h).unwrap().mergeset_blues.len()).sum::() as f64 - / hashes.len() as f64; - let reds_mean = - hashes.iter().map(|&h| src_consensus.ghostdag_primary_store.get_data(h).unwrap().mergeset_reds.len()).sum::() as f64 - / hashes.len() as f64; + let blues_mean = hashes.iter().map(|&h| src_consensus.ghostdag_store.get_data(h).unwrap().mergeset_blues.len()).sum::() + as f64 + / hashes.len() as f64; + let reds_mean = hashes.iter().map(|&h| src_consensus.ghostdag_store.get_data(h).unwrap().mergeset_reds.len()).sum::() + as f64 + / hashes.len() as f64; let parents_mean = hashes.iter().map(|&h| src_consensus.headers_store.get_header(h).unwrap().direct_parents().len()).sum::() as f64 / hashes.len() as f64;