diff --git a/Cargo.lock b/Cargo.lock index 59ba9406d..2eeb02b84 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2492,6 +2492,7 @@ dependencies = [ "futures", "futures-util", "h2", + "itertools 0.11.0", "kaspa-core", "kaspa-grpc-core", "kaspa-notify", @@ -2499,6 +2500,7 @@ dependencies = [ "kaspa-utils", "kaspa-utils-tower", "log", + "parking_lot", "paste", "prost", "rand 0.8.5", @@ -2740,13 +2742,18 @@ dependencies = [ "async-channel 2.1.1", "async-trait", "borsh", + "criterion", "derive_more", "futures", "futures-util", + "indexmap 2.1.0", "itertools 0.11.0", "kaspa-addresses", + "kaspa-alloc", "kaspa-consensus-core", "kaspa-core", + "kaspa-hashes", + "kaspa-math", "kaspa-txscript", "kaspa-txscript-errors", "kaspa-utils", @@ -2760,6 +2767,7 @@ dependencies = [ "triggered", "workflow-core", "workflow-log", + "workflow-perf-monitor", ] [[package]] @@ -2956,6 +2964,7 @@ dependencies = [ "kaspa-utxoindex", "log", "tokio", + "triggered", "workflow-rpc", ] @@ -2964,9 +2973,13 @@ name = "kaspa-testing-integration" version = "0.13.6" dependencies = [ "async-channel 2.1.1", + "async-trait", "bincode", + "chrono", + "clap 4.4.11", "criterion", "crossbeam-channel", + "dhat", "faster-hex 0.6.1", "flate2", "futures-util", @@ -2983,6 +2996,7 @@ dependencies = [ "kaspa-database", "kaspa-grpc-client", "kaspa-grpc-core", + "kaspa-grpc-server", "kaspa-hashes", "kaspa-index-processor", "kaspa-math", @@ -2991,6 +3005,7 @@ dependencies = [ "kaspa-notify", "kaspa-pow", "kaspa-rpc-core", + "kaspa-rpc-service", "kaspa-txscript", "kaspa-txscript-errors", "kaspa-utils", @@ -3010,6 +3025,7 @@ dependencies = [ "tempfile", "thiserror", "tokio", + "workflow-perf-monitor", ] [[package]] @@ -3346,6 +3362,7 @@ dependencies = [ "kaspa-hashes", "kaspa-index-processor", "kaspa-mining", + "kaspa-notify", "kaspa-p2p-flows", "kaspa-perf-monitor", "kaspa-rpc-core", @@ -4663,6 +4680,7 @@ dependencies = [ "kaspa-consensus-core", "kaspa-core", "kaspa-grpc-client", + "kaspa-notify", "kaspa-rpc-core", "kaspa-txscript", "kaspa-utils", diff --git a/Cargo.toml b/Cargo.toml index ca39e082e..4666e211e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -51,8 +51,8 @@ members = [ "utils", "utils/tower", "rothschild", - "metrics/perf_monitor", "metrics/core", + "metrics/perf_monitor", "utils/alloc", ] diff --git a/consensus/core/src/errors/config.rs b/consensus/core/src/errors/config.rs index 2b73e07e9..fb2656d46 100644 --- a/consensus/core/src/errors/config.rs +++ b/consensus/core/src/errors/config.rs @@ -14,6 +14,9 @@ pub enum ConfigError { #[error("Configuration: --ram-scale cannot be set above 10.0")] RamScaleTooHigh, + #[error("Configuration: --max-tracked-addresses cannot be set above {0}")] + MaxTrackedAddressesTooHigh(usize), + #[cfg(feature = "devnet-prealloc")] #[error("Cannot preallocate UTXOs on any network except devnet")] PreallocUtxosOnNonDevnet, diff --git a/consensus/notify/src/notification.rs b/consensus/notify/src/notification.rs index af015e199..a9e758b0a 100644 --- a/consensus/notify/src/notification.rs +++ b/consensus/notify/src/notification.rs @@ -6,6 +6,7 @@ use kaspa_notify::{ full_featured, notification::Notification as NotificationTrait, subscription::{ + context::SubscriptionContext, single::{OverallSubscription, UtxosChangedSubscription, VirtualChainChangedSubscription}, Subscription, }, @@ -45,14 +46,18 @@ pub enum Notification { } impl NotificationTrait for Notification { - fn apply_overall_subscription(&self, subscription: &OverallSubscription) -> Option { + fn apply_overall_subscription(&self, subscription: 
&OverallSubscription, _context: &SubscriptionContext) -> Option { match subscription.active() { true => Some(self.clone()), false => None, } } - fn apply_virtual_chain_changed_subscription(&self, subscription: &VirtualChainChangedSubscription) -> Option { + fn apply_virtual_chain_changed_subscription( + &self, + subscription: &VirtualChainChangedSubscription, + _context: &SubscriptionContext, + ) -> Option { match subscription.active() { true => { // If the subscription excludes accepted transaction ids and the notification includes some @@ -72,7 +77,11 @@ impl NotificationTrait for Notification { } } - fn apply_utxos_changed_subscription(&self, _subscription: &UtxosChangedSubscription) -> Option { + fn apply_utxos_changed_subscription( + &self, + _subscription: &UtxosChangedSubscription, + _context: &SubscriptionContext, + ) -> Option { // No effort is made here to apply the subscription addresses. // This will be achieved farther along the notification backbone. Some(self.clone()) diff --git a/consensus/notify/src/service.rs b/consensus/notify/src/service.rs index 9b4b61390..1b669f387 100644 --- a/consensus/notify/src/service.rs +++ b/consensus/notify/src/service.rs @@ -12,6 +12,7 @@ use kaspa_core::{ use kaspa_notify::{ events::{EventSwitches, EVENT_TYPE_ARRAY}, subscriber::Subscriber, + subscription::{context::SubscriptionContext, MutationPolicies, UtxosChangedMutationPolicy}, }; use kaspa_utils::triggers::SingleTrigger; use std::sync::Arc; @@ -24,11 +25,24 @@ pub struct NotifyService { } impl NotifyService { - pub fn new(root: Arc, notification_receiver: Receiver) -> Self { + pub fn new( + root: Arc, + notification_receiver: Receiver, + subscription_context: SubscriptionContext, + ) -> Self { let root_events: EventSwitches = EVENT_TYPE_ARRAY[..].into(); let collector = Arc::new(ConsensusCollector::new(NOTIFY_SERVICE, notification_receiver, Arc::new(ConsensusConverter::new()))); let subscriber = Arc::new(Subscriber::new(NOTIFY_SERVICE, root_events, root, 0)); - let notifier = Arc::new(ConsensusNotifier::new(NOTIFY_SERVICE, root_events, vec![collector], vec![subscriber], 1)); + let policies = MutationPolicies::new(UtxosChangedMutationPolicy::Wildcard); + let notifier = Arc::new(ConsensusNotifier::new( + NOTIFY_SERVICE, + root_events, + vec![collector], + vec![subscriber], + subscription_context, + 1, + policies, + )); Self { notifier, shutdown: SingleTrigger::default() } } diff --git a/consensus/src/consensus/test_consensus.rs b/consensus/src/consensus/test_consensus.rs index fdf7c4729..c626e00ff 100644 --- a/consensus/src/consensus/test_consensus.rs +++ b/consensus/src/consensus/test_consensus.rs @@ -10,6 +10,7 @@ use kaspa_consensusmanager::{ConsensusFactory, ConsensusInstance, DynConsensusCt use kaspa_core::{core::Core, service::Service}; use kaspa_database::utils::DbLifetime; use kaspa_hashes::Hash; +use kaspa_notify::subscription::context::SubscriptionContext; use parking_lot::RwLock; use kaspa_database::create_temp_db; @@ -66,9 +67,9 @@ impl TestConsensus { } /// Creates a test consensus instance based on `config` with a temp DB and the provided `notification_sender` - pub fn with_notifier(config: &Config, notification_sender: Sender) -> Self { + pub fn with_notifier(config: &Config, notification_sender: Sender, context: SubscriptionContext) -> Self { let (db_lifetime, db) = create_temp_db!(ConnBuilder::default().with_files_limit(10)); - let notification_root = Arc::new(ConsensusNotificationRoot::new(notification_sender)); + let notification_root = 
Arc::new(ConsensusNotificationRoot::with_context(notification_sender, context)); let counters = Default::default(); let tx_script_cache_counters = Default::default(); let consensus = Arc::new(Consensus::new( diff --git a/consensus/src/processes/transaction_validator/transaction_validator_populated.rs b/consensus/src/processes/transaction_validator/transaction_validator_populated.rs index fa387ab58..ca3186368 100644 --- a/consensus/src/processes/transaction_validator/transaction_validator_populated.rs +++ b/consensus/src/processes/transaction_validator/transaction_validator_populated.rs @@ -35,7 +35,7 @@ impl TransactionValidator { // Storage mass hardfork was activated self.check_mass_commitment(tx)?; - if pov_daa_score < self.storage_mass_activation_daa_score + 10 { + if pov_daa_score < self.storage_mass_activation_daa_score + 10 && self.storage_mass_activation_daa_score > 0 { warn!("--------- Storage mass hardfork was activated successfully!!! --------- (DAA score: {})", pov_daa_score); } } diff --git a/core/src/core.rs b/core/src/core.rs index a7a940934..75c59fc25 100644 --- a/core/src/core.rs +++ b/core/src/core.rs @@ -67,13 +67,11 @@ impl Core { impl Shutdown for Core { fn shutdown(self: &Arc) { - let keep_running = self.keep_running.load(Ordering::SeqCst); - if !keep_running { + if self.keep_running.compare_exchange(true, false, Ordering::SeqCst, Ordering::SeqCst).is_err() { return; } trace!("signaling core shutdown..."); - self.keep_running.store(false, Ordering::SeqCst); { for service in self.services.lock().unwrap().iter() { diff --git a/core/src/task/runtime.rs b/core/src/task/runtime.rs index 64f049524..13deaae6b 100644 --- a/core/src/task/runtime.rs +++ b/core/src/task/runtime.rs @@ -40,6 +40,10 @@ impl AsyncRuntime { self.services.lock().unwrap().push(service); } + pub fn find(&self, ident: &'static str) -> Option> { + self.services.lock().unwrap().iter().find(|s| (*s).clone().ident() == ident).cloned() + } + pub fn init(self: Arc, core: Arc) -> Vec> { trace!("initializing async-runtime service"); vec![thread::Builder::new().name(Self::IDENT.to_string()).spawn(move || self.worker(core)).unwrap()] diff --git a/indexes/core/src/notification.rs b/indexes/core/src/notification.rs index 5017649d7..409e7670e 100644 --- a/indexes/core/src/notification.rs +++ b/indexes/core/src/notification.rs @@ -5,6 +5,7 @@ use kaspa_notify::{ full_featured, notification::Notification as NotificationTrait, subscription::{ + context::SubscriptionContext, single::{OverallSubscription, UtxosChangedSubscription, VirtualChainChangedSubscription}, Subscription, }, @@ -23,22 +24,30 @@ pub enum Notification { } impl NotificationTrait for Notification { - fn apply_overall_subscription(&self, subscription: &OverallSubscription) -> Option { + fn apply_overall_subscription(&self, subscription: &OverallSubscription, _context: &SubscriptionContext) -> Option { match subscription.active() { true => Some(self.clone()), false => None, } } - fn apply_virtual_chain_changed_subscription(&self, _subscription: &VirtualChainChangedSubscription) -> Option { + fn apply_virtual_chain_changed_subscription( + &self, + _subscription: &VirtualChainChangedSubscription, + _context: &SubscriptionContext, + ) -> Option { Some(self.clone()) } - fn apply_utxos_changed_subscription(&self, subscription: &UtxosChangedSubscription) -> Option { + fn apply_utxos_changed_subscription( + &self, + subscription: &UtxosChangedSubscription, + context: &SubscriptionContext, + ) -> Option { match subscription.active() { true => { let 
Self::UtxosChanged(notification) = self else { return None }; - notification.apply_utxos_changed_subscription(subscription).map(Self::UtxosChanged) + notification.apply_utxos_changed_subscription(subscription, context).map(Self::UtxosChanged) } false => None, } @@ -69,12 +78,16 @@ impl UtxosChangedNotification { Self { added: Arc::new(utxos_changed.added), removed: Arc::new(utxos_changed.removed) } } - pub(crate) fn apply_utxos_changed_subscription(&self, subscription: &UtxosChangedSubscription) -> Option { + pub(crate) fn apply_utxos_changed_subscription( + &self, + subscription: &UtxosChangedSubscription, + context: &SubscriptionContext, + ) -> Option { if subscription.to_all() { Some(self.clone()) } else { - let added = Self::filter_utxo_set(&self.added, subscription); - let removed = Self::filter_utxo_set(&self.removed, subscription); + let added = Self::filter_utxo_set(&self.added, subscription, context); + let removed = Self::filter_utxo_set(&self.removed, subscription, context); if added.is_empty() && removed.is_empty() { None } else { @@ -83,24 +96,32 @@ impl UtxosChangedNotification { } } - fn filter_utxo_set(utxo_set: &UtxoSetByScriptPublicKey, subscription: &UtxosChangedSubscription) -> UtxoSetByScriptPublicKey { + fn filter_utxo_set( + utxo_set: &UtxoSetByScriptPublicKey, + subscription: &UtxosChangedSubscription, + context: &SubscriptionContext, + ) -> UtxoSetByScriptPublicKey { // As an optimization, we iterate over the smaller set (O(n)) among the two below // and check existence over the larger set (O(1)) let mut result = HashMap::default(); - if utxo_set.len() < subscription.addresses().len() { - utxo_set.iter().for_each(|(script_public_key, collection)| { - if subscription.addresses().contains_key(script_public_key) { - result.insert(script_public_key.clone(), collection.clone()); - } - }); + let subscription_data = subscription.data(); + if utxo_set.len() < subscription_data.len() { + { + utxo_set.iter().for_each(|(script_public_key, collection)| { + if subscription_data.contains(script_public_key, context) { + result.insert(script_public_key.clone(), collection.clone()); + } + }); + } } else { - subscription.addresses().iter().filter(|(script_public_key, _)| utxo_set.contains_key(script_public_key)).for_each( - |(script_public_key, _)| { + let tracker_data = context.address_tracker.data(); + subscription_data.iter().for_each(|index| { + if let Some(script_public_key) = tracker_data.get_index(*index) { if let Some(collection) = utxo_set.get(script_public_key) { result.insert(script_public_key.clone(), collection.clone()); } - }, - ); + } + }); } result } diff --git a/indexes/processor/src/service.rs b/indexes/processor/src/service.rs index f92d19605..81a440833 100644 --- a/indexes/processor/src/service.rs +++ b/indexes/processor/src/service.rs @@ -10,7 +10,9 @@ use kaspa_index_core::notifier::IndexNotifier; use kaspa_notify::{ connection::ChannelType, events::{EventSwitches, EventType}, - scope::{PruningPointUtxoSetOverrideScope, Scope, UtxosChangedScope}, + listener::ListenerLifespan, + scope::{PruningPointUtxoSetOverrideScope, UtxosChangedScope}, + subscription::{context::SubscriptionContext, MutationPolicies, UtxosChangedMutationPolicy}, }; use kaspa_utils::{channel::Channel, triggers::SingleTrigger}; use kaspa_utxoindex::api::UtxoIndexProxy; @@ -25,24 +27,33 @@ pub struct IndexService { } impl IndexService { - pub fn new(consensus_notifier: &Arc, utxoindex: Option) -> Self { + pub fn new( + consensus_notifier: &Arc, + subscription_context: SubscriptionContext, + 
utxoindex: Option, + ) -> Self { + // This notifier UTXOs subscription granularity to consensus notifier + let policies = MutationPolicies::new(UtxosChangedMutationPolicy::Wildcard); + // Prepare consensus-notify objects let consensus_notify_channel = Channel::::default(); - let consensus_notify_listener_id = consensus_notifier - .register_new_listener(ConsensusChannelConnection::new(consensus_notify_channel.sender(), ChannelType::Closable)); + let consensus_notify_listener_id = consensus_notifier.register_new_listener( + ConsensusChannelConnection::new(INDEX_SERVICE, consensus_notify_channel.sender(), ChannelType::Closable), + ListenerLifespan::Static(policies), + ); // Prepare the index-processor notifier // No subscriber is defined here because the subscription are manually created during the construction and never changed after that. let events: EventSwitches = [EventType::UtxosChanged, EventType::PruningPointUtxoSetOverride].as_ref().into(); let collector = Arc::new(Processor::new(utxoindex.clone(), consensus_notify_channel.receiver())); - let notifier = Arc::new(IndexNotifier::new(INDEX_SERVICE, events, vec![collector], vec![], 1)); + let notifier = Arc::new(IndexNotifier::new(INDEX_SERVICE, events, vec![collector], vec![], subscription_context, 1, policies)); // Manually subscribe to index-processor related event types consensus_notifier - .try_start_notify(consensus_notify_listener_id, Scope::UtxosChanged(UtxosChangedScope::default())) + .try_start_notify(consensus_notify_listener_id, UtxosChangedScope::default().into()) .expect("the subscription always succeeds"); consensus_notifier - .try_start_notify(consensus_notify_listener_id, Scope::PruningPointUtxoSetOverride(PruningPointUtxoSetOverrideScope {})) + .try_start_notify(consensus_notify_listener_id, PruningPointUtxoSetOverrideScope::default().into()) .expect("the subscription always succeeds"); Self { utxoindex, notifier, shutdown: SingleTrigger::default() } diff --git a/kaspad/Cargo.toml b/kaspad/Cargo.toml index 9b56bd00f..9f3290a51 100644 --- a/kaspad/Cargo.toml +++ b/kaspad/Cargo.toml @@ -29,6 +29,7 @@ kaspa-grpc-server.workspace = true kaspa-hashes.workspace = true kaspa-index-processor.workspace = true kaspa-mining.workspace = true +kaspa-notify.workspace = true kaspa-p2p-flows.workspace = true kaspa-perf-monitor.workspace = true kaspa-rpc-core.workspace = true diff --git a/kaspad/src/args.rs b/kaspad/src/args.rs index 727793037..d7254fb0d 100644 --- a/kaspad/src/args.rs +++ b/kaspad/src/args.rs @@ -1,28 +1,25 @@ -use clap::ArgAction; -#[allow(unused)] -use clap::{arg, command, Arg, Command}; -#[cfg(feature = "devnet-prealloc")] -use kaspa_addresses::Address; -#[cfg(feature = "devnet-prealloc")] -use kaspa_consensus_core::tx::{TransactionOutpoint, UtxoEntry}; -#[cfg(feature = "devnet-prealloc")] -use kaspa_txscript::pay_to_address_script; -use serde::Deserialize; -#[cfg(feature = "devnet-prealloc")] -use std::sync::Arc; -use std::{ffi::OsString, fs}; -use toml::from_str; - +use clap::{arg, Arg, ArgAction, Command}; use kaspa_consensus_core::{ config::Config, network::{NetworkId, NetworkType}, }; - use kaspa_core::kaspad_env::version; - +use kaspa_notify::address::tracker::Tracker; use kaspa_utils::networking::ContextualNetAddress; use kaspa_wrpc_server::address::WrpcNetAddress; +use serde::Deserialize; use serde_with::{serde_as, DisplayFromStr}; +use std::{ffi::OsString, fs}; +use toml::from_str; + +#[cfg(feature = "devnet-prealloc")] +use kaspa_addresses::Address; +#[cfg(feature = "devnet-prealloc")] +use 
kaspa_consensus_core::tx::{TransactionOutpoint, UtxoEntry}; +#[cfg(feature = "devnet-prealloc")] +use kaspa_txscript::pay_to_address_script; +#[cfg(feature = "devnet-prealloc")] +use std::sync::Arc; #[serde_as] #[derive(Debug, Clone, Deserialize)] @@ -63,6 +60,7 @@ pub struct Args { pub inbound_limit: usize, #[serde(rename = "rpcmaxclients")] pub rpc_max_clients: usize, + pub max_tracked_addresses: usize, pub enable_unsynced_mining: bool, pub enable_mainnet_mining: bool, pub testnet: bool, @@ -108,6 +106,7 @@ impl Default for Args { outbound_target: 8, inbound_limit: 128, rpc_max_clients: 128, + max_tracked_addresses: Tracker::DEFAULT_MAX_ADDRESSES, enable_unsynced_mining: false, enable_mainnet_mining: false, testnet: false, @@ -312,6 +311,14 @@ pub fn cli() -> Command { .help("Allow mainnet mining (do not use unless you know what you are doing)"), ) .arg(arg!(--utxoindex "Enable the UTXO index")) + .arg( + Arg::new("max-tracked-addresses") + .long("max-tracked-addresses") + .require_equals(true) + .value_parser(clap::value_parser!(usize)) + .help(format!("Max preallocated number of addresses tracking UTXO changed events (default: {}, maximum: {}). +Value 0 prevents the preallocation, leading to a 0 memory footprint as long as unused but then to a sub-optimal footprint when used.", Tracker::DEFAULT_MAX_ADDRESSES, Tracker::MAX_ADDRESS_UPPER_BOUND)), + ) .arg(arg!(--testnet "Use the test network")) .arg( Arg::new("netsuffix") @@ -418,6 +425,7 @@ impl Args { outbound_target: arg_match_unwrap_or::(&m, "outpeers", defaults.outbound_target), inbound_limit: arg_match_unwrap_or::(&m, "maxinpeers", defaults.inbound_limit), rpc_max_clients: arg_match_unwrap_or::(&m, "rpcmaxclients", defaults.rpc_max_clients), + max_tracked_addresses: arg_match_unwrap_or::(&m, "max-tracked-addresses", defaults.max_tracked_addresses), reset_db: arg_match_unwrap_or::(&m, "reset-db", defaults.reset_db), enable_unsynced_mining: arg_match_unwrap_or::(&m, "enable-unsynced-mining", defaults.enable_unsynced_mining), enable_mainnet_mining: arg_match_unwrap_or::(&m, "enable-mainnet-mining", defaults.enable_mainnet_mining), diff --git a/kaspad/src/daemon.rs b/kaspad/src/daemon.rs index 130cf92d2..dd8a215e5 100644 --- a/kaspad/src/daemon.rs +++ b/kaspad/src/daemon.rs @@ -10,6 +10,7 @@ use kaspa_core::{core::Core, info, trace}; use kaspa_core::{kaspad_env::version, task::tick::TickService}; use kaspa_database::prelude::CachePolicy; use kaspa_grpc_server::service::GrpcService; +use kaspa_notify::{address::tracker::Tracker, subscription::context::SubscriptionContext}; use kaspa_rpc_service::service::RpcCoreService; use kaspa_txscript::caches::TxScriptCacheCounters; use kaspa_utils::networking::ContextualNetAddress; @@ -91,6 +92,9 @@ pub fn validate_args(args: &Args) -> ConfigResult<()> { if args.ram_scale > 10.0 { return Err(ConfigError::RamScaleTooHigh); } + if args.max_tracked_addresses > Tracker::MAX_ADDRESS_UPPER_BOUND { + return Err(ConfigError::MaxTrackedAddressesTooHigh(Tracker::MAX_ADDRESS_UPPER_BOUND)); + } Ok(()) } @@ -356,7 +360,9 @@ do you confirm? 
(answer y/n or pass --yes to the Kaspad command line to confirm let tick_service = Arc::new(TickService::new()); let (notification_send, notification_recv) = unbounded(); - let notification_root = Arc::new(ConsensusNotificationRoot::new(notification_send)); + let max_tracked_addresses = if args.utxoindex && args.max_tracked_addresses > 0 { Some(args.max_tracked_addresses) } else { None }; + let subscription_context = SubscriptionContext::with_options(max_tracked_addresses); + let notification_root = Arc::new(ConsensusNotificationRoot::with_context(notification_send, subscription_context.clone())); let processing_counters = Arc::new(ProcessingCounters::default()); let mining_counters = Arc::new(MiningCounters::default()); let wrpc_borsh_counters = Arc::new(WrpcServerCounters::default()); @@ -395,7 +401,7 @@ do you confirm? (answer y/n or pass --yes to the Kaspad command line to confirm Arc::new(perf_monitor_builder.build()) }; - let notify_service = Arc::new(NotifyService::new(notification_root.clone(), notification_recv)); + let notify_service = Arc::new(NotifyService::new(notification_root.clone(), notification_recv, subscription_context.clone())); let index_service: Option> = if args.utxoindex { // Use only a single thread for none-consensus databases let utxoindex_db = kaspa_database::prelude::ConnBuilder::default() @@ -404,7 +410,7 @@ do you confirm? (answer y/n or pass --yes to the Kaspad command line to confirm .build() .unwrap(); let utxoindex = UtxoIndexProxy::new(UtxoIndex::new(consensus_manager.clone(), utxoindex_db).unwrap()); - let index_service = Arc::new(IndexService::new(¬ify_service.notifier(), Some(utxoindex))); + let index_service = Arc::new(IndexService::new(¬ify_service.notifier(), subscription_context.clone(), Some(utxoindex))); Some(index_service) } else { None @@ -448,6 +454,7 @@ do you confirm? (answer y/n or pass --yes to the Kaspad command line to confirm index_service.as_ref().map(|x| x.notifier()), mining_manager, flow_context, + subscription_context, index_service.as_ref().map(|x| x.utxoindex().unwrap()), config.clone(), core.clone(), @@ -458,6 +465,19 @@ do you confirm? (answer y/n or pass --yes to the Kaspad command line to confirm p2p_tower_counters.clone(), grpc_tower_counters.clone(), )); + let grpc_service_broadcasters: usize = 3; // TODO: add a command line argument or derive from other arg/config/host-related fields + let grpc_service = if !args.disable_grpc { + Some(Arc::new(GrpcService::new( + grpc_server_addr, + config, + rpc_core_service.clone(), + args.rpc_max_clients, + grpc_service_broadcasters, + grpc_tower_counters, + ))) + } else { + None + }; // Create an async runtime and register the top-level async services let async_runtime = Arc::new(AsyncRuntime::new(args.async_threads)); @@ -470,10 +490,8 @@ do you confirm? 
(answer y/n or pass --yes to the Kaspad command line to confirm async_runtime.register(Arc::new(port_mapping_extender_svc)) }; async_runtime.register(rpc_core_service.clone()); - if !args.disable_grpc { - let grpc_service = - Arc::new(GrpcService::new(grpc_server_addr, config, rpc_core_service.clone(), args.rpc_max_clients, grpc_tower_counters)); - async_runtime.register(grpc_service); + if let Some(grpc_service) = grpc_service { + async_runtime.register(grpc_service) } async_runtime.register(p2p_service); async_runtime.register(consensus_monitor); diff --git a/notify/Cargo.toml b/notify/Cargo.toml index cce40d6f8..09d3f5865 100644 --- a/notify/Cargo.toml +++ b/notify/Cargo.toml @@ -16,10 +16,12 @@ borsh.workspace = true derive_more.workspace = true futures-util.workspace = true futures.workspace = true +indexmap.workspace = true itertools.workspace = true kaspa-addresses.workspace = true kaspa-consensus-core.workspace = true kaspa-core.workspace = true +kaspa-hashes.workspace = true kaspa-txscript-errors.workspace = true kaspa-txscript.workspace = true kaspa-utils.workspace = true @@ -34,4 +36,12 @@ workflow-core.workspace = true workflow-log.workspace = true [dev-dependencies] +criterion.workspace = true +kaspa-alloc.workspace = true +kaspa-math.workspace = true tokio = { workspace = true, features = ["rt", "macros"] } +workflow-perf-monitor.workspace = true + +[[bench]] +name = "bench" +harness = false diff --git a/notify/benches/bench.rs b/notify/benches/bench.rs new file mode 100644 index 000000000..3bb292169 --- /dev/null +++ b/notify/benches/bench.rs @@ -0,0 +1,30 @@ +use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use kaspa_addresses::{Address, Prefix}; +use kaspa_math::Uint256; +use kaspa_notify::{address::tracker::Indexes, subscription::context::SubscriptionContext}; + +fn create_addresses(count: usize) -> Vec
<Address> {
+    (0..count)
+        .map(|i| Address::new(Prefix::Mainnet, kaspa_addresses::Version::PubKey, &Uint256::from_u64(i as u64).to_le_bytes()))
+        .collect()
+}
+
+fn create_and_fill_context(addresses: Vec<Address>
) -> SubscriptionContext { + let mut indexes = Indexes::new(vec![]); + let context = SubscriptionContext::with_options(Some(ADDRESS_COUNT)); + let _ = context.address_tracker.register(&mut indexes, addresses); + context +} + +const ADDRESS_COUNT: usize = 1_000_000; + +pub fn bench_subscription_context(c: &mut Criterion) { + c.bench_function("create_and_fill_context", |b| { + let addresses = create_addresses(ADDRESS_COUNT); + b.iter(|| (black_box(create_and_fill_context(addresses.clone())))) + }); +} + +// `cargo bench --package kaspa-notify --bench bench` +criterion_group!(benches, bench_subscription_context); +criterion_main!(benches); diff --git a/notify/src/address.rs b/notify/src/address.rs deleted file mode 100644 index c26668ef6..000000000 --- a/notify/src/address.rs +++ /dev/null @@ -1,53 +0,0 @@ -use derive_more::Deref; -use kaspa_addresses::{Address, Prefix}; -use kaspa_consensus_core::tx::ScriptPublicKey; -use kaspa_txscript::{extract_script_pub_key_address, pay_to_address_script}; -use kaspa_txscript_errors::TxScriptError; - -#[allow(dead_code)] -/// Represents an [`Address`] and its matching [`ScriptPublicKey`] representation -#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Hash, Deref)] -pub struct UtxoAddress(Address); - -impl UtxoAddress { - pub fn from_address(address: Address) -> Self { - Self(address) - } - - pub fn try_from_script(script_public_key: ScriptPublicKey, prefix: Prefix) -> Result { - Ok(Self::from_address(extract_script_pub_key_address(&script_public_key, prefix)?)) - } - - pub fn to_script_public_key(&self) -> ScriptPublicKey { - pay_to_address_script(self) - } -} - -impl From
<Address> for UtxoAddress {
-    fn from(address: Address) -> Self {
-        Self::from_address(address)
-    }
-}
-
-impl From<UtxoAddress> for Address {
-    fn from(item: UtxoAddress) -> Self {
-        item.0
-    }
-}
-
-pub mod test_helpers {
-    use super::*;
-    use kaspa_addresses::{Prefix, Version};
-
-    pub fn get_3_addresses(sorted: bool) -> Vec<Address>
{ - let mut addresses = vec![ - Address::new(Prefix::Mainnet, Version::PubKey, &[1u8; 32]), - Address::new(Prefix::Mainnet, Version::PubKey, &[2u8; 32]), - Address::new(Prefix::Mainnet, Version::PubKey, &[0u8; 32]), - ]; - if sorted { - addresses.sort() - } - addresses - } -} diff --git a/notify/src/address/error.rs b/notify/src/address/error.rs new file mode 100644 index 000000000..22f0df6a5 --- /dev/null +++ b/notify/src/address/error.rs @@ -0,0 +1,9 @@ +use thiserror::Error; + +#[derive(Clone, Debug, Error)] +pub enum Error { + #[error("the address store reached the maximum capacity")] + MaxCapacityReached, +} + +pub type Result = std::result::Result; diff --git a/notify/src/address/mod.rs b/notify/src/address/mod.rs new file mode 100644 index 000000000..76885f420 --- /dev/null +++ b/notify/src/address/mod.rs @@ -0,0 +1,21 @@ +pub mod error; +pub mod tracker; + +pub mod test_helpers { + use kaspa_addresses::Address; + use kaspa_addresses::{Prefix, Version}; + + pub const ADDRESS_PREFIX: Prefix = Prefix::Mainnet; + + pub fn get_3_addresses(sorted: bool) -> Vec
{ + let mut addresses = vec![ + Address::new(ADDRESS_PREFIX, Version::PubKey, &[1u8; 32]), + Address::new(ADDRESS_PREFIX, Version::PubKey, &[2u8; 32]), + Address::new(ADDRESS_PREFIX, Version::PubKey, &[0u8; 32]), + ]; + if sorted { + addresses.sort() + } + addresses + } +} diff --git a/notify/src/address/tracker.rs b/notify/src/address/tracker.rs new file mode 100644 index 000000000..55100a706 --- /dev/null +++ b/notify/src/address/tracker.rs @@ -0,0 +1,725 @@ +use crate::address::error::{Error, Result}; +use indexmap::{map::Entry, IndexMap}; +use itertools::Itertools; +use kaspa_addresses::{Address, Prefix}; +use kaspa_consensus_core::tx::ScriptPublicKey; +use kaspa_core::{debug, info, trace}; +use kaspa_txscript::{extract_script_pub_key_address, pay_to_address_script}; +use parking_lot::{RwLock, RwLockReadGuard, RwLockWriteGuard}; +use std::{ + collections::{hash_map, hash_set, HashMap, HashSet}, + fmt::Display, +}; + +pub trait Indexer { + fn contains(&self, index: Index) -> bool; + + /// Inserts an [`Index`]. + /// + /// Returns true if the index was not present and was successfully inserted, false otherwise. + fn insert(&mut self, index: Index) -> bool; + + /// Removes an [`Index`]. + /// + /// Returns true if the index was present and successfully removed, false otherwise. + fn remove(&mut self, index: Index) -> bool; + + fn len(&self) -> usize; + fn is_empty(&self) -> bool; +} + +pub type Index = u32; +pub type RefCount = u16; + +/// Tracks reference count of indexes +pub type Counters = CounterMap; + +/// Tracks indexes +pub type Indexes = IndexSet; + +/// Tracks reference count of indexes +#[derive(Debug, Default, Clone, PartialEq, Eq)] +pub struct CounterMap(HashMap); + +impl CounterMap { + pub fn new() -> Self { + Self(HashMap::new()) + } + + pub fn with_capacity(capacity: usize) -> Self { + Self(HashMap::with_capacity(capacity)) + } + + #[cfg(test)] + pub fn with_counters(counters: Vec) -> Self { + Self(counters.into_iter().map(|x| (x.index, x.count)).collect()) + } + + pub fn iter(&self) -> hash_map::Iter<'_, Index, RefCount> { + self.0.iter() + } + + pub fn len(&self) -> usize { + self.0.len() + } + + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + pub fn capacity(&self) -> usize { + self.0.capacity() + } +} + +impl Indexer for CounterMap { + fn contains(&self, index: Index) -> bool { + self.0.contains_key(&index) + } + + fn insert(&mut self, index: Index) -> bool { + let mut result = true; + self.0 + .entry(index) + .and_modify(|x| { + *x += 1; + result = *x == 1; + }) + .or_insert(1); + result + } + + fn remove(&mut self, index: Index) -> bool { + let mut result = false; + self.0.entry(index).and_modify(|x| { + if *x > 0 { + *x -= 1; + result = *x == 0 + } + }); + result + } + + fn len(&self) -> usize { + self.len() + } + + fn is_empty(&self) -> bool { + self.is_empty() + } +} + +#[cfg(test)] +#[derive(Debug, Clone)] +#[cfg(test)] +pub struct Counter { + pub index: Index, + pub count: RefCount, + pub locked: bool, +} + +#[cfg(test)] +impl Counter { + pub fn new(index: Index, count: RefCount) -> Self { + Self { index, count, locked: false } + } + + pub fn active(&self) -> bool { + self.count > 0 + } +} + +#[cfg(test)] +impl PartialEq for Counter { + fn eq(&self, other: &Self) -> bool { + self.index == other.index + } +} +#[cfg(test)] +impl Eq for Counter {} + +#[cfg(test)] +impl PartialOrd for Counter { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} +#[cfg(test)] +impl Ord for Counter { + fn cmp(&self, other: &Self) -> 
std::cmp::Ordering { + self.index.cmp(&other.index) + } +} + +/// Set of `Index` +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct IndexSet(HashSet); + +impl IndexSet { + pub fn new(indexes: Vec) -> Self { + Self(indexes.into_iter().collect()) + } + + pub fn with_capacity(capacity: usize) -> Self { + Self(HashSet::with_capacity(capacity)) + } + + pub fn iter(&self) -> hash_set::Iter<'_, Index> { + self.0.iter() + } + + pub fn len(&self) -> usize { + self.0.len() + } + + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + pub fn capacity(&self) -> usize { + self.0.capacity() + } + + pub fn drain(&mut self) -> hash_set::Drain<'_, Index> { + self.0.drain() + } +} + +impl Indexer for IndexSet { + fn contains(&self, index: Index) -> bool { + self.0.contains(&index) + } + + fn insert(&mut self, index: Index) -> bool { + self.0.insert(index) + } + + fn remove(&mut self, index: Index) -> bool { + self.0.remove(&index) + } + + fn len(&self) -> usize { + self.len() + } + + fn is_empty(&self) -> bool { + self.is_empty() + } +} + +#[derive(Debug)] +struct Inner { + /// Index-based map of [`ScriptPublicKey`] to its reference count + /// + /// ### Implementation note + /// + /// The whole purpose of the tracker is to reduce a [`ScriptPublicKey`] to an [`Index`] in all + /// [`Indexer`] instances. Therefore, every mutable access to the struct must be careful not to + /// use `IndexMap` APIs which alter the index order of existing entries. + script_pub_keys: IndexMap, + + /// Maximum address count that can be registered + max_addresses: Option, + + /// Set of entries [`Index`] in `script_pub_keys` having their [`RefCount`] at 0 hence considered + /// empty. + /// + /// An empty entry can be recycled and hold a new `script_pub_key`. + empty_entries: HashSet, +} + +impl Inner { + /// The upper bound of the maximum address count + const MAX_ADDRESS_UPPER_BOUND: usize = Self::expand_max_addresses(10_000_000); + + /// The lower bound of the maximum address count + const MAX_ADDRESS_LOWER_BOUND: usize = 6; + + /// Computes the optimal expanded max address count fitting in the actual allocated size of + /// the internal storage structure + const fn expand_max_addresses(max_addresses: usize) -> usize { + if max_addresses >= Self::MAX_ADDRESS_LOWER_BOUND { + // The following formula matches the internal allocation of an IndexMap or a HashMap + // as found in fns hashbrown::raw::inner::{capacity_to_buckets, bucket_mask_to_capacity}. + // + // The last allocated entry is reserved for recycling entries, hence the plus and minus 1 + // which differ from the hashbrown formula. + ((max_addresses + 1) * 8 / 7).next_power_of_two() * 7 / 8 - 1 + } else { + Self::MAX_ADDRESS_LOWER_BOUND + } + } + + fn new(max_addresses: Option) -> Self { + // Expands the maximum address count to the IndexMap actual usable allocated size minus 1. + // Saving one entry for the insert/swap_remove scheme during entry recycling prevents a reallocation + // when reaching the maximum. 
+ let max_addresses = max_addresses.map(Self::expand_max_addresses); + let capacity = max_addresses.map(|x| x + 1).unwrap_or_default(); + + assert!( + capacity <= Self::MAX_ADDRESS_UPPER_BOUND + 1, + "Tracker maximum address count cannot exceed {}", + Self::MAX_ADDRESS_UPPER_BOUND + ); + + let script_pub_keys = IndexMap::with_capacity(capacity); + debug!("Creating an address tracker with a capacity of {}", script_pub_keys.capacity()); + if let Some(max_addresses) = max_addresses { + info!("Tracking UTXO changed events for {} addresses at most", max_addresses); + } + let empty_entries = HashSet::with_capacity(capacity); + Self { script_pub_keys, max_addresses, empty_entries } + } + + fn is_full(&self) -> bool { + self.script_pub_keys.len() >= self.max_addresses.unwrap_or(Self::MAX_ADDRESS_UPPER_BOUND) && self.empty_entries.is_empty() + } + + fn get(&self, spk: &ScriptPublicKey) -> Option<(Index, RefCount)> { + self.script_pub_keys.get_full(spk).map(|(index, _, count)| (index as Index, *count)) + } + + fn get_index(&self, index: Index) -> Option<&ScriptPublicKey> { + self.script_pub_keys.get_index(index as usize).map(|(spk, _)| spk) + } + + fn get_index_address(&self, index: Index, prefix: Prefix) -> Option
{ + self.script_pub_keys + .get_index(index as usize) + .map(|(spk, _)| extract_script_pub_key_address(spk, prefix).expect("is retro-convertible")) + } + + fn get_or_insert(&mut self, spk: ScriptPublicKey) -> Result { + match self.is_full() { + false => match self.script_pub_keys.entry(spk) { + Entry::Occupied(entry) => Ok(entry.index() as Index), + Entry::Vacant(entry) => { + let mut index = entry.index() as Index; + trace!( + "AddressTracker insert #{} {}", + index, + extract_script_pub_key_address(entry.key(), Prefix::Mainnet).unwrap() + ); + let _ = *entry.insert(0); + + // Try to recycle an empty entry if there is some + let mut recycled = false; + if (index + 1) as usize == self.script_pub_keys.len() && !self.empty_entries.is_empty() { + // Takes the first empty entry index + let empty_index = self.empty_entries.iter().cloned().next(); + if let Some(empty_index) = empty_index { + // Stores the newly created entry at the empty entry index while keeping it registered as an + // empty entry (because it is so at this stage, the ref count being 0). + self.script_pub_keys.swap_remove_index(empty_index as usize); + index = empty_index; + recycled = true; + } + } + // If no recycling occurred, registers the newly created entry as empty (since ref count is 0). + if !recycled { + self.empty_entries.insert(index); + } + Ok(index) + } + }, + true => match self.script_pub_keys.get_index_of(&spk) { + Some(index) => Ok(index as Index), + None => Err(Error::MaxCapacityReached), + }, + } + } + + /// Increases by one the [`RefCount`] of the [`ScriptPublicKey`] at `index`. + /// + /// If the entry had a reference count of 0 before the increase, its index is removed from + /// the empty entries set. + fn inc_count(&mut self, index: Index) { + if let Some((_, count)) = self.script_pub_keys.get_index_mut(index as usize) { + *count += 1; + trace!("AddressTracker inc count #{} to {}", index, *count); + if *count == 1 { + self.empty_entries.remove(&index); + } + } + } + + /// Decreases by one the [`RefCount`] of the [`ScriptPublicKey`] at `index`. + /// + /// Panics if the ref count is already 0. + /// + /// When the reference count reaches zero, the index is inserted into the empty entries set. + fn dec_count(&mut self, index: Index) { + if let Some((_, count)) = self.script_pub_keys.get_index_mut(index as usize) { + if *count == 0 { + panic!("Address tracker is trying to decrease an address counter that is already at zero"); + } + *count -= 1; + trace!("AddressTracker dec count #{} to {}", index, *count); + if *count == 0 { + self.empty_entries.insert(index); + } + } + } + + fn len(&self) -> usize { + assert!(self.script_pub_keys.len() >= self.empty_entries.len(), "entries marked empty are never removed from script_pub_keys"); + self.script_pub_keys.len() - self.empty_entries.len() + } + + fn is_empty(&self) -> bool { + self.len() == 0 + } +} + +/// Tracker of a set of [`Address`](kaspa_addresses::Address), indexing and counting registrations +/// +/// #### Implementation design +/// +/// Each [`Address`](kaspa_addresses::Address) is stored internally as a [`ScriptPubKey`](kaspa_consensus_core::tx::ScriptPublicKey). +/// This prevents inter-network duplication and optimizes UTXOs filtering efficiency. +/// +/// But consequently the address network prefix gets lost and must be globally provided when querying for addresses by indexes. 
+#[derive(Debug)] +pub struct Tracker { + inner: RwLock, +} + +impl Display for Tracker { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{} addresses", self.inner.read().script_pub_keys.len()) + } +} + +impl Tracker { + /// The upper bound of the maximum address count + pub const MAX_ADDRESS_UPPER_BOUND: usize = Inner::MAX_ADDRESS_UPPER_BOUND; + + /// Expanded count for a maximum of 1M addresses + pub const DEFAULT_MAX_ADDRESSES: usize = Self::expand_max_addresses(800); + + const ADDRESS_CHUNK_SIZE: usize = 1024; + + /// Computes the optimal expanded max address count fitting in the actual allocated size of + /// the internal storage structure + pub const fn expand_max_addresses(max_addresses: usize) -> usize { + Inner::expand_max_addresses(max_addresses) + } + + pub fn new(max_addresses: Option) -> Self { + Self { inner: RwLock::new(Inner::new(max_addresses)) } + } + + #[cfg(test)] + pub fn with_addresses(addresses: &[Address]) -> Self { + let tracker = Self { inner: RwLock::new(Inner::new(None)) }; + for chunk in addresses.chunks(Self::ADDRESS_CHUNK_SIZE) { + let mut inner = tracker.inner.write(); + for address in chunk { + let index = inner.get_or_insert(pay_to_address_script(address)).unwrap(); + inner.inc_count(index); + } + } + tracker + } + + pub fn data(&self) -> TrackerReadGuard<'_> { + TrackerReadGuard { guard: self.inner.read() } + } + + pub fn get(&self, spk: &ScriptPublicKey) -> Option<(Index, RefCount)> { + self.inner.read().get(spk) + } + + pub fn get_address(&self, address: &Address) -> Option<(Index, RefCount)> { + self.get(&pay_to_address_script(address)) + } + + pub fn get_address_at_index(&self, index: Index, prefix: Prefix) -> Option
{ + self.inner.read().get_index_address(index, prefix) + } + + pub fn contains(&self, indexes: &T, spk: &ScriptPublicKey) -> bool { + self.get(spk).is_some_and(|(index, _)| indexes.contains(index)) + } + + pub fn contains_address(&self, indexes: &T, address: &Address) -> bool { + self.contains(indexes, &pay_to_address_script(address)) + } + + /// Returns an index set containing the indexes of all the addresses both registered in the tracker and in `indexes`. + pub fn unregistering_indexes(&self, indexes: &Indexes, addresses: &[Address]) -> Indexes { + Indexes::new( + addresses + .iter() + .filter_map(|address| { + self.get(&pay_to_address_script(address)).and_then(|(index, _)| indexes.contains(index).then_some(index)) + }) + .collect(), + ) + } + + /// Tries to register an `Address` vector into an `Indexer`. The addresses are first registered in the tracker if unknown + /// yet and their reference count is increased when successfully inserted in the `Indexer`. + /// + /// On success, returns the addresses that were actually inserted in the `Indexer`. + /// + /// Fails if the maximum capacity gets reached, leaving the tracker unchanged. + pub fn register(&self, indexes: &mut T, mut addresses: Vec
) -> Result> { + let mut rollback: bool = false; + { + let mut counter: usize = 0; + let mut inner = self.inner.write(); + addresses.retain(|address| { + counter += 1; + if counter % Self::ADDRESS_CHUNK_SIZE == 0 { + RwLockWriteGuard::bump(&mut inner); + } + let spk = pay_to_address_script(address); + match inner.get_or_insert(spk) { + Ok(index) => { + if indexes.insert(index) { + inner.inc_count(index); + true + } else { + false + } + } + Err(Error::MaxCapacityReached) => { + // Rollback registration + rollback = true; + false + } + } + }); + } + match rollback { + false => Ok(addresses), + true => { + let _ = self.unregister(indexes, addresses); + Err(Error::MaxCapacityReached) + } + } + } + + /// Unregisters an `Address` vector from an `Indexer`. The addresses, when existing both in the tracker + /// and the `Indexer`, are first removed from the `Indexer` and on success get their reference count + /// decreased. + /// + /// Returns the addresses that where successfully unregistered from the `Indexer`. + pub fn unregister(&self, indexes: &mut T, mut addresses: Vec
<Address>) -> Vec<Address>
{ + if indexes.is_empty() { + vec![] + } else { + let mut counter: usize = 0; + let mut inner = self.inner.write(); + addresses.retain(|address| { + counter += 1; + if counter % Self::ADDRESS_CHUNK_SIZE == 0 { + RwLockWriteGuard::bump(&mut inner); + } + let spk = pay_to_address_script(address); + if let Some((index, _)) = inner.get(&spk) { + if indexes.remove(index) { + inner.dec_count(index); + true + } else { + false + } + } else { + false + } + }); + addresses + } + } + + /// Unregisters all indexes contained in `indexes`, draining it in the process. + pub fn unregister_indexes(&self, indexes: &mut Indexes) { + for chunk in &indexes.drain().chunks(Self::ADDRESS_CHUNK_SIZE) { + let mut inner = self.inner.write(); + chunk.for_each(|index| inner.dec_count(index)); + } + } + + pub fn to_addresses(&self, indexes: &[Index], prefix: Prefix) -> Vec
{ + let mut addresses = Vec::with_capacity(indexes.len()); + for chunk in indexes.chunks(Self::ADDRESS_CHUNK_SIZE) { + let inner = self.inner.read(); + chunk.iter().for_each(|index| { + if let Some(address) = inner.get_index_address(*index, prefix) { + addresses.push(address); + } + }); + } + addresses + } + + pub fn len(&self) -> usize { + self.inner.read().len() + } + + pub fn is_empty(&self) -> bool { + self.inner.read().is_empty() + } + + pub fn capacity(&self) -> usize { + self.inner.read().script_pub_keys.capacity() + } + + pub fn max_addresses(&self) -> Option { + self.inner.read().max_addresses + } +} + +impl Default for Tracker { + fn default() -> Self { + Self::new(None) + } +} + +pub struct TrackerReadGuard<'a> { + guard: RwLockReadGuard<'a, Inner>, +} + +impl<'a> TrackerReadGuard<'a> { + pub fn get_index(&'a self, index: Index) -> Option<&'a ScriptPublicKey> { + self.guard.get_index(index) + } + + pub fn iter_keys(&'a self, indexes: &'a Indexes) -> impl Iterator> { + indexes.0.iter().cloned().map(|index| self.get_index(index)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use kaspa_math::Uint256; + + fn create_addresses(start: usize, count: usize) -> Vec
{ + (start..start + count) + .map(|i| Address::new(Prefix::Mainnet, kaspa_addresses::Version::PubKey, &Uint256::from_u64(i as u64).to_le_bytes())) + .collect() + } + + #[test] + fn test_tracker_capacity_and_entry_recycling() { + const INIT_MAX_ADDRESSES: usize = 6; + const MAX_ADDRESSES: usize = ((INIT_MAX_ADDRESSES + 1) * 8 / 7).next_power_of_two() * 7 / 8 - 1; + const CAPACITY: usize = MAX_ADDRESSES + 1; + + let tracker = Tracker::new(Some(MAX_ADDRESSES)); + assert_eq!( + tracker.max_addresses().unwrap(), + MAX_ADDRESSES, + "tracker maximum address count should be expanded to the available allocated entries, minus 1 for a transient insert/swap_remove" + ); + assert_eq!( + tracker.capacity(), + CAPACITY, + "tracker capacity should match the maximum address count plus 1 extra entry for a transient insert/swap_remove" + ); + let aa = create_addresses(0, MAX_ADDRESSES); + assert_eq!(aa.len(), MAX_ADDRESSES); + + // Register addresses 0..MAX_ADDRESSES + let mut idx_a = Indexes::new(vec![]); + let aa = tracker.register(&mut idx_a, aa).unwrap(); + let aai = aa.iter().map(|x| tracker.get_address(x).unwrap().0).collect_vec(); + assert_eq!(aa.len(), MAX_ADDRESSES, "all addresses should be registered"); + assert_eq!(idx_a.len(), MAX_ADDRESSES, "all addresses should be registered"); + for i in 0..aa.len() { + assert!(tracker.contains_address(&idx_a, &aa[i]), "tracker should contain the registered address"); + assert!(idx_a.contains(aai[i]), "index set should contain the registered address index"); + } + assert_eq!(tracker.capacity(), CAPACITY); + + // Try to re-register addresses 0..MAX_ADDRESSES + let a = tracker.register(&mut idx_a, aa).unwrap(); + assert_eq!(a.len(), 0, "all addresses should already be registered"); + assert_eq!(idx_a.len(), MAX_ADDRESSES, "all addresses should still be registered"); + + // Try to register an additional address while the tracker is full + assert!( + tracker.register(&mut idx_a, create_addresses(MAX_ADDRESSES, 1)).is_err(), + "the tracker is full and should refuse a new address" + ); + + // Register address set 1..MAX_ADDRESSES, already fully covered by the tracker address set + const AB_COUNT: usize = MAX_ADDRESSES - 1; + let mut idx_b = Indexes::new(vec![]); + let ab = tracker.register(&mut idx_b, create_addresses(1, AB_COUNT)).unwrap(); + assert_eq!(ab.len(), AB_COUNT, "all addresses should be registered"); + assert_eq!(idx_b.len(), AB_COUNT, "all addresses should be registered"); + + // Empty the tracker entry containing A0 + assert_eq!(tracker.unregister(&mut idx_a, create_addresses(0, 1)).len(), 1); + assert_eq!(idx_a.len(), MAX_ADDRESSES - 1, "entry #0 with address A0 should now be marked empty"); + + // Fill the empty entry with a single new address A8 + const AC_COUNT: usize = 1; + let ac = tracker.register(&mut idx_a, create_addresses(MAX_ADDRESSES, AC_COUNT)).unwrap(); + let aci = ac.iter().map(|x| tracker.get_address(x).unwrap().0).collect_vec(); + assert_eq!(ac.len(), AC_COUNT, "a new address should be registered"); + assert_eq!(idx_a.len(), MAX_ADDRESSES, "a new address should be registered"); + assert_eq!(ac[0], create_addresses(MAX_ADDRESSES, AC_COUNT)[0], "the new address A8 should be registered"); + assert!(tracker.contains_address(&idx_a, &ac[0]), "the new address A8 should be registered"); + assert_eq!(aai[0], aci[0], "the newly registered address A8 should occupy the previously emptied entry"); + + assert_eq!( + tracker.capacity(), + CAPACITY, + "the tracker capacity should not have been affected by the transient insert/swap_remove" + ); + } + 
+ #[test] + fn test_indexes_eq() { + let i1 = IndexSet::new(vec![0, 1, 2, 3, 5, 7, 11]); + let i2 = IndexSet::new(vec![5, 7, 11, 0, 1, 2, 3]); + let i3 = IndexSet::new(vec![0, 1, 2, 4, 8, 16, 32]); + let i4 = IndexSet::new(vec![0, 1]); + assert_eq!(i1, i1); + assert_eq!(i1, i2); + assert_ne!(i1, i3); + assert_ne!(i1, i4); + assert_eq!(i2, i2); + assert_ne!(i2, i3); + assert_ne!(i2, i4); + assert_eq!(i3, i3); + assert_ne!(i3, i4); + assert_eq!(i4, i4); + } + + #[test] + fn test_index_map_replace() { + let mut m: IndexMap = IndexMap::with_capacity(7); + m.insert(1, 10); + m.insert(2, 0); + m.insert(3, 30); + m.insert(4, 40); + assert_eq!(m.get_index(0), Some((&1, &10))); + assert_eq!(m.get_index(1), Some((&2, &0))); + assert_eq!(m.get_index(2), Some((&3, &30))); + assert_eq!(m.get_index(3), Some((&4, &40))); + + assert_eq!(m.swap_remove_index(1), Some((2, 0))); + + assert_eq!(m.get_index(0), Some((&1, &10))); + assert_eq!(m.get_index(1), Some((&4, &40))); + assert_eq!(m.get_index(2), Some((&3, &30))); + } + + #[test] + fn test_index_map_capacity() { + const CAPACITY: usize = 14; + let mut m: IndexMap = IndexMap::with_capacity(CAPACITY); + for i in 0..CAPACITY { + m.insert(i as u64, 0); + assert_eq!(m.capacity(), CAPACITY); + } + m.insert(CAPACITY as u64 + 1, 0); + assert_eq!(m.capacity(), ((CAPACITY + 1) * 8 / 7).next_power_of_two() * 7 / 8); + } +} diff --git a/notify/src/broadcaster.rs b/notify/src/broadcaster.rs index 238e247f5..fde070ec8 100644 --- a/notify/src/broadcaster.rs +++ b/notify/src/broadcaster.rs @@ -1,15 +1,21 @@ extern crate derive_more; -use super::{ - connection::Connection, error::Result, events::EventArray, listener::ListenerId, notification::Notification, - subscription::DynSubscription, +use crate::{ + connection::Connection, + error::Result, + events::{EventArray, EventType}, + listener::ListenerId, + notification::Notification, + subscription::{context::SubscriptionContext, BroadcastingSingle, DynSubscription}, }; use async_channel::{Receiver, Sender}; use core::fmt::Debug; use derive_more::Deref; -use futures::{future::FutureExt, select}; +use futures::{future::FutureExt, select_biased}; +use indexmap::IndexMap; use kaspa_core::{debug, trace}; use std::{ collections::HashMap, + fmt::Display, sync::{ atomic::{AtomicBool, Ordering}, Arc, @@ -19,9 +25,9 @@ use workflow_core::channel::Channel; type ConnectionSet = HashMap; -/// Broadcast plan +/// Broadcasting plan structured by subscription, encoding and connection #[derive(Deref)] -struct Plan(HashMap>>); +struct Plan(IndexMap>>); impl Plan where @@ -31,7 +37,11 @@ where // Make sure only one instance of ìd` is registered in the whole object let result = self.remove(&id); let encoding = connection.encoding(); - self.0.entry(subscription).or_default().entry(encoding).or_default().entry(id).or_insert(connection); + self.0.entry(subscription.clone()).or_default().entry(encoding).or_default().entry(id).or_insert_with(|| { + #[cfg(test)] + trace!("Broadcasting plan: insert listener {} with {:?}", id, subscription); + connection + }); result } @@ -42,6 +52,8 @@ where 'outer: for (subscription, encoding_set) in self.0.iter_mut() { for (encoding, connection_set) in encoding_set.iter_mut() { if let Some(connection) = connection_set.remove(id) { + #[cfg(test)] + trace!("Broadcasting plan: removed listener {}", id); result = Some(connection); if connection_set.is_empty() { found_encoding = Some(encoding.clone()); @@ -81,7 +93,7 @@ where C: Connection, { Register(DynSubscription, ListenerId, C), - Unregister(DynSubscription, 
ListenerId), + Unregister(EventType, ListenerId), } #[derive(Debug)] @@ -91,6 +103,8 @@ where C: Connection, { name: &'static str, + index: usize, + context: SubscriptionContext, started: Arc, ctl: Channel>, incoming: Receiver, @@ -104,9 +118,17 @@ where N: Notification, C: Connection, { - pub fn new(name: &'static str, incoming: Receiver, _sync: Option>) -> Self { + pub fn new( + name: &'static str, + index: usize, + context: SubscriptionContext, + incoming: Receiver, + _sync: Option>, + ) -> Self { Self { name, + index, + context, started: Arc::new(AtomicBool::default()), ctl: Channel::unbounded(), incoming, @@ -124,24 +146,30 @@ where if self.started.compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst).is_err() { return; } - trace!("[Broadcaster-{}] Starting notification broadcasting task", self.name); + trace!("[{}] Starting notification broadcasting task", self); + let context = self.context.clone(); workflow_core::task::spawn(async move { // Broadcasting plan by event type let mut plan = EventArray::>::default(); // Create a store for closed connections to be removed from the plan let mut purge: Vec = Vec::new(); loop { - select! { + select_biased! { ctl = self.ctl.recv().fuse() => { if let Ok(ctl) = ctl { match ctl { Ctl::Register(subscription, id, connection) => { - plan[subscription.event_type()].insert(subscription, id, connection); + let event_type = subscription.event_type(); + plan[event_type].insert(subscription.broadcasting(&context), id, connection); + debug!("[{}] insert {} subscription, count = {}, capacity = {}", self, event_type, plan[event_type].len(), plan[event_type].capacity()); }, - Ctl::Unregister(subscription, id) => { - plan[subscription.event_type()].remove(&id); + Ctl::Unregister(event_type, id) => { + plan[event_type].remove(&id); + debug!("[{}] remove {} subscription, count = {}, capacity = {}", self, event_type, plan[event_type].len(), plan[event_type].capacity()); }, } + } else { + break; } }, @@ -151,7 +179,7 @@ where let event = notification.event_type(); for (subscription, encoding_set) in plan[event].iter() { // ... by subscription scope - if let Some(applied_notification) = notification.apply_subscription(&**subscription) { + if let Some(applied_notification) = notification.apply_subscription(&**subscription, &context) { for (encoding, connection_set) in encoding_set.iter() { // ... by message encoding let message = C::into_message(&applied_notification, encoding); @@ -159,11 +187,11 @@ where // ... 
to listeners connections match connection.send(message.clone()).await { Ok(_) => { - trace!("[Broadcaster-{}] sent notification {notification} to listener {id}", self.name); + trace!("[{}] sent notification {notification} to listener {id}", self); }, Err(_) => { if connection.is_closed() { - trace!("[Broadcaster-{}] could not send a notification to listener {id} because its connection is closed - removing it", self.name); + trace!("[{}] could not send a notification to listener {id} because its connection is closed - removing it", self); purge.push(*id); } } @@ -176,9 +204,6 @@ where purge.drain(..).for_each(|id| { plan[event].remove(&id); }); } else { - debug!("[Broadcaster-{}] notification stream ended", self.name); - let _ = self.shutdown.drain(); - let _ = self.shutdown.try_send(()); break; } } @@ -190,22 +215,27 @@ where let _ = sync.try_send(()); } } + debug!("[{}] notification stream ended", self); + let _ = self.shutdown.drain(); + let _ = self.shutdown.try_send(()); }); } pub fn register(&self, subscription: DynSubscription, id: ListenerId, connection: C) -> Result<()> { - if subscription.active() { - self.ctl.try_send(Ctl::Register(subscription, id, connection))?; - } else { - self.ctl.try_send(Ctl::Unregister(subscription, id))?; - } + assert!(subscription.active()); + self.ctl.try_send(Ctl::Register(subscription, id, connection))?; + Ok(()) + } + + pub fn unregister(&self, event_type: EventType, id: ListenerId) -> Result<()> { + self.ctl.try_send(Ctl::Unregister(event_type, id))?; Ok(()) } async fn join_notification_broadcasting_task(&self) -> Result<()> { - trace!("[Broadcaster-{}] joining", self.name); + trace!("[{}] joining", self); self.shutdown.recv().await?; - debug!("[Broadcaster-{}] terminated", self.name); + debug!("[{}] terminated", self); Ok(()) } @@ -214,6 +244,16 @@ where } } +impl Display for Broadcaster +where + N: Notification, + C: Connection, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "Broadcaster-{}-{}", self.name, self.index) + } +} + #[cfg(test)] mod tests { use super::*; @@ -222,10 +262,12 @@ mod tests { listener::Listener, notification::test_helpers::*, notifier::test_helpers::{ - overall_test_steps, utxos_changed_test_steps, virtual_chain_changed_test_steps, Step, TestConnection, + overall_test_steps, utxos_changed_test_steps, virtual_chain_changed_test_steps, Step, TestConnection, SYNC_MAX_DELAY, }, + subscription::context::SubscriptionContext, }; use async_channel::{unbounded, Sender}; + use tokio::time::timeout; type TestBroadcaster = Broadcaster>; @@ -234,6 +276,7 @@ mod tests { broadcaster: Arc, /// Listeners, vector index = ListenerId listeners: Vec>, + subscription_context: SubscriptionContext, ctl_sender: Sender>, sync_receiver: Receiver<()>, notification_sender: Sender, @@ -243,15 +286,18 @@ mod tests { impl Test { fn new(name: &'static str, listener_count: usize, steps: Vec) -> Self { + const IDENT: &str = "test"; + let subscription_context = SubscriptionContext::new(); let (sync_sender, sync_receiver) = unbounded(); let (notification_sender, notification_receiver) = unbounded(); - let broadcaster = Arc::new(TestBroadcaster::new("test", notification_receiver, Some(sync_sender))); + let broadcaster = + Arc::new(TestBroadcaster::new(IDENT, 0, subscription_context.clone(), notification_receiver, Some(sync_sender))); let mut listeners = Vec::with_capacity(listener_count); let mut notification_receivers = Vec::with_capacity(listener_count); - for _ in 0..listener_count { + for i in 0..listener_count { let 
(sender, receiver) = unbounded(); - let connection = TestConnection::new(sender, ChannelType::Closable); - let listener = Listener::new(connection); + let connection = TestConnection::new(IDENT, sender, ChannelType::Closable); + let listener = Listener::new(i as ListenerId, connection); listeners.push(listener); notification_receivers.push(receiver); } @@ -259,6 +305,7 @@ mod tests { name, broadcaster: broadcaster.clone(), listeners, + subscription_context, ctl_sender: broadcaster.ctl.sender.clone(), sync_receiver, notification_sender, @@ -271,55 +318,112 @@ mod tests { self.broadcaster.start(); // Execute the test steps - for step in self.steps.iter() { + for (step_idx, step) in self.steps.iter().enumerate() { // Apply the subscription mutations and register the changes into the broadcaster + trace!("{} #{} - Initial Subscription Context {}", self.name, step_idx, self.subscription_context.address_tracker); for (idx, mutation) in step.mutations.iter().enumerate() { if let Some(ref mutation) = mutation { + trace!("{} #{} - {}: L{} {:?}", self.name, step_idx, step.name, idx, mutation); let event = mutation.event_type(); - if self.listeners[idx].subscriptions[event].mutate(mutation.clone()).is_some() { + let outcome = + self.listeners[idx].mutate(mutation.clone(), Default::default(), &self.subscription_context).unwrap(); + if outcome.has_new_state() { + trace!( + "{} #{} - {}: - L{} has the new state {:?}", + self.name, + step_idx, + step.name, + idx, + self.listeners[idx].subscriptions[event] + ); let ctl = match mutation.active() { true => Ctl::Register( - self.listeners[idx].subscriptions[event].clone_arc(), + self.listeners[idx].subscriptions[event].clone(), idx as u64, self.listeners[idx].connection(), ), - false => Ctl::Unregister(self.listeners[idx].subscriptions[event].clone_arc(), idx as u64), + false => Ctl::Unregister(event, idx as u64), }; assert!( self.ctl_sender.send(ctl).await.is_ok(), - "{} - {}: sending a registration message failed", + "{} #{} - {}: sending a registration message failed", self.name, + step_idx, step.name ); assert!( - self.sync_receiver.recv().await.is_ok(), - "{} - {}: receiving a sync message failed", + timeout(SYNC_MAX_DELAY, self.sync_receiver.recv()).await.unwrap().is_ok(), + "{} #{} - {}: receiving a sync message failed", self.name, + step_idx, step.name ); + } else if outcome.has_changes() { + trace!( + "{} #{} - {}: - L{} is inner changed into {:?}", + self.name, + step_idx, + step.name, + idx, + self.listeners[idx].subscriptions[event] + ); + } else { + trace!( + "{} #{} - {}: - L{} is unchanged {:?}", + self.name, + step_idx, + step.name, + idx, + self.listeners[idx].subscriptions[event] + ); } } } // Send the notification + if step_idx == 8 { + trace!("#8"); + } + trace!("{} #{} - {}: sending a notification...", self.name, step_idx, step.name); assert!( self.notification_sender.send_blocking(step.notification.clone()).is_ok(), - "{} - {}: sending the notification failed", + "{} #{} - {}: sending the notification failed", + self.name, + step_idx, + step.name + ); + trace!("{} #{} - {}: receiving sync signal...", self.name, step_idx, step.name); + assert!( + timeout(SYNC_MAX_DELAY, self.sync_receiver.recv()).await.unwrap().is_ok(), + "{} #{} - {}: receiving a sync message failed", self.name, + step_idx, step.name ); - assert!(self.sync_receiver.recv().await.is_ok(), "{} - {}: receiving a sync message failed", self.name, step.name); // Check what the listeners do receive for (idx, expected) in step.expected_notifications.iter().enumerate() { if let 
Some(ref expected) = expected { + assert!( + !self.notification_receivers[idx].is_empty(), + "{} #{} - {}: listener[{}] has no notification in its channel though some is expected", + self.name, + step_idx, + step.name, + idx + ); let notification = self.notification_receivers[idx].recv().await.unwrap(); - assert_eq!(*expected, notification, "{} - {}: listener[{}] got wrong notification", self.name, step.name, idx); + assert_eq!( + *expected, notification, + "{} #{} - {}: listener[{}] got wrong notification", + self.name, step_idx, step.name, idx + ); } else { assert!( self.notification_receivers[idx].is_empty(), - "{} - {}: listener[{}] has a notification in its channel but should not", + "{} #{} - {}: listener[{}] has a notification in its channel but should not", self.name, + step_idx, step.name, idx ); diff --git a/notify/src/collector.rs b/notify/src/collector.rs index 269fd2898..d4cc5ab8e 100644 --- a/notify/src/collector.rs +++ b/notify/src/collector.rs @@ -14,11 +14,10 @@ pub type CollectorNotificationChannel = Channel; pub type CollectorNotificationSender = Sender; pub type CollectorNotificationReceiver = Receiver; -/// A notification collector, relaying notifications to a [`Notifier`](notifier::Notifier). +/// A notification collector, relaying notifications to a [`Notifier`](crate::notifier::Notifier). /// -/// A [`Collector`] is responsible for collecting notifications of -/// a specific form from a specific source, convert them if necessary -/// into `N`s and forward them to the [Notifier] provided +/// A collector is responsible for collecting notifications of a specific form from a specific source, +/// convert them if necessary into `N`s and forward them to the [`Notifier`](crate::notifier::Notifier) provided /// to `Collector::start`. 
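A rough, self-contained sketch of the collector role described above; `SourceEvent`, `Notification` and `relay` are invented names for the illustration and are not part of the crate:

```rust
use std::sync::mpsc;

struct SourceEvent {
    block_hash: u64,
}

struct Notification {
    message: String,
}

// Convert every incoming source event into the outgoing notification type and
// forward it downstream: the collect/convert/forward loop described above.
fn relay(source: mpsc::Receiver<SourceEvent>, notifier: mpsc::Sender<Notification>) {
    for event in source {
        let converted = Notification { message: format!("BlockAdded({:#x})", event.block_hash) };
        if notifier.send(converted).is_err() {
            break; // the downstream side hung up
        }
    }
}

fn main() {
    let (source_tx, source_rx) = mpsc::channel();
    let (notifier_tx, notifier_rx) = mpsc::channel();

    let collector = std::thread::spawn(move || relay(source_rx, notifier_tx));
    for i in 0..3u64 {
        source_tx.send(SourceEvent { block_hash: i }).unwrap();
    }
    drop(source_tx); // closing the source ends the relay loop

    collector.join().unwrap();
    for n in notifier_rx {
        println!("notifier received: {}", n.message);
    }
}
```

In the crate itself the conversion step is factored out behind a converter (e.g. the `ConverterFrom` used in the tests below) and the loop runs as an async task, but the data flow is the same: source-specific input, converted `N`, forwarded downstream.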
#[async_trait] pub trait Collector: Send + Sync + Debug @@ -127,7 +126,10 @@ mod tests { converter::ConverterFrom, events::EventType, notifier::test_helpers::NotifyMock, - subscription::single::{OverallSubscription, UtxosChangedSubscription, VirtualChainChangedSubscription}, + subscription::{ + context::SubscriptionContext, + single::{OverallSubscription, UtxosChangedSubscription, VirtualChainChangedSubscription}, + }, }; use derive_more::Display; @@ -153,15 +155,19 @@ mod tests { } impl crate::notification::Notification for OutgoingNotification { - fn apply_overall_subscription(&self, _: &OverallSubscription) -> Option { + fn apply_overall_subscription(&self, _: &OverallSubscription, _: &SubscriptionContext) -> Option { unimplemented!() } - fn apply_virtual_chain_changed_subscription(&self, _: &VirtualChainChangedSubscription) -> Option { + fn apply_virtual_chain_changed_subscription( + &self, + _: &VirtualChainChangedSubscription, + _: &SubscriptionContext, + ) -> Option { unimplemented!() } - fn apply_utxos_changed_subscription(&self, _: &UtxosChangedSubscription) -> Option { + fn apply_utxos_changed_subscription(&self, _: &UtxosChangedSubscription, _: &SubscriptionContext) -> Option { unimplemented!() } diff --git a/notify/src/connection.rs b/notify/src/connection.rs index a878698ac..3b2cb2f68 100644 --- a/notify/src/connection.rs +++ b/notify/src/connection.rs @@ -1,11 +1,11 @@ use crate::error::Error; use crate::notification::Notification; use async_channel::Sender; -use std::fmt::Debug; +use std::fmt::{Debug, Display}; use std::hash::Hash; #[async_trait::async_trait] -pub trait Connection: Clone + Debug + Send + Sync + 'static { +pub trait Connection: Clone + Display + Debug + Send + Sync + 'static { type Notification; type Message: Clone + Send + Sync; type Encoding: Hash + Clone + Eq + PartialEq + Send + Sync; @@ -29,6 +29,7 @@ pub struct ChannelConnection where N: Notification, { + name: &'static str, sender: Sender, channel_type: ChannelType, } @@ -37,8 +38,8 @@ impl ChannelConnection where N: Notification, { - pub fn new(sender: Sender, channel_type: ChannelType) -> Self { - Self { sender, channel_type } + pub fn new(name: &'static str, sender: Sender, channel_type: ChannelType) -> Self { + Self { name, sender, channel_type } } /// Close the connection, ignoring the channel type @@ -47,6 +48,15 @@ where } } +impl Display for ChannelConnection +where + N: Notification, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.name) + } +} + #[derive(Clone, Debug, Hash, Eq, PartialEq, Default)] pub enum Unchanged { #[default] diff --git a/notify/src/error.rs b/notify/src/error.rs index aadeddd5d..d6021a0bc 100644 --- a/notify/src/error.rs +++ b/notify/src/error.rs @@ -22,6 +22,9 @@ pub enum Error { #[error("event type disabled")] EventTypeDisabled, + + #[error(transparent)] + AddressError(#[from] crate::address::error::Error), } impl From for Error { diff --git a/notify/src/events.rs b/notify/src/events.rs index 3d5555d0e..2ccc91483 100644 --- a/notify/src/events.rs +++ b/notify/src/events.rs @@ -18,6 +18,13 @@ macro_rules! event_type_enum { } } } + impl std::fmt::Display for $name { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + $($name::$variant_name => write!(f, stringify!($variant_name))),* + } + } + } pub const EVENT_TYPE_ARRAY: [EventType; EVENT_COUNT] = [ $($name::$variant_name),* ]; @@ -25,6 +32,10 @@ macro_rules! event_type_enum { } event_type_enum! 
{ +/// Event type classifying subscriptions (see [`Scope`]) and notifications (see [`Notification`]) +/// +/// Note: This enum is central to the notification system. For supporting a new notification type, it is advised to +/// start by adding a new variant here. #[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)] pub enum EventType { BlockAdded = 0, diff --git a/notify/src/listener.rs b/notify/src/listener.rs index 81f5f186f..46a688ad5 100644 --- a/notify/src/listener.rs +++ b/notify/src/listener.rs @@ -1,13 +1,28 @@ use std::fmt::Debug; extern crate derive_more; +use kaspa_core::debug; + +use crate::{ + error::Result, + subscription::{ + context::SubscriptionContext, DynSubscription, MutateSingle, MutationOutcome, MutationPolicies, UtxosChangedMutationPolicy, + }, +}; + use super::{ connection::Connection, events::EventArray, - subscription::{array::ArrayBuilder, Mutation, SingleSubscription}, + subscription::{array::ArrayBuilder, Mutation}, }; pub type ListenerId = u64; +#[derive(Copy, Clone, Debug)] +pub enum ListenerLifespan { + Static(MutationPolicies), + Dynamic, +} + /// A listener of [`super::notifier::Notifier`] notifications. #[derive(Debug)] pub(crate) struct Listener @@ -15,27 +30,47 @@ where C: Connection, { connection: C, - pub(crate) subscriptions: EventArray, + pub(crate) subscriptions: EventArray, + pub(crate) _lifespan: ListenerLifespan, } impl Listener where C: Connection, { - pub fn new(connection: C) -> Self { - Self { connection, subscriptions: ArrayBuilder::single() } + pub fn new(id: ListenerId, connection: C) -> Self { + Self { connection, subscriptions: ArrayBuilder::single(id, None), _lifespan: ListenerLifespan::Dynamic } + } + + pub fn new_static(id: ListenerId, connection: C, context: &SubscriptionContext, policies: MutationPolicies) -> Self { + let capacity = match policies.utxo_changed { + UtxosChangedMutationPolicy::AddressSet => { + debug!( + "Creating a static listener {} with UtxosChanged capacity of {}", + connection, + context.address_tracker.max_addresses().unwrap_or_default() + ); + context.address_tracker.max_addresses() + } + UtxosChangedMutationPolicy::Wildcard => None, + }; + let subscriptions = ArrayBuilder::single(id, capacity); + Self { connection, subscriptions, _lifespan: ListenerLifespan::Static(policies) } } pub fn connection(&self) -> C { self.connection.clone() } - /// Apply a mutation to the subscriptions. - /// - /// Return Some mutations to be applied to a compounded state if any change occurred - /// in the subscription state and None otherwise. 
- pub fn mutate(&mut self, mutation: Mutation) -> Option> { - self.subscriptions[mutation.event_type()].mutate(mutation) + /// Apply a mutation to the subscriptions + pub fn mutate( + &mut self, + mutation: Mutation, + policies: MutationPolicies, + context: &SubscriptionContext, + ) -> Result { + let event_type = mutation.event_type(); + self.subscriptions[event_type].mutate(mutation, policies, context) } pub fn close(&self) { diff --git a/notify/src/notification.rs b/notify/src/notification.rs index b32fc4150..6be9b3593 100644 --- a/notify/src/notification.rs +++ b/notify/src/notification.rs @@ -1,3 +1,5 @@ +use crate::subscription::context::SubscriptionContext; + use super::{ events::EventType, subscription::{ @@ -7,22 +9,28 @@ use super::{ }; use std::fmt::{Debug, Display}; +/// A notification, usable throughout the full notification system via types implementing this trait pub trait Notification: Clone + Debug + Display + Send + Sync + 'static { - fn apply_overall_subscription(&self, subscription: &OverallSubscription) -> Option; + fn apply_overall_subscription(&self, subscription: &OverallSubscription, context: &SubscriptionContext) -> Option; - fn apply_virtual_chain_changed_subscription(&self, subscription: &VirtualChainChangedSubscription) -> Option; + fn apply_virtual_chain_changed_subscription( + &self, + subscription: &VirtualChainChangedSubscription, + context: &SubscriptionContext, + ) -> Option; - fn apply_utxos_changed_subscription(&self, subscription: &UtxosChangedSubscription) -> Option; + fn apply_utxos_changed_subscription(&self, subscription: &UtxosChangedSubscription, context: &SubscriptionContext) + -> Option; - fn apply_subscription(&self, subscription: &dyn Single) -> Option { + fn apply_subscription(&self, subscription: &dyn Single, context: &SubscriptionContext) -> Option { match subscription.event_type() { EventType::VirtualChainChanged => self.apply_virtual_chain_changed_subscription( subscription.as_any().downcast_ref::().unwrap(), + context, ), - EventType::UtxosChanged => { - self.apply_utxos_changed_subscription(subscription.as_any().downcast_ref::().unwrap()) - } - _ => self.apply_overall_subscription(subscription.as_any().downcast_ref::().unwrap()), + EventType::UtxosChanged => self + .apply_utxos_changed_subscription(subscription.as_any().downcast_ref::().unwrap(), context), + _ => self.apply_overall_subscription(subscription.as_any().downcast_ref::().unwrap(), context), } } @@ -68,7 +76,7 @@ macro_rules! 
full_featured { pub use full_featured; pub mod test_helpers { - use crate::subscription::Subscription; + use crate::subscription::{context::SubscriptionContext, Subscription}; use super::*; use derive_more::Display; @@ -106,7 +114,7 @@ pub mod test_helpers { } impl Notification for TestNotification { - fn apply_overall_subscription(&self, subscription: &OverallSubscription) -> Option { + fn apply_overall_subscription(&self, subscription: &OverallSubscription, _: &SubscriptionContext) -> Option { trace!("apply_overall_subscription: {self:?}, {subscription:?}"); match subscription.active() { true => Some(self.clone()), @@ -114,7 +122,11 @@ pub mod test_helpers { } } - fn apply_virtual_chain_changed_subscription(&self, subscription: &VirtualChainChangedSubscription) -> Option { + fn apply_virtual_chain_changed_subscription( + &self, + subscription: &VirtualChainChangedSubscription, + _: &SubscriptionContext, + ) -> Option { match subscription.active() { true => { if let TestNotification::VirtualChainChanged(ref payload) = self { @@ -131,13 +143,25 @@ pub mod test_helpers { } } - fn apply_utxos_changed_subscription(&self, subscription: &UtxosChangedSubscription) -> Option { + fn apply_utxos_changed_subscription( + &self, + subscription: &UtxosChangedSubscription, + context: &SubscriptionContext, + ) -> Option { match subscription.active() { true => { if let TestNotification::UtxosChanged(ref payload) = self { + let subscription = subscription.data(); if !subscription.to_all() { - let addresses = - payload.addresses.iter().filter(|x| subscription.contains_address(x)).cloned().collect::>(); + // trace!("apply_utxos_changed_subscription: Notification payload {:?}", payload); + // trace!("apply_utxos_changed_subscription: Subscription content {:?}", subscription); + // trace!("apply_utxos_changed_subscription: Subscription Context {}", context.address_tracker); + let addresses = payload + .addresses + .iter() + .filter(|x| subscription.contains_address(x, context)) + .cloned() + .collect::>(); if !addresses.is_empty() { return Some(TestNotification::UtxosChanged(UtxosChangedNotification { data: payload.data, diff --git a/notify/src/notifier.rs b/notify/src/notifier.rs index 49d1cc936..41ae6626d 100644 --- a/notify/src/notifier.rs +++ b/notify/src/notifier.rs @@ -1,4 +1,8 @@ -use crate::events::EVENT_TYPE_ARRAY; +use crate::{ + events::EVENT_TYPE_ARRAY, + listener::ListenerLifespan, + subscription::{context::SubscriptionContext, MutationPolicies, UtxosChangedMutationPolicy}, +}; use super::{ broadcaster::Broadcaster, @@ -45,21 +49,61 @@ pub type DynNotify = Arc>; // pub type DynRegistrar = Arc>; -/// A Notifier is a notification broadcaster that manages a collection of [`Listener`]s and, for each one, -/// a set of subscriptions to notifications by event type. +/// A notifier is a notification broadcaster. It receives notifications from upstream _parents_ and +/// broadcasts those downstream to its _children_ listeners. Symmetrically, it receives subscriptions +/// from its downward listeners, compounds those internally and pushes upward the subscriptions resulting +/// of the compounding, if any, to the _parents_. +/// +/// ### Enabled event types +/// +/// A notifier has a set of enabled event type (see [`EventType`]). It only broadcasts notifications whose +/// event type is enabled and drops the others. The same goes for subscriptions. +/// +/// Each subscriber has a set of enabled event type. No two subscribers may have the same event type enabled. 
+/// The union of the sets of all subscribers should match the set of the notifier, though this is not mandatory.
+///
+/// ### Mutation policies
+///
+/// The notifier is built with some mutation policies defining how a processed listener mutation must be propagated
+/// to the _parent_.
+///
+/// ### Architecture
+///
+/// #### Internal structure
+///
+/// The notifier notably owns:
+///
+/// - a vector of [`DynCollector`]
+/// - a vector of [`Subscriber`]
+/// - a pool of [`Broadcaster`]
+/// - a map of [`Listener`]
+///
+/// Collectors and subscribers form the scaffold. They are provided to the ctor, are immutable and share its
+/// lifespan. Both materialize a connection to the notifier _parents_: collectors for incoming notifications
+/// and subscribers for outgoing subscriptions. They can usually be paired by index in their respective
+/// vectors, but this is by no means mandatory, leaving room for special edge cases.
+///
+/// The broadcasters are built in the ctor according to a provided count. They act as a pool of workers competing
+/// for the processing of incoming notifications.
+///
+/// The listeners are managed dynamically through registration/unregistration calls.
+///
+/// #### External topology
 ///
-/// A Notifier may own some [`DynCollector`]s which collect incoming notifications and relay them
-/// to their owner. The notification sources of the collectors should be considered as the "parents" in
-/// the notification DAG.
+/// The notifier is designed so that many instances can be interconnected and form a DAG of notifiers.
 ///
-/// A Notifier may own some [`Subscriber`]s which report the subscription needs of their owner's listeners
-/// to the "parents" in the notification DAG.
+/// However, the notification path from the root all the way downstream to the final clients forms a tree,
+/// not a DAG. This is because, for a given type of notification (see [`EventType`]), a notifier has at most a single
+/// _parent_ provider.
 ///
-/// A notifier broadcasts its incoming notifications to its listeners.
+/// The same is symmetrically true of subscriptions, which travel upstream from clients to the root along a tree,
+/// meaning that, for a given type of subscription (see [`EventType`]), a notifier has at most a single subscriber,
+/// targeting a single _parent_.
 ///
-/// A notifier is build with a specific set of enabled event types (see `enabled_events`). All disabled
-/// event types are ignored by it. It is however possible to manually subscribe to a disabled scope and
-/// thus have a custom made collector of the notifier receive notifications of the disabled scope,
+/// ### Special considerations
+///
+/// A notifier is built with a specific set of enabled event types. It is however possible to manually subscribe
+/// to a disabled scope and thus have a custom-made collector of the notifier receive notifications of this disabled scope,
 /// allowing some handling of the notification into the collector before it gets dropped by the notifier.
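The worker-pool behavior mentioned in the doc comment above can be pictured with a minimal sketch: a few broadcaster-like tasks share one multi-consumer receiver, so each incoming notification is claimed and processed by exactly one of them. This assumes the `tokio` and `async_channel` crates and uses invented names; it is not the notifier's actual wiring.

```rust
// Minimal model of a broadcaster pool: N tasks compete on a single
// multi-consumer channel, so every notification is handled exactly once.
use async_channel::unbounded;

#[tokio::main]
async fn main() {
    let (tx, rx) = unbounded::<String>();

    let workers: Vec<_> = (0..3)
        .map(|idx| {
            let rx = rx.clone();
            tokio::spawn(async move {
                while let Ok(notification) = rx.recv().await {
                    println!("[Broadcaster-demo-{idx}] handling {notification}");
                }
            })
        })
        .collect();

    for i in 0..10 {
        tx.send(format!("notification #{i}")).await.unwrap();
    }
    tx.close(); // let the workers drain the remaining messages and exit

    for worker in workers {
        worker.await.unwrap();
    }
}
```

The competition only decides which broadcaster processes a given notification; fan-out to the registered listeners then happens inside that single broadcaster through its per-event broadcasting plan.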
#[derive(Debug)] pub struct Notifier @@ -80,9 +124,11 @@ where enabled_events: EventSwitches, collectors: Vec>, subscribers: Vec>, + subscription_context: SubscriptionContext, broadcasters: usize, + policies: MutationPolicies, ) -> Self { - Self::with_sync(name, enabled_events, collectors, subscribers, broadcasters, None) + Self::with_sync(name, enabled_events, collectors, subscribers, subscription_context, broadcasters, policies, None) } pub fn with_sync( @@ -90,18 +136,39 @@ where enabled_events: EventSwitches, collectors: Vec>, subscribers: Vec>, + subscription_context: SubscriptionContext, broadcasters: usize, + policies: MutationPolicies, _sync: Option>, ) -> Self { - Self { inner: Arc::new(Inner::new(name, enabled_events, collectors, subscribers, broadcasters, _sync)) } + Self { + inner: Arc::new(Inner::new( + name, + enabled_events, + collectors, + subscribers, + subscription_context, + broadcasters, + policies, + _sync, + )), + } + } + + pub fn subscription_context(&self) -> &SubscriptionContext { + &self.inner.subscription_context + } + + pub fn enabled_events(&self) -> &EventSwitches { + &self.inner.enabled_events } pub fn start(self: Arc) { self.inner.clone().start(self.clone()); } - pub fn register_new_listener(&self, connection: C) -> ListenerId { - self.inner.clone().register_new_listener(connection) + pub fn register_new_listener(&self, connection: C, lifespan: ListenerLifespan) -> ListenerId { + self.inner.register_new_listener(connection, lifespan) } /// Resend the compounded subscription state of the notifier to its subscribers (its parents). @@ -191,6 +258,15 @@ where /// Subscribers subscribers: Vec>, + /// Enabled Subscriber by event type + enabled_subscriber: EventArray>>, + + /// Subscription context + subscription_context: SubscriptionContext, + + /// Mutation policies + policies: MutationPolicies, + /// Name of the notifier, used in logs pub name: &'static str, @@ -208,23 +284,47 @@ where enabled_events: EventSwitches, collectors: Vec>, subscribers: Vec>, + subscription_context: SubscriptionContext, broadcasters: usize, + policies: MutationPolicies, _sync: Option>, ) -> Self { assert!(broadcasters > 0, "a notifier requires a minimum of one broadcaster"); let notification_channel = Channel::unbounded(); let broadcasters = (0..broadcasters) - .map(|_| Arc::new(Broadcaster::new(name, notification_channel.receiver.clone(), _sync.clone()))) + .map(|idx| { + Arc::new(Broadcaster::new( + name, + idx, + subscription_context.clone(), + notification_channel.receiver.clone(), + _sync.clone(), + )) + }) .collect::>(); + let enabled_subscriber = EventArray::from_fn(|index| { + let event: EventType = index.try_into().unwrap(); + let mut iter = subscribers.iter().filter(|&x| x.handles_event_type(event)).cloned(); + let subscriber = iter.next(); + assert!(iter.next().is_none(), "A notifier is not allowed to have more than one subscriber per event type"); + subscriber + }); + let utxos_changed_capacity = match policies.utxo_changed { + UtxosChangedMutationPolicy::AddressSet => subscription_context.address_tracker.max_addresses(), + UtxosChangedMutationPolicy::Wildcard => None, + }; Self { enabled_events, listeners: Mutex::new(HashMap::new()), - subscriptions: Mutex::new(ArrayBuilder::compounded()), + subscriptions: Mutex::new(ArrayBuilder::compounded(utxos_changed_capacity)), started: Arc::new(AtomicBool::new(false)), notification_channel, broadcasters, collectors, subscribers, + enabled_subscriber, + subscription_context, + policies, name, _sync, } @@ -242,7 +342,7 @@ where } } - 
fn register_new_listener(self: &Arc, connection: C) -> ListenerId { + fn register_new_listener(self: &Arc, connection: C, lifespan: ListenerLifespan) -> ListenerId { let mut listeners = self.listeners.lock(); loop { let id = u64::from_le_bytes(rand::random::<[u8; 8]>()); @@ -250,7 +350,10 @@ where // This is very unlikely to happen but still, check for duplicates if let Entry::Vacant(e) = listeners.entry(id) { trace!("[Notifier {}] registering listener {id}", self.name); - let listener = Listener::new(connection); + let listener = match lifespan { + ListenerLifespan::Static(policies) => Listener::new_static(id, connection, &self.subscription_context, policies), + ListenerLifespan::Dynamic => Listener::new(id, connection), + }; e.insert(listener); return id; } @@ -264,13 +367,13 @@ where trace!("[Notifier {}] unregistering listener {id}", self.name); // Cancel all remaining active subscriptions - let mut subscriptions = listener + let mut events = listener .subscriptions .iter() - .filter_map(|subscription| if subscription.active() { Some(subscription.scope()) } else { None }) + .filter_map(|subscription| if subscription.active() { Some(subscription.event_type()) } else { None }) .collect_vec(); - subscriptions.drain(..).for_each(|scope| { - let _ = self.execute_subscribe_command_impl(id, &mut listener, scope, Command::Stop); + events.drain(..).for_each(|event| { + let _ = self.execute_subscribe_command_impl(id, &mut listener, event.into(), Command::Stop); }); // Close the listener @@ -283,7 +386,7 @@ where } pub fn execute_subscribe_command(&self, id: ListenerId, scope: Scope, command: Command) -> Result<()> { - let event: EventType = (&scope).into(); + let event = scope.event_type(); if self.enabled_events[event] { let mut listeners = self.listeners.lock(); if let Some(listener) = listeners.get_mut(&id) { @@ -292,10 +395,7 @@ where trace!("[Notifier {}] {command} notifying listener {id} about {scope} error: listener id not found", self.name); } } else { - trace!( - "[Notifier {}] {command} notifying listener {id} about {scope:?} error: event type {event:?} is disabled", - self.name - ); + trace!("[Notifier {}] {command} notifying listener {id} about {scope} error: event type {event:?} is disabled", self.name); return Err(Error::EventTypeDisabled); } Ok(()) @@ -308,27 +408,37 @@ where scope: Scope, command: Command, ) -> Result<()> { - let event: EventType = (&scope).into(); - let mut subscriptions = self.subscriptions.lock(); - debug!("[Notifier {}] {command} notifying about {scope} to listener {id}", self.name); - if let Some(mutations) = listener.mutate(Mutation::new(command, scope.clone())) { - trace!("[Notifier {}] {command} notifying listener {id} about {scope:?} involves mutations {mutations:?}", self.name); + let mut sync_feedback: bool = false; + let event = scope.event_type(); + let scope_trace = format!("{scope}"); + debug!("[Notifier {}] {command} notifying about {scope_trace} to listener {id} - {}", self.name, listener.connection()); + let outcome = listener.mutate(Mutation::new(command, scope), self.policies, &self.subscription_context)?; + if outcome.has_changes() { + trace!( + "[Notifier {}] {command} notifying listener {id} about {scope_trace} involves {} mutations", + self.name, + outcome.mutations.len(), + ); // Update broadcasters - let subscription = listener.subscriptions[event].clone_arc(); - self.broadcasters - .iter() - .try_for_each(|broadcaster| broadcaster.register(subscription.clone(), id, listener.connection()))?; - // Compound mutations - let mut 
compound_result = None; - for mutation in mutations { - compound_result = subscriptions[event].compound(mutation); - } - // Report to the parents - if let Some(mutation) = compound_result { - self.subscribers.iter().try_for_each(|x| x.mutate(mutation.clone()))?; + match (listener.subscriptions[event].active(), outcome.mutated) { + (true, Some(subscription)) => { + self.broadcasters + .iter() + .try_for_each(|broadcaster| broadcaster.register(subscription.clone(), id, listener.connection()))?; + } + (true, None) => { + sync_feedback = true; + } + (false, _) => { + self.broadcasters.iter().try_for_each(|broadcaster| broadcaster.unregister(event, id))?; + } } + self.apply_mutations(event, outcome.mutations, &self.subscription_context)?; } else { - trace!("[Notifier {}] {command} notifying listener {id} about {scope:?} is ignored (no mutation)", self.name); + trace!("[Notifier {}] {command} notifying listener {id} about {scope_trace} is ignored (no mutation)", self.name); + sync_feedback = true; + } + if sync_feedback { // In case we have a sync channel, report that the command was processed. // This is for test only. if let Some(ref sync) = self._sync { @@ -338,6 +448,22 @@ where Ok(()) } + fn apply_mutations(&self, event: EventType, mutations: Vec, context: &SubscriptionContext) -> Result<()> { + let mut subscriptions = self.subscriptions.lock(); + // Compound mutations + let mut compound_result = None; + for mutation in mutations { + compound_result = subscriptions[event].compound(mutation, context); + } + // Report to the parent if any + if let Some(mutation) = compound_result { + if let Some(ref subscriber) = self.enabled_subscriber[event] { + subscriber.mutate(mutation)?; + } + } + Ok(()) + } + fn start_notify(&self, id: ListenerId, scope: Scope) -> Result<()> { self.execute_subscribe_command(id, scope, Command::Start) } @@ -356,7 +482,7 @@ where fn renew_subscriptions(&self) -> Result<()> { let subscriptions = self.subscriptions.lock(); EVENT_TYPE_ARRAY.iter().copied().filter(|x| self.enabled_events[*x] && subscriptions[*x].active()).try_for_each(|x| { - let mutation = Mutation::new(Command::Start, subscriptions[x].scope()); + let mutation = Mutation::new(Command::Start, subscriptions[x].scope(&self.subscription_context)); self.subscribers.iter().try_for_each(|subscriber| subscriber.mutate(mutation.clone()))?; Ok(()) }) @@ -416,6 +542,9 @@ pub mod test_helpers { subscriber::test_helpers::SubscriptionMessage, }; use async_channel::Sender; + use std::time::Duration; + + pub const SYNC_MAX_DELAY: Duration = Duration::from_secs(2); pub type TestConnection = ChannelConnection; pub type TestNotifier = Notifier>; @@ -702,6 +831,7 @@ mod tests { subscriber::test_helpers::{SubscriptionManagerMock, SubscriptionMessage}, }; use async_channel::{unbounded, Receiver, Sender}; + use tokio::time::timeout; const SUBSCRIPTION_MANAGER_ID: u64 = 0; @@ -718,14 +848,16 @@ mod tests { impl Test { fn new(name: &'static str, listener_count: usize, steps: Vec) -> Self { + const IDENT: &str = "test"; type TestConverter = ConverterFrom; type TestCollector = CollectorFrom; // Build the full-featured notifier let (sync_sender, sync_receiver) = unbounded(); let (notification_sender, notification_receiver) = unbounded(); let (subscription_sender, subscription_receiver) = unbounded(); - let collector = Arc::new(TestCollector::new("test", notification_receiver, Arc::new(TestConverter::new()))); + let collector = Arc::new(TestCollector::new(IDENT, notification_receiver, Arc::new(TestConverter::new()))); let 
subscription_manager = Arc::new(SubscriptionManagerMock::new(subscription_sender)); + let subscription_context = SubscriptionContext::new(); let subscriber = Arc::new(Subscriber::new("test", EVENT_TYPE_ARRAY[..].into(), subscription_manager, SUBSCRIPTION_MANAGER_ID)); let notifier = Arc::new(TestNotifier::with_sync( @@ -733,7 +865,9 @@ mod tests { EVENT_TYPE_ARRAY[..].into(), vec![collector], vec![subscriber], + subscription_context, 1, + Default::default(), Some(sync_sender), )); // Create the listeners @@ -741,8 +875,8 @@ mod tests { let mut notification_receivers = Vec::with_capacity(listener_count); for _ in 0..listener_count { let (sender, receiver) = unbounded(); - let connection = TestConnection::new(sender, ChannelType::Closable); - listeners.push(notifier.register_new_listener(connection)); + let connection = TestConnection::new(IDENT, sender, ChannelType::Closable); + listeners.push(notifier.register_new_listener(connection, ListenerLifespan::Dynamic)); notification_receivers.push(receiver); } // Return the built test object @@ -778,7 +912,7 @@ mod tests { ); trace!("Receiving sync message #{step_idx} after subscribing"); assert!( - self.sync_receiver.recv().await.is_ok(), + timeout(SYNC_MAX_DELAY, self.sync_receiver.recv()).await.unwrap().is_ok(), "{} - {}: receiving a sync message failed", self.name, step.name @@ -790,6 +924,13 @@ mod tests { "{} - {}: the listener[{}] mutation {mutation:?} yielded the wrong subscription", self.name, step.name, idx ); + assert!( + self.subscription_receiver.is_empty(), + "{} - {}: listener[{}] mutation {mutation:?} yielded an extra subscription but should not", + self.name, + step.name, + idx + ); } else { assert!( self.subscription_receiver.is_empty(), @@ -811,7 +952,12 @@ mod tests { step.name ); trace!("Receiving sync message #{step_idx} after notifying"); - assert!(self.sync_receiver.recv().await.is_ok(), "{} - {}: receiving a sync message failed", self.name, step.name); + assert!( + timeout(SYNC_MAX_DELAY, self.sync_receiver.recv()).await.unwrap().is_ok(), + "{} - {}: receiving a sync message failed", + self.name, + step.name + ); // Check what the listeners do receive for (idx, expected_notifications) in step.expected_notifications.iter().enumerate() { diff --git a/notify/src/root.rs b/notify/src/root.rs index b21e7662a..257a30de9 100644 --- a/notify/src/root.rs +++ b/notify/src/root.rs @@ -6,7 +6,10 @@ use crate::{ notifier::Notify, scope::Scope, subscriber::SubscriptionManager, - subscription::{array::ArrayBuilder, Command, Mutation, SingleSubscription}, + subscription::{ + array::ArrayBuilder, context::SubscriptionContext, Command, DynSubscription, MutateSingle, Mutation, MutationPolicies, + UtxosChangedMutationPolicy, + }, }; use async_channel::Sender; use async_trait::async_trait; @@ -34,7 +37,12 @@ where N: Notification, { pub fn new(sender: Sender) -> Self { - let inner = Arc::new(Inner::new(sender)); + let subscription_context = SubscriptionContext::new(); + Self::with_context(sender, subscription_context) + } + + pub fn with_context(sender: Sender, subscription_context: SubscriptionContext) -> Self { + let inner = Arc::new(Inner::new(sender, subscription_context)); Self { inner } } @@ -89,22 +97,27 @@ where N: Notification, { sender: Sender, - subscriptions: RwLock>, + subscriptions: RwLock>, + subscription_context: SubscriptionContext, + policies: MutationPolicies, } impl Inner where N: Notification, { - fn new(sender: Sender) -> Self { - let subscriptions = RwLock::new(ArrayBuilder::single()); - Self { sender, subscriptions } + 
const ROOT_LISTENER_ID: ListenerId = 1; + + fn new(sender: Sender, subscription_context: SubscriptionContext) -> Self { + let subscriptions = RwLock::new(ArrayBuilder::single(Self::ROOT_LISTENER_ID, None)); + let policies = MutationPolicies::new(UtxosChangedMutationPolicy::Wildcard); + Self { sender, subscriptions, subscription_context, policies } } fn send(&self, notification: N) -> Result<()> { let event = notification.event_type(); let subscription = &self.subscriptions.read()[event]; - if let Some(applied_notification) = notification.apply_subscription(&**subscription) { + if let Some(applied_notification) = notification.apply_subscription(&**subscription, &self.subscription_context) { self.sender.try_send(applied_notification)?; } Ok(()) @@ -113,7 +126,7 @@ where pub fn execute_subscribe_command(&self, scope: Scope, command: Command) -> Result<()> { let mutation = Mutation::new(command, scope); let mut subscriptions = self.subscriptions.write(); - subscriptions[mutation.event_type()].mutate(mutation); + subscriptions[mutation.event_type()].mutate(mutation, self.policies, &self.subscription_context)?; Ok(()) } @@ -125,7 +138,7 @@ where let event = notification.event_type(); let subscription = &self.subscriptions.read()[event]; if subscription.active() { - if let Some(applied_notification) = notification.apply_subscription(&**subscription) { + if let Some(applied_notification) = notification.apply_subscription(&**subscription, &self.subscription_context) { self.sender.try_send(applied_notification)?; } } diff --git a/notify/src/scope.rs b/notify/src/scope.rs index b10888877..0d9e33544 100644 --- a/notify/src/scope.rs +++ b/notify/src/scope.rs @@ -32,6 +32,7 @@ macro_rules! scope_enum { } scope_enum! { +/// Subscription scope for every event type #[derive(Clone, Display, Debug, PartialEq, Eq, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] pub enum Scope { BlockAdded, @@ -46,6 +47,12 @@ pub enum Scope { } } +impl Scope { + pub fn event_type(&self) -> EventType { + self.into() + } +} + #[derive(Clone, Display, Debug, Default, PartialEq, Eq, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] pub struct BlockAddedScope {} diff --git a/notify/src/subscriber.rs b/notify/src/subscriber.rs index 802a6f470..7e2bf4eab 100644 --- a/notify/src/subscriber.rs +++ b/notify/src/subscriber.rs @@ -6,7 +6,7 @@ use std::sync::{ Arc, }; extern crate derive_more; -use crate::events::EventSwitches; +use crate::events::{EventSwitches, EventType}; use super::{ error::Result, @@ -16,7 +16,7 @@ use super::{ }; use workflow_core::channel::Channel; -/// A manager of subscriptions to notifications for registered listeners +/// A manager of subscriptions (see [`Scope`]) for registered listeners #[async_trait] pub trait SubscriptionManager: Send + Sync + Debug { async fn start_notify(&self, id: ListenerId, scope: Scope) -> Result<()>; @@ -32,7 +32,13 @@ pub trait SubscriptionManager: Send + Sync + Debug { pub type DynSubscriptionManager = Arc; -/// A subscriber handling subscription messages executing them into a [SubscriptionManager]. +/// A subscriber handling subscription messages as [`Mutation`] and executing them into a [SubscriptionManager] +/// +/// A subscriber has a set of enabled event type (see [`EventType`]). It only handles subscriptions +/// whose event type is enabled and drops all others. 
+/// +/// A subscriber has a listener ID identifying its owner (usually a [`Notifier`](crate::notifier::Notifier)) as a listener of its manager +/// (usually also a [`Notifier`](crate::notifier::Notifier)). #[derive(Debug)] pub struct Subscriber { name: &'static str, @@ -71,6 +77,10 @@ impl Subscriber { } } + pub fn handles_event_type(&self, event_type: EventType) -> bool { + self.enabled_events[event_type] + } + pub fn start(self: &Arc) { self.clone().spawn_subscription_receiver_task(); } @@ -84,7 +94,7 @@ impl Subscriber { trace!("[Subscriber {}] starting subscription receiving task", self.name); workflow_core::task::spawn(async move { while let Ok(mutation) = self.incoming.recv().await { - if self.enabled_events[mutation.event_type()] { + if self.handles_event_type(mutation.event_type()) { if let Err(err) = self .subscription_manager .clone() diff --git a/notify/src/subscription/array.rs b/notify/src/subscription/array.rs index 6aad07c45..5a5a988d2 100644 --- a/notify/src/subscription/array.rs +++ b/notify/src/subscription/array.rs @@ -1,27 +1,37 @@ -use super::{compounded, single, CompoundedSubscription, SingleSubscription}; -use crate::events::{EventArray, EventType}; +use crate::{ + events::{EventArray, EventType}, + listener::ListenerId, + subscription::{compounded, single, CompoundedSubscription, DynSubscription}, +}; +use std::sync::Arc; pub struct ArrayBuilder {} impl ArrayBuilder { - pub fn single() -> EventArray { + pub fn single(listener_id: ListenerId, utxos_changed_capacity: Option) -> EventArray { EventArray::from_fn(|i| { let event_type = EventType::try_from(i).unwrap(); - let subscription: SingleSubscription = match event_type { - EventType::VirtualChainChanged => Box::::default(), - EventType::UtxosChanged => Box::::default(), - _ => Box::new(single::OverallSubscription::new(event_type, false)), + let subscription: DynSubscription = match event_type { + EventType::VirtualChainChanged => Arc::::default(), + EventType::UtxosChanged => Arc::new(single::UtxosChangedSubscription::with_capacity( + single::UtxosChangedState::None, + listener_id, + utxos_changed_capacity.unwrap_or_default(), + )), + _ => Arc::new(single::OverallSubscription::new(event_type, false)), }; subscription }) } - pub fn compounded() -> EventArray { + pub fn compounded(utxos_changed_capacity: Option) -> EventArray { EventArray::from_fn(|i| { let event_type = EventType::try_from(i).unwrap(); let subscription: CompoundedSubscription = match event_type { EventType::VirtualChainChanged => Box::::default(), - EventType::UtxosChanged => Box::::default(), + EventType::UtxosChanged => { + Box::new(compounded::UtxosChangedSubscription::with_capacity(utxos_changed_capacity.unwrap_or_default())) + } _ => Box::new(compounded::OverallSubscription::new(event_type)), }; subscription @@ -36,8 +46,8 @@ mod tests { #[test] fn test_array_builder() { - let single = ArrayBuilder::single(); - let compounded = ArrayBuilder::compounded(); + let single = ArrayBuilder::single(0, None); + let compounded = ArrayBuilder::compounded(None); EVENT_TYPE_ARRAY.into_iter().for_each(|event| { assert_eq!( event, diff --git a/notify/src/subscription/compounded.rs b/notify/src/subscription/compounded.rs index 72f5188a7..2a36647f4 100644 --- a/notify/src/subscription/compounded.rs +++ b/notify/src/subscription/compounded.rs @@ -1,11 +1,11 @@ -use super::{Compounded, Mutation, Subscription}; use crate::{ + address::{error::Result, tracker::Counters}, events::EventType, scope::{Scope, UtxosChangedScope, VirtualChainChangedScope}, - 
subscription::Command, + subscription::{context::SubscriptionContext, Command, Compounded, Mutation, Subscription}, }; -use kaspa_addresses::Address; -use std::collections::{HashMap, HashSet}; +use itertools::Itertools; +use kaspa_addresses::{Address, Prefix}; #[derive(Clone, Debug, PartialEq, Eq)] pub struct OverallSubscription { @@ -20,7 +20,7 @@ impl OverallSubscription { } impl Compounded for OverallSubscription { - fn compound(&mut self, mutation: Mutation) -> Option { + fn compound(&mut self, mutation: Mutation, _context: &SubscriptionContext) -> Option { assert_eq!(self.event_type(), mutation.event_type()); match mutation.command { Command::Start => { @@ -51,7 +51,7 @@ impl Subscription for OverallSubscription { self.active > 0 } - fn scope(&self) -> Scope { + fn scope(&self, _context: &SubscriptionContext) -> Scope { self.event_type.into() } } @@ -84,7 +84,7 @@ impl VirtualChainChangedSubscription { } impl Compounded for VirtualChainChangedSubscription { - fn compound(&mut self, mutation: Mutation) -> Option { + fn compound(&mut self, mutation: Mutation, _context: &SubscriptionContext) -> Option { assert_eq!(self.event_type(), mutation.event_type()); if let Scope::VirtualChainChanged(ref scope) = mutation.scope { let all = scope.include_accepted_transaction_ids; @@ -144,7 +144,7 @@ impl Subscription for VirtualChainChangedSubscription { self.include_accepted_transaction_ids.iter().sum::() > 0 } - fn scope(&self) -> Scope { + fn scope(&self, _context: &SubscriptionContext) -> Scope { Scope::VirtualChainChanged(VirtualChainChangedScope::new(self.all() > 0)) } } @@ -152,71 +152,73 @@ impl Subscription for VirtualChainChangedSubscription { #[derive(Clone, Default, Debug, PartialEq, Eq)] pub struct UtxosChangedSubscription { all: usize, - addresses: HashMap, + indexes: Counters, +} + +impl UtxosChangedSubscription { + pub fn new() -> Self { + Self { all: 0, indexes: Counters::new() } + } + + pub fn with_capacity(capacity: usize) -> Self { + Self { all: 0, indexes: Counters::with_capacity(capacity) } + } + + pub fn to_addresses(&self, prefix: Prefix, context: &SubscriptionContext) -> Vec
{
+        self.indexes
+            .iter()
+            .filter_map(|(&index, &count)| {
+                (count > 0).then_some(()).and_then(|_| context.address_tracker.get_address_at_index(index, prefix))
+            })
+            .collect_vec()
+    }
+
+    pub fn register(&mut self, addresses: Vec<Address>, context: &SubscriptionContext) -> Result<Vec<Address>> {
+        context.address_tracker.register(&mut self.indexes, addresses)
+    }
+
+    pub fn unregister(&mut self, addresses: Vec<Address>, context: &SubscriptionContext) -> Vec<Address> {
+        context.address_tracker.unregister(&mut self.indexes, addresses)
+    }
 }
 
 impl Compounded for UtxosChangedSubscription {
-    fn compound(&mut self, mutation: Mutation) -> Option<Mutation> {
+    fn compound(&mut self, mutation: Mutation, context: &SubscriptionContext) -> Option<Mutation> {
         assert_eq!(self.event_type(), mutation.event_type());
-        if let Scope::UtxosChanged(mut scope) = mutation.scope {
+        if let Scope::UtxosChanged(scope) = mutation.scope {
             match mutation.command {
                 Command::Start => {
                     if scope.addresses.is_empty() {
                         // Add All
                         self.all += 1;
                         if self.all == 1 {
-                            return Some(Mutation::new(Command::Start, Scope::UtxosChanged(UtxosChangedScope::default())));
+                            return Some(Mutation::new(Command::Start, UtxosChangedScope::default().into()));
                         }
                     } else {
                         // Add(A)
-                        let mut added = vec![];
-                        // Make sure no duplicate exists in addresses
-                        let addresses: HashSet<Address>
= scope.addresses.drain(0..).collect(); - for address in addresses { - self.addresses.entry(address.clone()).and_modify(|counter| *counter += 1).or_insert_with(|| { - added.push(address); - 1 - }); - } + let added = self.register(scope.addresses, context).expect("compounded always registers"); if !added.is_empty() && self.all == 0 { - return Some(Mutation::new(Command::Start, Scope::UtxosChanged(UtxosChangedScope::new(added)))); + return Some(Mutation::new(Command::Start, UtxosChangedScope::new(added).into())); } } } Command::Stop => { if !scope.addresses.is_empty() { // Remove(R) - let mut removed = vec![]; - // Make sure no duplicate exists in addresses - let addresses: HashSet
= scope.addresses.drain(0..).collect(); - for address in addresses { - assert!(self.addresses.contains_key(&address)); - self.addresses.entry(address.clone()).and_modify(|counter| { - *counter -= 1; - if *counter == 0 { - removed.push(address); - } - }); - } - // Cleanup self.addresses - removed.iter().for_each(|x| { - self.addresses.remove(x); - }); + let removed = self.unregister(scope.addresses, context); if !removed.is_empty() && self.all == 0 { - return Some(Mutation::new(Command::Stop, Scope::UtxosChanged(UtxosChangedScope::new(removed)))); + return Some(Mutation::new(Command::Stop, UtxosChangedScope::new(removed).into())); } } else { // Remove All assert!(self.all > 0); self.all -= 1; if self.all == 0 { - if !self.addresses.is_empty() { - return Some(Mutation::new( - Command::Start, - Scope::UtxosChanged(UtxosChangedScope::new(self.addresses.keys().cloned().collect())), - )); + let addresses = self.to_addresses(Prefix::Mainnet, context); + if !addresses.is_empty() { + return Some(Mutation::new(Command::Start, UtxosChangedScope::new(addresses).into())); } else { - return Some(Mutation::new(Command::Stop, Scope::UtxosChanged(UtxosChangedScope::default()))); + return Some(Mutation::new(Command::Stop, UtxosChangedScope::default().into())); } } } @@ -234,20 +236,25 @@ impl Subscription for UtxosChangedSubscription { } fn active(&self) -> bool { - self.all > 0 || !self.addresses.is_empty() + self.all > 0 || !self.indexes.is_empty() } - fn scope(&self) -> Scope { - let addresses = if self.all > 0 { vec![] } else { self.addresses.keys().cloned().collect() }; + fn scope(&self, context: &SubscriptionContext) -> Scope { + let addresses = if self.all > 0 { vec![] } else { self.to_addresses(Prefix::Mainnet, context) }; Scope::UtxosChanged(UtxosChangedScope::new(addresses)) } } #[cfg(test)] mod tests { + use kaspa_core::trace; + use super::super::*; use super::*; - use crate::{address::test_helpers::get_3_addresses, scope::BlockAddedScope}; + use crate::{ + address::{test_helpers::get_3_addresses, tracker::Counter}, + scope::BlockAddedScope, + }; use std::panic::AssertUnwindSafe; struct Step { @@ -258,6 +265,7 @@ mod tests { struct Test { name: &'static str, + context: SubscriptionContext, initial_state: CompoundedSubscription, steps: Vec, final_state: CompoundedSubscription, @@ -266,9 +274,11 @@ mod tests { impl Test { fn run(&self) -> CompoundedSubscription { let mut state = self.initial_state.clone_box(); - for step in self.steps.iter() { - let result = state.compound(step.mutation.clone()); + for (idx, step) in self.steps.iter().enumerate() { + trace!("{}: {}", idx, step.name); + let result = state.compound(step.mutation.clone(), &self.context); assert_eq!(step.result, result, "{} - {}: wrong compound result", self.name, step.name); + trace!("{}: state = {:?}", idx, state); } assert_eq!(*self.final_state, *state, "{}: wrong final state", self.name); state @@ -283,6 +293,7 @@ mod tests { let remove = || Mutation::new(Command::Stop, Scope::BlockAdded(BlockAddedScope {})); let test = Test { name: "OverallSubscription 0 to 2 to 0", + context: SubscriptionContext::new(), initial_state: none(), steps: vec![ Step { name: "add 1", mutation: add(), result: Some(add()) }, @@ -295,7 +306,7 @@ mod tests { let mut state = test.run(); // Removing once more must panic - let result = std::panic::catch_unwind(AssertUnwindSafe(|| state.compound(remove()))); + let result = std::panic::catch_unwind(AssertUnwindSafe(|| state.compound(remove(), &test.context))); assert!(result.is_err(), "{}: trying to remove when 
counter is zero must panic", test.name); } @@ -312,6 +323,7 @@ mod tests { let remove_all = || m(Command::Stop, true); let test = Test { name: "VirtualChainChanged", + context: SubscriptionContext::new(), initial_state: none(), steps: vec![ Step { name: "add all 1", mutation: add_all(), result: Some(add_all()) }, @@ -335,20 +347,21 @@ mod tests { let mut state = test.run(); // Removing once more must panic - let result = std::panic::catch_unwind(AssertUnwindSafe(|| state.compound(remove_all()))); + let result = std::panic::catch_unwind(AssertUnwindSafe(|| state.compound(remove_all(), &test.context))); assert!(result.is_err(), "{}: trying to remove all when counter is zero must panic", test.name); - let result = std::panic::catch_unwind(AssertUnwindSafe(|| state.compound(remove_reduced()))); + let result = std::panic::catch_unwind(AssertUnwindSafe(|| state.compound(remove_reduced(), &test.context))); assert!(result.is_err(), "{}: trying to remove reduced when counter is zero must panic", test.name); } #[test] #[allow(clippy::redundant_clone)] fn test_utxos_changed_compounding() { + kaspa_core::log::try_init_logger("trace,kaspa_notify=trace"); let a_stock = get_3_addresses(true); let a = |indexes: &[usize]| indexes.iter().map(|idx| (a_stock[*idx]).clone()).collect::>(); let m = |command: Command, indexes: &[usize]| -> Mutation { - Mutation { command, scope: Scope::UtxosChanged(UtxosChangedScope { addresses: a(indexes) }) } + Mutation { command, scope: Scope::UtxosChanged(UtxosChangedScope::new(a(indexes))) } }; let none = Box::::default; @@ -362,6 +375,7 @@ mod tests { let test = Test { name: "UtxosChanged", + context: SubscriptionContext::new(), initial_state: none(), steps: vec![ Step { name: "add all 1", mutation: add_all(), result: Some(add_all()) }, @@ -383,14 +397,20 @@ mod tests { Step { name: "remove all 1, revealing a0", mutation: remove_all(), result: Some(add_0()) }, Step { name: "remove a0", mutation: remove_0(), result: Some(remove_0()) }, ], - final_state: none(), + final_state: Box::new(UtxosChangedSubscription { + all: 0, + indexes: Counters::with_counters(vec![ + Counter { index: 0, count: 0, locked: true }, + Counter { index: 1, count: 0, locked: false }, + ]), + }), }; let mut state = test.run(); // Removing once more must panic - let result = std::panic::catch_unwind(AssertUnwindSafe(|| state.compound(remove_all()))); + let result = std::panic::catch_unwind(AssertUnwindSafe(|| state.compound(remove_all(), &test.context))); assert!(result.is_err(), "{}: trying to remove all when counter is zero must panic", test.name); - let result = std::panic::catch_unwind(AssertUnwindSafe(|| state.compound(remove_0()))); - assert!(result.is_err(), "{}: trying to remove an address when its counter is zero must panic", test.name); + // let result = std::panic::catch_unwind(AssertUnwindSafe(|| state.compound(remove_0(), &test.context))); + // assert!(result.is_err(), "{}: trying to remove an address when its counter is zero must panic", test.name); } } diff --git a/notify/src/subscription/context.rs b/notify/src/subscription/context.rs new file mode 100644 index 000000000..c5faf35a2 --- /dev/null +++ b/notify/src/subscription/context.rs @@ -0,0 +1,414 @@ +use crate::{ + address::tracker::Tracker, + listener::ListenerId, + subscription::{ + single::{UtxosChangedState, UtxosChangedSubscription}, + DynSubscription, + }, +}; +use std::{ops::Deref, sync::Arc}; + +#[cfg(test)] +use kaspa_addresses::Address; + +#[derive(Debug)] +pub struct SubscriptionContextInner { + pub address_tracker: 
Tracker, + pub utxos_changed_subscription_to_all: DynSubscription, +} + +impl SubscriptionContextInner { + const CONTEXT_LISTENER_ID: ListenerId = ListenerId::MAX; + + pub fn new() -> Self { + Self::with_options(None) + } + + pub fn with_options(max_addresses: Option) -> Self { + let address_tracker = Tracker::new(max_addresses); + let utxos_changed_subscription_all = + Arc::new(UtxosChangedSubscription::new(UtxosChangedState::All, Self::CONTEXT_LISTENER_ID)); + Self { address_tracker, utxos_changed_subscription_to_all: utxos_changed_subscription_all } + } + + #[cfg(test)] + pub fn with_addresses(addresses: &[Address]) -> Self { + let address_tracker = Tracker::with_addresses(addresses); + let utxos_changed_subscription_all = + Arc::new(UtxosChangedSubscription::new(UtxosChangedState::All, Self::CONTEXT_LISTENER_ID)); + Self { address_tracker, utxos_changed_subscription_to_all: utxos_changed_subscription_all } + } +} + +impl Default for SubscriptionContextInner { + fn default() -> Self { + Self::new() + } +} + +#[derive(Clone, Debug, Default)] +pub struct SubscriptionContext { + inner: Arc, +} + +impl SubscriptionContext { + pub fn new() -> Self { + Self::with_options(None) + } + + pub fn with_options(max_addresses: Option) -> Self { + let inner = Arc::new(SubscriptionContextInner::with_options(max_addresses)); + Self { inner } + } + + #[cfg(test)] + pub fn with_addresses(addresses: &[Address]) -> Self { + let inner = Arc::new(SubscriptionContextInner::with_addresses(addresses)); + Self { inner } + } +} + +impl Deref for SubscriptionContext { + type Target = SubscriptionContextInner; + + fn deref(&self) -> &Self::Target { + &self.inner + } +} + +#[cfg(test)] +mod tests { + use crate::{ + address::tracker::{CounterMap, Index, IndexSet, Indexer, RefCount}, + subscription::SubscriptionContext, + }; + use itertools::Itertools; + use kaspa_addresses::{Address, Prefix}; + use kaspa_alloc::init_allocator_with_default_settings; + use kaspa_core::trace; + use kaspa_math::Uint256; + use std::collections::{HashMap, HashSet}; + use workflow_perf_monitor::mem::get_process_memory_info; + + fn create_addresses(count: usize) -> Vec
{ + (0..count) + .map(|i| Address::new(Prefix::Mainnet, kaspa_addresses::Version::PubKey, &Uint256::from_u64(i as u64).to_le_bytes())) + .collect() + } + + fn measure_consumed_memory Vec, F2: FnOnce(&T) -> (usize, usize)>( + item_len: usize, + num_items: usize, + ctor: F, + length_and_capacity: F2, + ) -> Vec { + let before = get_process_memory_info().unwrap(); + + trace!("Creating items..."); + let items = ctor(); + + let after = get_process_memory_info().unwrap(); + + trace!("Required item length: {}", item_len); + trace!("Memory consumed: {}", (after.resident_set_size - before.resident_set_size) / num_items as u64); + trace!( + "Memory/idx: {}", + ((after.resident_set_size - before.resident_set_size) as f64 / num_items as f64 / item_len as f64 * 10.0).round() / 10.0 + ); + + let (len, capacity) = length_and_capacity(&items[0]); + match len > 0 { + true => trace!( + "Actual item: len = {}, capacity = {}, free space = +{:.1}%", + len, + capacity, + (capacity - len) as f64 * 100.0 / len as f64 + ), + false => trace!("Actual item: len = {}, capacity = {}", len, capacity), + } + + items + } + + fn init_and_measure_consumed_memory Vec, F2: FnOnce(&T) -> (usize, usize)>( + item_len: usize, + num_items: usize, + ctor: F, + length_and_capacity: F2, + ) -> Vec { + init_allocator_with_default_settings(); + kaspa_core::log::try_init_logger("INFO,kaspa_notify::subscription::context=trace"); + measure_consumed_memory(item_len, num_items, ctor, length_and_capacity) + } + + #[test] + #[ignore = "measuring consumed memory"] + // ITEM = SubscriptionContext + // (measuring IndexMap) + // + // ITEM_LEN NUM_ITEMS MEMORY/ITEM MEM/ADDR + // -------------------------------------------------- + // 10_000_000 5 1_098_744_627 109.9 + // 1_000_000 50 103_581_696 104.0 + // 100_000 100 9_157_836 91.6 + // 10_000 1_000 977_666 97.8 + // 1_000 10_000 94_633 94.6 + // 100 100_000 9_617 96.2 + // 10 1_000_000 1_325 132.5 + // 1 10_000_000 410 410.0 + fn test_subscription_context_size() { + const ITEM_LEN: usize = 10_000_000; + const NUM_ITEMS: usize = 5; + + init_allocator_with_default_settings(); + kaspa_core::log::try_init_logger("INFO,kaspa_notify::subscription::context=trace"); + + trace!("Creating addresses..."); + let addresses = create_addresses(ITEM_LEN); + + let _ = measure_consumed_memory( + ITEM_LEN, + NUM_ITEMS, + || (0..NUM_ITEMS).map(|_| SubscriptionContext::with_addresses(&addresses)).collect_vec(), + |x| (x.address_tracker.len(), x.address_tracker.capacity()), + ); + } + + #[test] + #[ignore = "measuring consumed memory"] + // ITEM = HashMap + // + // ITEM_LEN NUM_ITEMS MEMORY/ITEM MEM/IDX + // -------------------------------------------------- + // 10_000_000 10 151_214_489 15.1 + // 1_000_000 100 18_926_059 18.9 + // 100_000 1_000 1_187_864 11.9 + // 10_000 10_000 152_063 15.2 + // 1_000 100_000 20_576 20.6 + // 100 1_000_000 1_336 13.4 + // 10 10_000_000 241 24.1 + // 1 10_000_000 128 128.4 + fn test_hash_map_u32_u16_size() { + const ITEM_LEN: usize = 1; + const NUM_ITEMS: usize = 10_000_000; + + let _ = init_and_measure_consumed_memory( + ITEM_LEN, + NUM_ITEMS, + || { + (0..NUM_ITEMS) + .map(|_| (0..ITEM_LEN as Index).map(|i| (i, (ITEM_LEN as Index - i) as RefCount)).rev().collect::>()) + .collect_vec() + }, + |x| (x.len(), x.capacity()), + ); + } + + #[test] + #[ignore = "measuring consumed memory"] + // ITEM = CounterMap + // (measuring HashMap) + // + // ITEM_LEN NUM_ITEMS MEMORY/ITEM MEM/IDX + // -------------------------------------------------- + // 10_000_000 10 151_239_065 15.1 + // 
1_000_000 100 18_927_534 18.9 + // 100_000 1_000 1_188_024 11.9 + // 10_000 10_000 152_077 15.2 + // 1_000 100_000 20_587 20.6 + // 100 1_000_000 1_344 13.4 + // 10 10_000_000 249 24.9 + // 1 10_000_000 136 136.5 + fn test_counter_map_size() { + const ITEM_LEN: usize = 10; + const NUM_ITEMS: usize = 10_000_000; + + let _ = init_and_measure_consumed_memory( + ITEM_LEN, + NUM_ITEMS, + || { + (0..NUM_ITEMS) + .map(|_| { + // Reserve the required capacity + // Note: the resulting allocated HashMap bucket count is (capacity * 8 / 7).next_power_of_two() + let mut item = CounterMap::with_capacity(ITEM_LEN); + + (0..ITEM_LEN as Index).for_each(|x| { + item.insert(x); + }); + item + }) + .collect_vec() + }, + |x| (x.len(), x.capacity()), + ); + } + + #[test] + #[ignore = "measuring consumed memory"] + // ITEM = HashSet + // + // ITEM_LEN NUM_ITEMS MEMORY/ITEM MEM/IDX + // -------------------------------------------------- + // 10_000_000 10 84'094'976 8.4 + // 1_000_000 100 10'524'508 10.5 + // 100_000 1_000 662_720 6.6 + // 10_000 10_000 86_369 8.6 + // 1_000 100_000 12_372 12.4 + // 100 1_000_000 821 8.2 + // 10 10_000_000 144 14.4 + // 1 10_000_000 112 112.0 + fn test_hash_set_u32_size() { + const ITEM_LEN: usize = 1_000_000; + const NUM_ITEMS: usize = 100; + + let _ = init_and_measure_consumed_memory( + ITEM_LEN, + NUM_ITEMS, + || (0..NUM_ITEMS).map(|_| (0..ITEM_LEN as Index).rev().collect::>()).collect_vec(), + |x| (x.len(), x.capacity()), + ); + } + + #[test] + #[ignore = "measuring consumed memory"] + // ITEM = HashSet emptied + // + // ITEM_LEN NUM_ITEMS MEMORY/ITEM MEM/IDX + // -------------------------------------------------- + // 10_000_000 10 84'094'976 8.4 + // 1_000_000 100 10'524'508 10.5 + // 100_000 1_000 662_720 6.6 + // 10_000 10_000 86_369 8.6 + // 1_000 100_000 12_372 12.4 + // 100 1_000_000 821 8.2 + // 10 10_000_000 144 14.4 + // 1 10_000_000 112 112.0 + fn test_emptied_hash_set_u32_size() { + const ITEM_LEN: usize = 1_000_000; + const NUM_ITEMS: usize = 100; + + let _ = init_and_measure_consumed_memory( + ITEM_LEN, + NUM_ITEMS, + || { + (0..NUM_ITEMS) + .map(|_| { + let mut set = (0..ITEM_LEN as Index).rev().collect::>(); + let original_capacity = set.capacity(); + let _ = set.drain(); + assert!(set.is_empty()); + assert_eq!(original_capacity, set.capacity()); + set + }) + .collect_vec() + }, + |x| (x.len(), x.capacity()), + ); + } + + #[test] + #[ignore = "measuring consumed memory"] + // ITEM = IndexSet + // (measuring HashSet) + // + // ITEM_LEN NUM_ITEMS MEMORY/ITEM MEM/IDX + // -------------------------------------------------- + // 10_000_000 10 84_119_961 8.4 + // 1_000_000 100 10_526_720 10.5 + // 100_000 1_000 662_974 6.6 + // 10_000 10_000 86_424 8.6 + // 1_000 100_000 12_381 12.4 + // 100 1_000_000 830 8.3 + // 10 10_000_000 152 15.2 + // 1 10_000_000 120 120.0 + fn test_index_set_size() { + const ITEM_LEN: usize = 10_000_000; + const NUM_ITEMS: usize = 10; + + let _ = init_and_measure_consumed_memory( + ITEM_LEN, + NUM_ITEMS, + || { + (0..NUM_ITEMS) + .map(|_| { + // Reserve the required capacity + // Note: the resulting allocated HashSet bucket count is (capacity * 8 / 7).next_power_of_two() + let mut item = IndexSet::with_capacity(ITEM_LEN); + + (0..ITEM_LEN as Index).for_each(|x| { + item.insert(x); + }); + item + }) + .collect_vec() + }, + |x| (x.len(), x.capacity()), + ); + } + + #[test] + #[ignore = "measuring consumed memory"] + // ITEM = Vec + // + // ITEM_LEN NUM_ITEMS MEMORY/ITEM MEM/IDX + // -------------------------------------------------- + // 
10_000_000 10 40_208_384 4.0 + // 1_000_000 100 4_026_245 4.0 + // 100_000 1_000 403_791 4.0 + // 10_000 10_000 41_235 4.1 + // 1_000 100_000 4_141 4.1 + // 100 1_000_000 478 4.8 + // 10 10_000_000 72 7.2 + // 1 10_000_000 32 32.0 + fn test_vec_u32_size() { + const ITEM_LEN: usize = 10_000_000; + const NUM_ITEMS: usize = 10; + + let _ = init_and_measure_consumed_memory( + ITEM_LEN, + NUM_ITEMS, + || (0..NUM_ITEMS).map(|_| (0..ITEM_LEN as Index).collect::>()).collect_vec(), + |x| (x.len(), x.capacity()), + ); + } + // #[test] + // #[ignore = "measuring consumed memory"] + // // ITEM = DashSet + // // (measuring DashSet) + // // + // // ITEM_LEN NUM_ITEMS MEMORY/ITEM MEM/IDX + // // -------------------------------------------------- + // // 10_000_000 10 96_439_500 9.6 + // // 1_000_000 100 11_942_010 11.9 + // // 100_000 1_000 826_400 8.3 + // // 10_000 10_000 107_060 10.7 + // // 1_000 100_000 19_114 19.1 + // // 100 1_000_000 12_717 127.2 + // // 10 1_000_000 8_865 886.5 + // // 1 1_000_000 8_309 8309.0 + // fn test_dash_set_size() { + // const ITEM_LEN: usize = 1; + // const NUM_ITEMS: usize = 1_000_000; + + // init_allocator_with_default_settings(); + // kaspa_core::log::try_init_logger("INFO,kaspa_notify::subscription::context=trace"); + + // let before = get_process_memory_info().unwrap(); + // trace!("Creating sets..."); + // let sets = (0..NUM_ITEMS) + // .map(|_| { + // // Rely on organic growth rather than pre-defined capacity + // let item = DashSet::new(); + // (0..ITEM_LEN as Index).for_each(|x| { + // item.insert(x); + // }); + // item + // }) + // .collect_vec(); + + // let after = get_process_memory_info().unwrap(); + // trace!("Set length: {}", sets[0].len()); + // trace!("Memory consumed: {}", (after.resident_set_size - before.resident_set_size) / NUM_ITEMS as u64); + // } +} diff --git a/notify/src/subscription/mod.rs b/notify/src/subscription/mod.rs index f4a593524..6cded477d 100644 --- a/notify/src/subscription/mod.rs +++ b/notify/src/subscription/mod.rs @@ -1,7 +1,8 @@ -use super::{events::EventType, notification::Notification, scope::Scope}; +use crate::{error::Result, events::EventType, notification::Notification, scope::Scope, subscription::context::SubscriptionContext}; use borsh::{BorshDeserialize, BorshSerialize}; use serde::{Deserialize, Serialize}; use std::fmt::Display; +use std::ops::Deref; use std::{ any::Any, fmt::Debug, @@ -11,6 +12,7 @@ use std::{ pub mod array; pub mod compounded; +pub mod context; pub mod single; #[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] @@ -46,7 +48,29 @@ impl From for Command { } } -/// A subscription mutation including a start/stop command and +/// Defines how an incoming UtxosChanged mutation must be propagated upwards +#[derive(Clone, Copy, Default, Debug, PartialEq, Eq)] +pub enum UtxosChangedMutationPolicy { + /// Mutation granularity defined at address level + #[default] + AddressSet, + + /// Mutation granularity reduced to all or nothing + Wildcard, +} + +#[derive(Clone, Copy, Default, Debug)] +pub struct MutationPolicies { + pub utxo_changed: UtxosChangedMutationPolicy, +} + +impl MutationPolicies { + pub fn new(utxo_changed: UtxosChangedMutationPolicy) -> Self { + Self { utxo_changed } + } +} + +/// A subscription mutation formed by a start/stop command and /// a notification scope. 
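The MutationPolicies type introduced just above determines how UtxosChanged subscription changes are forwarded upstream. The following is an illustrative sketch, not part of the patch; the paths and field names follow the kaspa_notify definitions shown in this diff:

    use kaspa_notify::subscription::{MutationPolicies, UtxosChangedMutationPolicy};

    fn policy_examples() {
        // Wildcard: UtxosChanged subscriptions are propagated upstream as plain all-or-nothing
        // start/stop mutations, which suits a root notifier that does not track addresses.
        let wildcard = MutationPolicies::new(UtxosChangedMutationPolicy::Wildcard);
        assert_eq!(wildcard.utxo_changed, UtxosChangedMutationPolicy::Wildcard);

        // AddressSet (the default): mutations keep their per-address granularity.
        assert_eq!(MutationPolicies::default().utxo_changed, UtxosChangedMutationPolicy::AddressSet);
    }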
#[derive(Clone, Debug, PartialEq, Eq)] pub struct Mutation { @@ -73,11 +97,11 @@ impl Mutation { pub trait Subscription { fn event_type(&self) -> EventType; fn active(&self) -> bool; - fn scope(&self) -> Scope; + fn scope(&self, context: &SubscriptionContext) -> Scope; } pub trait Compounded: Subscription + AsAny + DynEq + CompoundedClone + Debug + Send + Sync { - fn compound(&mut self, mutation: Mutation) -> Option; + fn compound(&mut self, mutation: Mutation, context: &SubscriptionContext) -> Option; } impl PartialEq for dyn Compounded { @@ -89,8 +113,96 @@ impl Eq for dyn Compounded {} pub type CompoundedSubscription = Box; -pub trait Single: Subscription + AsAny + DynHash + DynEq + SingleClone + Debug + Send + Sync { - fn mutate(&mut self, mutation: Mutation) -> Option>; +/// The result of applying a [`Mutation`] to a [`DynSubscription`] +pub struct MutationOutcome { + /// Optional new mutated subscription state + pub mutated: Option, + + /// Mutations applied to the [`DynSubscription`] + pub mutations: Vec, +} + +impl MutationOutcome { + pub fn new() -> Self { + Self { mutated: None, mutations: vec![] } + } + + pub fn with_mutations(mutations: Vec) -> Self { + Self { mutated: None, mutations } + } + + pub fn with_mutated(mutated: DynSubscription, mutations: Vec) -> Self { + Self { mutated: Some(mutated), mutations } + } + + /// Updates `target` to the mutated state if any, otherwise leave `target` as is. + pub fn apply_to(self, target: &mut DynSubscription) -> Self { + if let Some(ref mutated) = self.mutated { + *target = mutated.clone(); + } + self + } + + #[inline(always)] + pub fn has_new_state(&self) -> bool { + self.mutated.is_some() + } + + #[inline(always)] + pub fn has_changes(&self) -> bool { + self.has_new_state() || !self.mutations.is_empty() + } +} + +impl Default for MutationOutcome { + fn default() -> Self { + Self::new() + } +} + +/// A single subscription (as opposed to a compounded one) +pub trait Single: Subscription + AsAny + DynHash + DynEq + Debug + Send + Sync { + /// Applies a [`Mutation`] to a single subscription. + /// + /// On success, returns both an optional new state and the mutations, if any, resulting of the process. + /// + /// Implementation guidelines: + /// + /// - If the processing of the mutation yields no change, the returned outcome must have no new state and no mutations + /// otherwise the outcome should contain both a new state (see next point for exception) and some mutations. + /// - If the subscription has inner mutability and its current state and incoming mutation do allow an inner mutation, + /// the outcome new state must be empty. + fn apply_mutation( + &self, + arc_self: &Arc, + mutation: Mutation, + policies: MutationPolicies, + context: &SubscriptionContext, + ) -> Result; +} + +pub trait MutateSingle: Deref { + /// Applies a [`Mutation`] to a single subscription. + /// + /// On success, updates `self` to the new state if any and returns both the optional new state and the mutations + /// resulting of the process as a [`MutationOutcome`]. + fn mutate(&mut self, mutation: Mutation, policies: MutationPolicies, context: &SubscriptionContext) -> Result; +} + +impl MutateSingle for Arc { + fn mutate(&mut self, mutation: Mutation, policies: MutationPolicies, context: &SubscriptionContext) -> Result { + let outcome = self.apply_mutation(self, mutation, policies, context)?.apply_to(self); + Ok(outcome) + } +} + +pub trait BroadcastingSingle: Deref { + /// Returns the broadcasting instance of the subscription. 
+ /// + /// This is used for grouping all the wildcard UtxosChanged subscriptions under + /// the same unique instance in the broadcaster plans, allowing message optimizations + /// during broadcasting of the notifications. + fn broadcasting(self, context: &SubscriptionContext) -> DynSubscription; } impl Hash for dyn Single { @@ -105,7 +217,6 @@ impl PartialEq for dyn Single { } impl Eq for dyn Single {} -pub type SingleSubscription = Box; pub type DynSubscription = Arc; pub trait AsAny { @@ -140,7 +251,6 @@ impl DynEq for T { } pub trait CompoundedClone { - fn clone_arc(&self) -> Arc; fn clone_box(&self) -> Box; } @@ -148,33 +258,11 @@ impl CompoundedClone for T where T: 'static + Compounded + Clone, { - fn clone_arc(&self) -> Arc { - Arc::new(self.clone()) - } - fn clone_box(&self) -> Box { Box::new(self.clone()) } } -pub trait SingleClone { - fn clone_arc(&self) -> Arc; - fn clone_box(&self) -> Box; -} - -impl SingleClone for T -where - T: 'static + Single + Clone, -{ - fn clone_arc(&self) -> Arc { - Arc::new(self.clone()) - } - - fn clone_box(&self) -> Box { - Box::new(self.clone()) - } -} - pub trait ApplyTo { fn apply_to(&self, notification: &N) -> Option; } diff --git a/notify/src/subscription/single.rs b/notify/src/subscription/single.rs index 99246b7d1..c62133c08 100644 --- a/notify/src/subscription/single.rs +++ b/notify/src/subscription/single.rs @@ -1,17 +1,27 @@ -use super::{Mutation, Single, Subscription}; use crate::{ - address::UtxoAddress, + address::tracker::{Index, Indexes}, + error::Result, events::EventType, + listener::ListenerId, scope::{Scope, UtxosChangedScope, VirtualChainChangedScope}, - subscription::Command, + subscription::{ + context::SubscriptionContext, BroadcastingSingle, Command, DynSubscription, Mutation, MutationOutcome, MutationPolicies, + Single, Subscription, UtxosChangedMutationPolicy, + }, }; -use kaspa_addresses::Address; +use itertools::Itertools; +use kaspa_addresses::{Address, Prefix}; use kaspa_consensus_core::tx::ScriptPublicKey; -use kaspa_txscript::pay_to_address_script; +use kaspa_core::trace; +use parking_lot::{RwLock, RwLockReadGuard, RwLockWriteGuard}; use std::{ - collections::HashMap, - fmt::Debug, + collections::hash_set, + fmt::{Debug, Display}, hash::{Hash, Hasher}, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, }; /// Subscription with a all or none scope. 
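The BroadcastingSingle::broadcasting helper added in the mod.rs hunk above is what makes the wildcard grouping concrete. A short sketch under the types defined in this patch (listener ids 1 and 2 are arbitrary): two listeners in the All state both resolve to the single shared instance owned by the SubscriptionContext, so the broadcaster can encode one notification for the whole group.

    use std::sync::Arc;
    use kaspa_notify::subscription::{
        context::SubscriptionContext,
        single::{UtxosChangedSubscription, UtxosChangedState},
        BroadcastingSingle, DynSubscription,
    };

    fn wildcard_grouping_example() {
        let context = SubscriptionContext::new();
        let a: DynSubscription = Arc::new(UtxosChangedSubscription::new(UtxosChangedState::All, 1));
        let b: DynSubscription = Arc::new(UtxosChangedSubscription::new(UtxosChangedState::All, 2));
        // Both wildcard subscriptions are swapped for the unique context-owned instance.
        assert!(Arc::ptr_eq(&a.broadcasting(&context), &b.broadcasting(&context)));
    }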
@@ -30,14 +40,20 @@ impl OverallSubscription { } impl Single for OverallSubscription { - fn mutate(&mut self, mutation: Mutation) -> Option> { + fn apply_mutation( + &self, + _: &Arc, + mutation: Mutation, + _: MutationPolicies, + _: &SubscriptionContext, + ) -> Result { assert_eq!(self.event_type(), mutation.event_type()); - if self.active != mutation.active() { - self.active = mutation.active(); - Some(vec![mutation]) + Ok(if self.active != mutation.active() { + let mutated = Self::new(self.event_type, mutation.active()); + MutationOutcome::with_mutated(Arc::new(mutated), vec![mutation]) } else { - None - } + MutationOutcome::new() + }) } } @@ -52,7 +68,7 @@ impl Subscription for OverallSubscription { self.active } - fn scope(&self) -> Scope { + fn scope(&self, _context: &SubscriptionContext) -> Scope { self.event_type.into() } } @@ -74,9 +90,15 @@ impl VirtualChainChangedSubscription { } impl Single for VirtualChainChangedSubscription { - fn mutate(&mut self, mutation: Mutation) -> Option> { + fn apply_mutation( + &self, + _: &Arc, + mutation: Mutation, + _: MutationPolicies, + _: &SubscriptionContext, + ) -> Result { assert_eq!(self.event_type(), mutation.event_type()); - if let Scope::VirtualChainChanged(ref scope) = mutation.scope { + let result = if let Scope::VirtualChainChanged(ref scope) = mutation.scope { // Here we want the code to (almost) match a double entry table structure // by subscription state and by mutation #[allow(clippy::collapsible_else_if)] @@ -88,39 +110,36 @@ impl Single for VirtualChainChangedSubscription { } else { // Here is an exception to the aforementioned goal // Mutations Reduced and All - self.active = true; - self.include_accepted_transaction_ids = scope.include_accepted_transaction_ids; - Some(vec![mutation]) + let mutated = Self::new(true, scope.include_accepted_transaction_ids); + Some((Arc::new(mutated), vec![mutation])) } } else if !self.include_accepted_transaction_ids { // State Reduced if !mutation.active() { // Mutation None - self.active = false; - self.include_accepted_transaction_ids = false; - Some(vec![Mutation::new(Command::Stop, Scope::VirtualChainChanged(VirtualChainChangedScope::new(false)))]) + let mutated = Self::new(false, false); + Some((Arc::new(mutated), vec![Mutation::new(Command::Stop, VirtualChainChangedScope::new(false).into())])) } else if !scope.include_accepted_transaction_ids { // Mutation Reduced None } else { // Mutation All - self.include_accepted_transaction_ids = true; - Some(vec![ - Mutation::new(Command::Stop, Scope::VirtualChainChanged(VirtualChainChangedScope::new(false))), - mutation, - ]) + let mutated = Self::new(true, true); + Some(( + Arc::new(mutated), + vec![Mutation::new(Command::Stop, VirtualChainChangedScope::new(false).into()), mutation], + )) } } else { // State All if !mutation.active() { // Mutation None - self.active = false; - self.include_accepted_transaction_ids = false; - Some(vec![Mutation::new(Command::Stop, Scope::VirtualChainChanged(VirtualChainChangedScope::new(true)))]) + let mutated = Self::new(false, false); + Some((Arc::new(mutated), vec![Mutation::new(Command::Stop, VirtualChainChangedScope::new(true).into())])) } else if !scope.include_accepted_transaction_ids { // Mutation Reduced - self.include_accepted_transaction_ids = false; - Some(vec![mutation, Mutation::new(Command::Stop, Scope::VirtualChainChanged(VirtualChainChangedScope::new(true)))]) + let mutated = Self::new(true, false); + Some((Arc::new(mutated), vec![mutation, Mutation::new(Command::Stop, 
VirtualChainChangedScope::new(true).into())])) } else { // Mutation All None @@ -128,7 +147,12 @@ impl Single for VirtualChainChangedSubscription { } } else { None - } + }; + let outcome = match result { + Some((mutated, mutations)) => MutationOutcome::with_mutated(mutated, mutations), + None => MutationOutcome::new(), + }; + Ok(outcome) } } @@ -143,162 +167,388 @@ impl Subscription for VirtualChainChangedSubscription { self.active } - fn scope(&self) -> Scope { - Scope::VirtualChainChanged(VirtualChainChangedScope::new(self.include_accepted_transaction_ids)) + fn scope(&self, _context: &SubscriptionContext) -> Scope { + VirtualChainChangedScope::new(self.include_accepted_transaction_ids).into() + } +} + +static UTXOS_CHANGED_SUBSCRIPTIONS: AtomicUsize = AtomicUsize::new(0); + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +enum UtxosChangedMutation { + None, + Remove, + Add, + All, +} + +impl From<(Command, &UtxosChangedScope)> for UtxosChangedMutation { + fn from((command, scope): (Command, &UtxosChangedScope)) -> Self { + match (command, scope.addresses.is_empty()) { + (Command::Stop, true) => Self::None, + (Command::Stop, false) => Self::Remove, + (Command::Start, false) => Self::Add, + (Command::Start, true) => Self::All, + } + } +} + +#[derive(Debug, Clone, Copy, Default, Hash, PartialEq, Eq)] +pub enum UtxosChangedState { + /// Inactive + #[default] + None, + + /// Active on a set of selected addresses + Selected, + + /// Active on all addresses + All, +} + +impl UtxosChangedState { + pub fn active(&self) -> bool { + match self { + UtxosChangedState::None => false, + UtxosChangedState::Selected | UtxosChangedState::All => true, + } + } +} + +impl Display for UtxosChangedState { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + UtxosChangedState::None => write!(f, "none"), + UtxosChangedState::Selected => write!(f, "selected"), + UtxosChangedState::All => write!(f, "all"), + } + } +} + +#[derive(Debug, Clone)] +pub struct UtxosChangedSubscriptionData { + /// State of the subscription + /// + /// Can be mutated without affecting neither equality nor hash of the struct + state: UtxosChangedState, + + /// Address indexes in `SubscriptionContext` + /// + /// Can be mutated without affecting neither equality nor hash of the struct + indexes: Indexes, +} + +impl UtxosChangedSubscriptionData { + fn with_capacity(state: UtxosChangedState, capacity: usize) -> Self { + let indexes = Indexes::with_capacity(capacity); + Self { state, indexes } + } + + #[inline(always)] + pub fn update_state(&mut self, new_state: UtxosChangedState) { + self.state = new_state; + } + + pub fn contains(&self, spk: &ScriptPublicKey, context: &SubscriptionContext) -> bool { + context.address_tracker.contains(&self.indexes, spk) + } + + pub fn len(&self) -> usize { + self.indexes.len() + } + + pub fn is_empty(&self) -> bool { + self.indexes.is_empty() + } + + pub fn capacity(&self) -> usize { + self.indexes.capacity() + } + + pub fn iter(&self) -> hash_set::Iter<'_, Index> { + self.indexes.iter() + } + + pub fn contains_address(&self, address: &Address, context: &SubscriptionContext) -> bool { + context.address_tracker.contains_address(&self.indexes, address) + } + + pub fn to_addresses(&self, prefix: Prefix, context: &SubscriptionContext) -> Vec
<Address> { + self.indexes.iter().filter_map(|index| context.address_tracker.get_address_at_index(*index, prefix)).collect_vec() + } + + pub fn register(&mut self, addresses: Vec<Address>, context: &SubscriptionContext) -> Result<Vec<Address>> { + Ok(context.address_tracker.register(&mut self.indexes, addresses)?) + } + + pub fn unregister(&mut self, addresses: Vec<Address>, context: &SubscriptionContext) -> Vec<Address> { + context.address_tracker.unregister(&mut self.indexes, addresses) + } + + pub fn unregister_indexes(&mut self, context: &SubscriptionContext) -> Vec<Address>
{ + // TODO: consider using a provided prefix + let removed = self.to_addresses(Prefix::Mainnet, context); + context.address_tracker.unregister_indexes(&mut self.indexes); + removed + } + + pub fn to_all(&self) -> bool { + matches!(self.state, UtxosChangedState::All) + } +} + +impl Display for UtxosChangedSubscriptionData { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self.state { + UtxosChangedState::None | UtxosChangedState::All => write!(f, "{}", self.state), + UtxosChangedState::Selected => write!(f, "{}({})", self.state, self.indexes.len()), + } } } -#[derive(Clone, Debug, Default)] +#[derive(Debug)] pub struct UtxosChangedSubscription { - active: bool, - addresses: HashMap, + /// Mutable inner data + data: RwLock, + + /// ID of the listener owning this subscription + /// + /// This fully determines both equality and hash. + listener_id: ListenerId, } impl UtxosChangedSubscription { - pub fn new(active: bool, addresses: Vec
) -> Self { - let mut subscription = Self { active, addresses: HashMap::default() }; - subscription.set_addresses(addresses); - subscription + pub fn new(state: UtxosChangedState, listener_id: ListenerId) -> Self { + Self::with_capacity(state, listener_id, 0) } - fn set_addresses(&mut self, addresses: Vec
) -> &mut Self { - self.addresses = addresses - .into_iter() - .map(|x| { - let utxo_address: UtxoAddress = x.into(); - (utxo_address.to_script_public_key(), utxo_address) - }) - .collect(); - self + pub fn with_capacity(state: UtxosChangedState, listener_id: ListenerId, capacity: usize) -> Self { + let data = RwLock::new(UtxosChangedSubscriptionData::with_capacity(state, capacity)); + let subscription = Self { data, listener_id }; + trace!( + "UtxosChangedSubscription: {} in total (new {})", + UTXOS_CHANGED_SUBSCRIPTIONS.fetch_add(1, Ordering::SeqCst) + 1, + subscription + ); + subscription } - pub fn insert_address(&mut self, address: &Address) -> bool { - let utxo_address: UtxoAddress = address.clone().into(); - self.addresses.insert(utxo_address.to_script_public_key(), utxo_address).is_none() + #[cfg(test)] + pub fn with_addresses(active: bool, addresses: Vec
, listener_id: ListenerId, context: &SubscriptionContext) -> Self { + let state = match (active, addresses.is_empty()) { + (false, _) => UtxosChangedState::None, + (true, false) => UtxosChangedState::Selected, + (true, true) => UtxosChangedState::All, + }; + let subscription = Self::with_capacity(state, listener_id, addresses.len()); + let _ = subscription.data_mut().register(addresses, context); + subscription } - pub fn contains_address(&self, address: &Address) -> bool { - self.addresses.contains_key(&pay_to_address_script(address)) + pub fn data(&self) -> RwLockReadGuard { + self.data.read() } - pub fn remove_address(&mut self, address: &Address) -> bool { - self.addresses.remove(&pay_to_address_script(address)).is_some() + pub fn data_mut(&self) -> RwLockWriteGuard { + self.data.write() } - pub fn addresses(&self) -> &HashMap { - &self.addresses + #[inline(always)] + pub fn state(&self) -> UtxosChangedState { + self.data().state } pub fn to_all(&self) -> bool { - self.addresses.is_empty() + matches!(self.data().state, UtxosChangedState::All) + } +} + +impl Clone for UtxosChangedSubscription { + fn clone(&self) -> Self { + let subscription = Self { data: RwLock::new(self.data().clone()), listener_id: self.listener_id }; + trace!( + "UtxosChangedSubscription: {} in total (clone {})", + UTXOS_CHANGED_SUBSCRIPTIONS.fetch_add(1, Ordering::SeqCst) + 1, + subscription + ); + subscription + } +} + +impl Display for UtxosChangedSubscription { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.data()) + } +} + +impl Drop for UtxosChangedSubscription { + fn drop(&mut self) { + trace!( + "UtxosChangedSubscription: {} in total (drop {})", + UTXOS_CHANGED_SUBSCRIPTIONS.fetch_sub(1, Ordering::SeqCst) - 1, + self + ); } } impl PartialEq for UtxosChangedSubscription { + /// Equality is specifically bound to the listener ID fn eq(&self, other: &Self) -> bool { - if self.active == other.active && self.addresses.len() == other.addresses.len() { - // HashMaps are considered equal if they contain the same keys - return self.addresses.keys().all(|x| other.addresses.contains_key(x)); - } - false + self.listener_id == other.listener_id } } impl Eq for UtxosChangedSubscription {} impl Hash for UtxosChangedSubscription { + /// Hash is specifically bound to the listener ID fn hash(&self, state: &mut H) { - self.active.hash(state); - - // Since item order in hash set is undefined, build a sorted vector - // so that hashing is determinist. 
- let mut items: Vec<&Address> = self.addresses.values().map(|x| &**x).collect::>(); - items.sort(); - items.hash(state); + self.listener_id.hash(state); } } impl Single for UtxosChangedSubscription { - fn mutate(&mut self, mutation: Mutation) -> Option> { - if let Scope::UtxosChanged(ref scope) = mutation.scope { - // Here we want the code to (almost) match a double entry table structure - // by subscription state and by mutation - #[allow(clippy::collapsible_else_if)] - if !self.active { - // State None - if !mutation.active() { - // Here is an exception to the aforementioned goal - // Mutations None and Remove(R) - None - } else { - // Here is an exception to the aforementioned goal - // Mutations Add(A) && All - self.active = true; - self.set_addresses(scope.addresses.clone()); - Some(vec![mutation]) + fn apply_mutation( + &self, + current: &Arc, + mutation: Mutation, + policies: MutationPolicies, + context: &SubscriptionContext, + ) -> Result { + assert_eq!(self.event_type(), mutation.event_type()); + let outcome = if let Scope::UtxosChanged(scope) = mutation.scope { + let mut data = self.data_mut(); + let state = data.state; + let mutation_type = UtxosChangedMutation::from((mutation.command, &scope)); + match (state, mutation_type) { + (UtxosChangedState::None, UtxosChangedMutation::None | UtxosChangedMutation::Remove) => { + // State None + Mutations None or Remove(R) => No change + MutationOutcome::new() } - } else if !self.addresses.is_empty() { - // State Selected(S) - if !mutation.active() { - if scope.addresses.is_empty() { - // Mutation None - self.active = false; - let removed = self.addresses.drain().map(|(_, x)| x.into()).collect(); - Some(vec![Mutation::new(Command::Stop, Scope::UtxosChanged(UtxosChangedScope::new(removed)))]) - } else { - // Mutation Remove(R) - let removed: Vec
= scope.addresses.iter().filter(|x| self.remove_address(x)).cloned().collect(); - if self.addresses.is_empty() { - self.active = false; + (UtxosChangedState::None, UtxosChangedMutation::Add) => { + // State None + Mutation Add(A) => Mutated new state Selected(A) + let addresses = data.register(scope.addresses, context)?; + data.update_state(UtxosChangedState::Selected); + let mutations = match policies.utxo_changed { + UtxosChangedMutationPolicy::AddressSet => { + vec![Mutation::new(mutation.command, UtxosChangedScope::new(addresses).into())] } - match removed.is_empty() { - false => Some(vec![Mutation::new(Command::Stop, Scope::UtxosChanged(UtxosChangedScope::new(removed)))]), - true => None, + UtxosChangedMutationPolicy::Wildcard => { + vec![Mutation::new(mutation.command, UtxosChangedScope::default().into())] } - } - } else { - if !scope.addresses.is_empty() { - // Mutation Add(A) - let added = scope.addresses.iter().filter(|x| self.insert_address(x)).cloned().collect::>(); - match added.is_empty() { - false => Some(vec![Mutation::new(Command::Start, Scope::UtxosChanged(UtxosChangedScope::new(added)))]), - true => None, + }; + MutationOutcome::with_mutated(current.clone(), mutations) + } + (UtxosChangedState::None, UtxosChangedMutation::All) => { + // State None + Mutation All => Mutated new state All + data.update_state(UtxosChangedState::All); + let mutations = vec![Mutation::new(mutation.command, UtxosChangedScope::default().into())]; + MutationOutcome::with_mutated(current.clone(), mutations) + } + (UtxosChangedState::Selected, UtxosChangedMutation::None) => { + // State Selected(S) + Mutation None => Mutated new state None + data.update_state(UtxosChangedState::None); + let removed = data.unregister_indexes(context); + assert!(!removed.is_empty(), "state Selected implies a non empty address set"); + let mutations = match policies.utxo_changed { + UtxosChangedMutationPolicy::AddressSet => { + vec![Mutation::new(Command::Stop, UtxosChangedScope::new(removed).into())] } - } else { - // Mutation All - let removed: Vec
= self.addresses.drain().map(|(_, x)| x.into()).collect(); - Some(vec![ - Mutation::new(Command::Stop, Scope::UtxosChanged(UtxosChangedScope::new(removed))), - Mutation::new(Command::Start, Scope::UtxosChanged(UtxosChangedScope::default())), - ]) - } + UtxosChangedMutationPolicy::Wildcard => { + vec![Mutation::new(Command::Stop, UtxosChangedScope::default().into())] + } + }; + MutationOutcome::with_mutated(current.clone(), mutations) } - } else { - // State All - if !mutation.active() { - if scope.addresses.is_empty() { - // Mutation None - self.active = false; - Some(vec![Mutation::new(Command::Stop, Scope::UtxosChanged(UtxosChangedScope::default()))]) - } else { - // Mutation Remove(R) - None + (UtxosChangedState::Selected, UtxosChangedMutation::Remove) => { + // State Selected(S) + Mutation Remove(R) => Mutated state Selected(S – R) or mutated new state None or no change + let removed = data.unregister(scope.addresses, context); + match (removed.is_empty(), data.indexes.is_empty()) { + (false, false) => { + let mutations = match policies.utxo_changed { + UtxosChangedMutationPolicy::AddressSet => { + vec![Mutation::new(Command::Stop, UtxosChangedScope::new(removed).into())] + } + UtxosChangedMutationPolicy::Wildcard => vec![], + }; + MutationOutcome::with_mutations(mutations) + } + (false, true) => { + data.update_state(UtxosChangedState::None); + let mutations = match policies.utxo_changed { + UtxosChangedMutationPolicy::AddressSet => { + vec![Mutation::new(Command::Stop, UtxosChangedScope::new(removed).into())] + } + UtxosChangedMutationPolicy::Wildcard => { + vec![Mutation::new(Command::Stop, UtxosChangedScope::default().into())] + } + }; + MutationOutcome::with_mutated(current.clone(), mutations) + } + (true, _) => MutationOutcome::new(), } - } else { - if !scope.addresses.is_empty() { - // Mutation Add(A) - scope.addresses.iter().for_each(|x| { - self.insert_address(x); - }); - Some(vec![mutation, Mutation::new(Command::Stop, Scope::UtxosChanged(UtxosChangedScope::default()))]) - } else { - // Mutation All - None + } + (UtxosChangedState::Selected, UtxosChangedMutation::Add) => { + // State Selected(S) + Mutation Add(A) => Mutated state Selected(A ∪ S) + let added = data.register(scope.addresses, context)?; + match added.is_empty() { + false => { + let mutations = match policies.utxo_changed { + UtxosChangedMutationPolicy::AddressSet => { + vec![Mutation::new(Command::Start, UtxosChangedScope::new(added).into())] + } + UtxosChangedMutationPolicy::Wildcard => vec![], + }; + MutationOutcome::with_mutations(mutations) + } + true => MutationOutcome::new(), } } + (UtxosChangedState::Selected, UtxosChangedMutation::All) => { + // State Selected(S) + Mutation All => Mutated new state All + let removed = data.unregister_indexes(context); + assert!(!removed.is_empty(), "state Selected implies a non empty address set"); + data.update_state(UtxosChangedState::All); + let mutations = match policies.utxo_changed { + UtxosChangedMutationPolicy::AddressSet => vec![ + Mutation::new(Command::Stop, UtxosChangedScope::new(removed).into()), + Mutation::new(Command::Start, UtxosChangedScope::default().into()), + ], + UtxosChangedMutationPolicy::Wildcard => vec![], + }; + MutationOutcome::with_mutated(current.clone(), mutations) + } + (UtxosChangedState::All, UtxosChangedMutation::None) => { + // State All + Mutation None => Mutated new state None + data.update_state(UtxosChangedState::None); + let mutations = vec![Mutation::new(Command::Stop, UtxosChangedScope::default().into())]; + 
MutationOutcome::with_mutated(current.clone(), mutations) + } + (UtxosChangedState::All, UtxosChangedMutation::Remove) => { + // State All + Mutation Remove(R) => No change + MutationOutcome::new() + } + (UtxosChangedState::All, UtxosChangedMutation::Add) => { + // State All + Mutation Add(A) => Mutated new state Selectee(A) + let added = data.register(scope.addresses, context)?; + data.update_state(UtxosChangedState::Selected); + let mutations = match policies.utxo_changed { + UtxosChangedMutationPolicy::AddressSet => vec![ + Mutation::new(Command::Start, UtxosChangedScope::new(added).into()), + Mutation::new(Command::Stop, UtxosChangedScope::default().into()), + ], + UtxosChangedMutationPolicy::Wildcard => vec![], + }; + MutationOutcome::with_mutated(current.clone(), mutations) + } + (UtxosChangedState::All, UtxosChangedMutation::All) => { + // State All <= Mutation All + MutationOutcome::new() + } } } else { - None - } + MutationOutcome::new() + }; + Ok(outcome) } } @@ -308,11 +558,27 @@ impl Subscription for UtxosChangedSubscription { } fn active(&self) -> bool { - self.active + self.state().active() + } + + fn scope(&self, context: &SubscriptionContext) -> Scope { + // TODO: consider using a provided prefix + UtxosChangedScope::new(self.data().to_addresses(Prefix::Mainnet, context)).into() } +} - fn scope(&self) -> Scope { - Scope::UtxosChanged(UtxosChangedScope::new(self.addresses.values().map(|x| &**x).cloned().collect())) +impl BroadcastingSingle for DynSubscription { + fn broadcasting(self, context: &SubscriptionContext) -> DynSubscription { + match self.event_type() { + EventType::UtxosChanged => { + let utxos_changed_subscription = self.as_any().downcast_ref::().unwrap(); + match utxos_changed_subscription.to_all() { + true => context.utxos_changed_subscription_to_all.clone(), + false => self, + } + } + _ => self, + } } } @@ -334,7 +600,7 @@ mod tests { fn new(left: usize, right: usize, should_match: bool) -> Self { Self { left, right, should_match } } - fn compare(&self, name: &str, subscriptions: &[SingleSubscription]) { + fn compare(&self, name: &str, subscriptions: &[DynSubscription]) { let equal = if self.should_match { "be equal" } else { "not be equal" }; // Compare Box dyn Single #[allow(clippy::op_ref)] @@ -355,8 +621,8 @@ mod tests { get_hash(&subscriptions[self.right]), ); // Compare Arc dyn Single - let left_arc = subscriptions[self.left].clone_arc(); - let right_arc = subscriptions[self.right].clone_arc(); + let left_arc = subscriptions[self.left].clone(); + let right_arc = subscriptions[self.right].clone(); assert_eq!( *left_arc == *right_arc, self.should_match, @@ -377,10 +643,11 @@ mod tests { struct Test { name: &'static str, - subscriptions: Vec, + subscriptions: Vec, comparisons: Vec, } + let context = SubscriptionContext::new(); let addresses = get_3_addresses(false); let mut sorted_addresses = addresses.clone(); sorted_addresses.sort(); @@ -389,19 +656,19 @@ mod tests { Test { name: "test basic overall subscription", subscriptions: vec![ - Box::new(OverallSubscription::new(EventType::BlockAdded, false)), - Box::new(OverallSubscription::new(EventType::BlockAdded, true)), - Box::new(OverallSubscription::new(EventType::BlockAdded, true)), + Arc::new(OverallSubscription::new(EventType::BlockAdded, false)), + Arc::new(OverallSubscription::new(EventType::BlockAdded, true)), + Arc::new(OverallSubscription::new(EventType::BlockAdded, true)), ], comparisons: vec![Comparison::new(0, 1, false), Comparison::new(0, 2, false), Comparison::new(1, 2, true)], }, Test { name: 
"test virtual selected parent chain changed subscription", subscriptions: vec![ - Box::new(VirtualChainChangedSubscription::new(false, false)), - Box::new(VirtualChainChangedSubscription::new(true, false)), - Box::new(VirtualChainChangedSubscription::new(true, true)), - Box::new(VirtualChainChangedSubscription::new(true, true)), + Arc::new(VirtualChainChangedSubscription::new(false, false)), + Arc::new(VirtualChainChangedSubscription::new(true, false)), + Arc::new(VirtualChainChangedSubscription::new(true, true)), + Arc::new(VirtualChainChangedSubscription::new(true, true)), ], comparisons: vec![ Comparison::new(0, 1, false), @@ -415,18 +682,35 @@ mod tests { Test { name: "test utxos changed subscription", subscriptions: vec![ - Box::new(UtxosChangedSubscription::new(false, vec![])), - Box::new(UtxosChangedSubscription::new(true, addresses[0..2].to_vec())), - Box::new(UtxosChangedSubscription::new(true, addresses[0..3].to_vec())), - Box::new(UtxosChangedSubscription::new(true, sorted_addresses[0..3].to_vec())), + Arc::new(UtxosChangedSubscription::with_addresses(false, vec![], 0, &context)), + Arc::new(UtxosChangedSubscription::with_addresses(true, addresses[0..2].to_vec(), 1, &context)), + Arc::new(UtxosChangedSubscription::with_addresses(true, addresses[0..3].to_vec(), 2, &context)), + Arc::new(UtxosChangedSubscription::with_addresses(true, sorted_addresses[0..3].to_vec(), 2, &context)), + Arc::new(UtxosChangedSubscription::with_addresses(true, vec![], 3, &context)), + Arc::new(UtxosChangedSubscription::with_addresses(true, vec![], 4, &context)), ], comparisons: vec![ + Comparison::new(0, 0, true), Comparison::new(0, 1, false), Comparison::new(0, 2, false), Comparison::new(0, 3, false), + Comparison::new(0, 4, false), + Comparison::new(0, 5, false), + Comparison::new(1, 1, true), Comparison::new(1, 2, false), Comparison::new(1, 3, false), + Comparison::new(1, 4, false), + Comparison::new(1, 5, false), + Comparison::new(2, 2, true), + Comparison::new(2, 3, true), + Comparison::new(2, 4, false), + Comparison::new(2, 5, false), Comparison::new(3, 3, true), + Comparison::new(3, 4, false), + Comparison::new(3, 5, false), + Comparison::new(4, 4, true), + Comparison::new(4, 5, false), + Comparison::new(5, 5, true), ], }, ]; @@ -446,10 +730,10 @@ mod tests { struct MutationTest { name: &'static str, - state: SingleSubscription, + state: DynSubscription, mutation: Mutation, - new_state: SingleSubscription, - result: Option>, + new_state: DynSubscription, + outcome: MutationOutcome, } struct MutationTests { @@ -457,25 +741,30 @@ mod tests { } impl MutationTests { + pub const LISTENER_ID: ListenerId = 1; + fn new(tests: Vec) -> Self { Self { tests } } - fn run(&self) { + fn run(&self, context: &SubscriptionContext) { for test in self.tests.iter() { - let mut new_state = test.state.clone_box(); - let result = new_state.mutate(test.mutation.clone()); + let mut new_state = test.state.clone(); + let outcome = new_state.mutate(test.mutation.clone(), Default::default(), context).unwrap(); assert_eq!(test.new_state.active(), new_state.active(), "Testing '{}': wrong new state activity", test.name); assert_eq!(*test.new_state, *new_state, "Testing '{}': wrong new state", test.name); - assert_eq!(test.result, result, "Testing '{}': wrong result", test.name); + assert_eq!(test.outcome.has_new_state(), outcome.has_new_state(), "Testing '{}': wrong new state presence", test.name); + assert_eq!(test.outcome.mutations, outcome.mutations, "Testing '{}': wrong mutations", test.name); } } } #[test] fn 
test_overall_mutation() { - fn s(active: bool) -> SingleSubscription { - Box::new(OverallSubscription { event_type: EventType::BlockAdded, active }) + let context = SubscriptionContext::new(); + + fn s(active: bool) -> DynSubscription { + Arc::new(OverallSubscription { event_type: EventType::BlockAdded, active }) } fn m(command: Command) -> Mutation { Mutation { command, scope: Scope::BlockAdded(BlockAddedScope {}) } @@ -496,37 +785,39 @@ mod tests { state: none(), mutation: start_all(), new_state: all(), - result: Some(vec![start_all()]), + outcome: MutationOutcome::with_mutated(all(), vec![start_all()]), }, MutationTest { name: "OverallSubscription None to None", state: none(), mutation: stop_all(), new_state: none(), - result: None, + outcome: MutationOutcome::new(), }, MutationTest { name: "OverallSubscription All to All", state: all(), mutation: start_all(), new_state: all(), - result: None, + outcome: MutationOutcome::new(), }, MutationTest { name: "OverallSubscription All to None", state: all(), mutation: stop_all(), new_state: none(), - result: Some(vec![stop_all()]), + outcome: MutationOutcome::with_mutated(none(), vec![stop_all()]), }, ]); - tests.run() + tests.run(&context) } #[test] fn test_virtual_chain_changed_mutation() { - fn s(active: bool, include_accepted_transaction_ids: bool) -> SingleSubscription { - Box::new(VirtualChainChangedSubscription { active, include_accepted_transaction_ids }) + let context = SubscriptionContext::new(); + + fn s(active: bool, include_accepted_transaction_ids: bool) -> DynSubscription { + Arc::new(VirtualChainChangedSubscription { active, include_accepted_transaction_ids }) } fn m(command: Command, include_accepted_transaction_ids: bool) -> Mutation { Mutation { command, scope: Scope::VirtualChainChanged(VirtualChainChangedScope { include_accepted_transaction_ids }) } @@ -550,98 +841,102 @@ mod tests { state: none(), mutation: start_all(), new_state: all(), - result: Some(vec![start_all()]), + outcome: MutationOutcome::with_mutated(all(), vec![start_all()]), }, MutationTest { name: "VirtualChainChangedSubscription None to Reduced", state: none(), mutation: start_reduced(), new_state: reduced(), - result: Some(vec![start_reduced()]), + outcome: MutationOutcome::with_mutated(reduced(), vec![start_reduced()]), }, MutationTest { name: "VirtualChainChangedSubscription None to None (stop reduced)", state: none(), mutation: stop_reduced(), new_state: none(), - result: None, + outcome: MutationOutcome::new(), }, MutationTest { name: "VirtualChainChangedSubscription None to None (stop all)", state: none(), mutation: stop_all(), new_state: none(), - result: None, + outcome: MutationOutcome::new(), }, MutationTest { name: "VirtualChainChangedSubscription Reduced to All", state: reduced(), mutation: start_all(), new_state: all(), - result: Some(vec![stop_reduced(), start_all()]), + outcome: MutationOutcome::with_mutated(all(), vec![stop_reduced(), start_all()]), }, MutationTest { name: "VirtualChainChangedSubscription Reduced to Reduced", state: reduced(), mutation: start_reduced(), new_state: reduced(), - result: None, + outcome: MutationOutcome::new(), }, MutationTest { name: "VirtualChainChangedSubscription Reduced to None (stop reduced)", state: reduced(), mutation: stop_reduced(), new_state: none(), - result: Some(vec![stop_reduced()]), + outcome: MutationOutcome::with_mutated(none(), vec![stop_reduced()]), }, MutationTest { name: "VirtualChainChangedSubscription Reduced to None (stop all)", state: reduced(), mutation: stop_all(), new_state: none(), 
- result: Some(vec![stop_reduced()]), + outcome: MutationOutcome::with_mutated(none(), vec![stop_reduced()]), }, MutationTest { name: "VirtualChainChangedSubscription All to All", state: all(), mutation: start_all(), new_state: all(), - result: None, + outcome: MutationOutcome::new(), }, MutationTest { name: "VirtualChainChangedSubscription All to Reduced", state: all(), mutation: start_reduced(), new_state: reduced(), - result: Some(vec![start_reduced(), stop_all()]), + outcome: MutationOutcome::with_mutated(reduced(), vec![start_reduced(), stop_all()]), }, MutationTest { name: "VirtualChainChangedSubscription All to None (stop reduced)", state: all(), mutation: stop_reduced(), new_state: none(), - result: Some(vec![stop_all()]), + outcome: MutationOutcome::with_mutated(none(), vec![stop_all()]), }, MutationTest { name: "VirtualChainChangedSubscription All to None (stop all)", state: all(), mutation: stop_all(), new_state: none(), - result: Some(vec![stop_all()]), + outcome: MutationOutcome::with_mutated(none(), vec![stop_all()]), }, ]); - tests.run() + tests.run(&context) } #[test] fn test_utxos_changed_mutation() { + let context = SubscriptionContext::new(); let a_stock = get_3_addresses(true); let av = |indexes: &[usize]| indexes.iter().map(|idx| (a_stock[*idx]).clone()).collect::>(); let ah = |indexes: &[usize]| indexes.iter().map(|idx| (a_stock[*idx]).clone()).collect::>(); - let s = |active: bool, indexes: &[usize]| Box::new(UtxosChangedSubscription::new(active, ah(indexes))) as SingleSubscription; + let s = |active: bool, indexes: &[usize]| { + Arc::new(UtxosChangedSubscription::with_addresses(active, ah(indexes).to_vec(), MutationTests::LISTENER_ID, &context)) + as DynSubscription + }; let m = |command: Command, indexes: &[usize]| -> Mutation { - Mutation { command, scope: Scope::UtxosChanged(UtxosChangedScope { addresses: av(indexes) }) } + Mutation { command, scope: Scope::UtxosChanged(UtxosChangedScope::new(av(indexes))) } }; // Subscriptions @@ -671,114 +966,114 @@ mod tests { state: none(), mutation: start_all(), new_state: all(), - result: Some(vec![start_all()]), + outcome: MutationOutcome::with_mutated(all(), vec![start_all()]), }, MutationTest { name: "UtxosChangedSubscription None to Selected 0 (add set)", state: none(), mutation: start_0(), new_state: selected_0(), - result: Some(vec![start_0()]), + outcome: MutationOutcome::with_mutated(selected_0(), vec![start_0()]), }, MutationTest { name: "UtxosChangedSubscription None to None (stop set)", state: none(), mutation: stop_0(), new_state: none(), - result: None, + outcome: MutationOutcome::new(), }, MutationTest { name: "UtxosChangedSubscription None to None (stop all)", state: none(), mutation: stop_all(), new_state: none(), - result: None, + outcome: MutationOutcome::new(), }, MutationTest { name: "UtxosChangedSubscription Selected 01 to All (add all)", state: selected_01(), mutation: start_all(), new_state: all(), - result: Some(vec![stop_01(), start_all()]), + outcome: MutationOutcome::with_mutated(all(), vec![stop_01(), start_all()]), }, MutationTest { name: "UtxosChangedSubscription Selected 01 to 01 (add set with total intersection)", state: selected_01(), mutation: start_1(), new_state: selected_01(), - result: None, + outcome: MutationOutcome::new(), }, MutationTest { name: "UtxosChangedSubscription Selected 0 to 01 (add set with partial intersection)", state: selected_0(), mutation: start_01(), new_state: selected_01(), - result: Some(vec![start_1()]), + outcome: 
MutationOutcome::with_mutations(vec![start_1()]), }, MutationTest { name: "UtxosChangedSubscription Selected 2 to 012 (add set with no intersection)", state: selected_2(), mutation: start_01(), new_state: selected_012(), - result: Some(vec![start_01()]), + outcome: MutationOutcome::with_mutations(vec![start_01()]), }, MutationTest { name: "UtxosChangedSubscription Selected 01 to None (remove superset)", state: selected_1(), mutation: stop_01(), new_state: none(), - result: Some(vec![stop_1()]), + outcome: MutationOutcome::with_mutated(none(), vec![stop_1()]), }, MutationTest { name: "UtxosChangedSubscription Selected 01 to None (remove set with total intersection)", state: selected_01(), mutation: stop_01(), new_state: none(), - result: Some(vec![stop_01()]), + outcome: MutationOutcome::with_mutated(none(), vec![stop_01()]), }, MutationTest { name: "UtxosChangedSubscription Selected 02 to 2 (remove set with partial intersection)", state: selected_02(), mutation: stop_01(), new_state: selected_2(), - result: Some(vec![stop_0()]), + outcome: MutationOutcome::with_mutations(vec![stop_0()]), }, MutationTest { name: "UtxosChangedSubscription Selected 02 to 02 (remove set with no intersection)", state: selected_02(), mutation: stop_1(), new_state: selected_02(), - result: None, + outcome: MutationOutcome::new(), }, MutationTest { name: "UtxosChangedSubscription All to All (add all)", state: all(), mutation: start_all(), new_state: all(), - result: None, + outcome: MutationOutcome::new(), }, MutationTest { name: "UtxosChangedSubscription All to Selected 01 (add set)", state: all(), mutation: start_01(), new_state: selected_01(), - result: Some(vec![start_01(), stop_all()]), + outcome: MutationOutcome::with_mutated(selected_01(), vec![start_01(), stop_all()]), }, MutationTest { name: "UtxosChangedSubscription All to All (remove set)", state: all(), mutation: stop_01(), new_state: all(), - result: None, + outcome: MutationOutcome::new(), }, MutationTest { name: "UtxosChangedSubscription All to None (remove all)", state: all(), mutation: stop_all(), new_state: none(), - result: Some(vec![stop_all()]), + outcome: MutationOutcome::with_mutated(none(), vec![stop_all()]), }, ]); - tests.run() + tests.run(&context) } } diff --git a/rothschild/Cargo.toml b/rothschild/Cargo.toml index 2644d9e0e..2503374ac 100644 --- a/rothschild/Cargo.toml +++ b/rothschild/Cargo.toml @@ -10,20 +10,21 @@ license.workspace = true repository.workspace = true [dependencies] -kaspa-core.workspace = true +kaspa-addresses.workspace = true kaspa-consensus-core.workspace = true +kaspa-core.workspace = true kaspa-grpc-client.workspace = true +kaspa-notify.workspace = true kaspa-rpc-core.workspace = true -kaspa-addresses.workspace = true kaspa-txscript.workspace = true kaspa-utils.workspace = true -async-channel.workspace = true -parking_lot.workspace = true +async-channel.workspace = true clap.workspace = true faster-hex.workspace = true itertools.workspace = true log.workspace = true +parking_lot.workspace = true rayon.workspace = true secp256k1 = { workspace = true, features = ["global-context", "rand-std"] } tokio = { workspace = true, features = ["rt", "macros", "rt-multi-thread"] } diff --git a/rothschild/src/main.rs b/rothschild/src/main.rs index e846702ba..f5bfe80ad 100644 --- a/rothschild/src/main.rs +++ b/rothschild/src/main.rs @@ -2,7 +2,7 @@ use std::{collections::HashMap, sync::Arc, time::Duration}; use clap::{Arg, ArgAction, Command}; use itertools::Itertools; -use kaspa_addresses::Address; +use 
kaspa_addresses::{Address, Prefix, Version}; use kaspa_consensus_core::{ config::params::{TESTNET11_PARAMS, TESTNET_PARAMS}, constants::{SOMPI_PER_KASPA, TX_VERSION}, @@ -12,6 +12,7 @@ use kaspa_consensus_core::{ }; use kaspa_core::{info, kaspad_env::version, time::unix_now, warn}; use kaspa_grpc_client::{ClientPool, GrpcClient}; +use kaspa_notify::subscription::context::SubscriptionContext; use kaspa_rpc_core::{api::rpc::RpcApi, notify::mode::NotificationMode}; use kaspa_txscript::pay_to_address_script; use parking_lot::Mutex; @@ -22,6 +23,8 @@ use tokio::time::{interval, MissedTickBehavior}; const DEFAULT_SEND_AMOUNT: u64 = 10 * SOMPI_PER_KASPA; const FEE_PER_MASS: u64 = 10; const MILLIS_PER_TICK: u64 = 10; +const ADDRESS_PREFIX: Prefix = Prefix::Testnet; +const ADDRESS_VERSION: Version = Version::PubKey; struct Stats { num_txs: usize, @@ -84,10 +87,19 @@ pub fn cli() -> Command { .arg(Arg::new("unleashed").long("unleashed").action(ArgAction::SetTrue).hide(true).help("Allow higher TPS")) } -async fn new_rpc_client(address: &str) -> GrpcClient { - GrpcClient::connect(NotificationMode::Direct, format!("grpc://{}", address), true, None, false, Some(500_000), Default::default()) - .await - .unwrap() +async fn new_rpc_client(subscription_context: &SubscriptionContext, address: &str) -> GrpcClient { + GrpcClient::connect_with_args( + NotificationMode::Direct, + format!("grpc://{}", address), + Some(subscription_context.clone()), + true, + None, + false, + Some(500_000), + Default::default(), + ) + .await + .unwrap() } struct ClientPoolArg { @@ -104,9 +116,11 @@ async fn main() { kaspa_core::log::init_logger(None, ""); let args = Args::parse(); let stats = Arc::new(Mutex::new(Stats { num_txs: 0, since: unix_now(), num_utxos: 0, utxos_amount: 0, num_outs: 0 })); - let rpc_client = GrpcClient::connect( + let subscription_context = SubscriptionContext::new(); + let rpc_client = GrpcClient::connect_with_args( NotificationMode::Direct, format!("grpc://{}", args.rpc_server), + Some(subscription_context.clone()), true, None, false, @@ -124,8 +138,7 @@ async fn main() { secp256k1::KeyPair::from_seckey_slice(secp256k1::SECP256K1, &private_key_bytes).unwrap() } else { let (sk, pk) = &secp256k1::generate_keypair(&mut thread_rng()); - let kaspa_addr = - Address::new(kaspa_addresses::Prefix::Testnet, kaspa_addresses::Version::PubKey, &pk.x_only_public_key().0.serialize()); + let kaspa_addr = Address::new(ADDRESS_PREFIX, ADDRESS_VERSION, &pk.x_only_public_key().0.serialize()); info!( "Generated private key {} and address {}. 
Send some funds to this address and rerun rothschild with `--private-key {}`", sk.display_secret(), @@ -135,11 +148,7 @@ async fn main() { return; }; - let kaspa_addr = Address::new( - kaspa_addresses::Prefix::Testnet, - kaspa_addresses::Version::PubKey, - &schnorr_key.x_only_public_key().0.serialize(), - ); + let kaspa_addr = Address::new(ADDRESS_PREFIX, ADDRESS_VERSION, &schnorr_key.x_only_public_key().0.serialize()); rayon::ThreadPoolBuilder::new().num_threads(args.threads as usize).build_global().unwrap(); @@ -168,10 +177,11 @@ async fn main() { const CLIENT_POOL_SIZE: usize = 8; let mut rpc_clients = Vec::with_capacity(CLIENT_POOL_SIZE); for _ in 0..CLIENT_POOL_SIZE { - rpc_clients.push(Arc::new(new_rpc_client(&args.rpc_server).await)); + rpc_clients.push(Arc::new(new_rpc_client(&subscription_context, &args.rpc_server).await)); } - let submit_tx_pool = ClientPool::new(rpc_clients, 1000, |c, arg: ClientPoolArg| async move { + let submit_tx_pool = ClientPool::new(rpc_clients, 1000); + let _ = submit_tx_pool.start(|c, arg: ClientPoolArg| async move { let ClientPoolArg { tx, stats, selected_utxos_len, selected_utxos_amount, pending_len, utxos_len } = arg; match c.submit_transaction(tx.as_ref().into(), false).await { Ok(_) => { diff --git a/rpc/core/src/api/notifications.rs b/rpc/core/src/api/notifications.rs index 52b015ec9..977313d14 100644 --- a/rpc/core/src/api/notifications.rs +++ b/rpc/core/src/api/notifications.rs @@ -5,6 +5,7 @@ use kaspa_notify::{ events::EventType, notification::{full_featured, Notification as NotificationTrait}, subscription::{ + context::SubscriptionContext, single::{OverallSubscription, UtxosChangedSubscription, VirtualChainChangedSubscription}, Subscription, }, @@ -64,14 +65,18 @@ impl Notification { } impl NotificationTrait for Notification { - fn apply_overall_subscription(&self, subscription: &OverallSubscription) -> Option { + fn apply_overall_subscription(&self, subscription: &OverallSubscription, _context: &SubscriptionContext) -> Option { match subscription.active() { true => Some(self.clone()), false => None, } } - fn apply_virtual_chain_changed_subscription(&self, subscription: &VirtualChainChangedSubscription) -> Option { + fn apply_virtual_chain_changed_subscription( + &self, + subscription: &VirtualChainChangedSubscription, + _context: &SubscriptionContext, + ) -> Option { match subscription.active() { true => { if let Notification::VirtualChainChanged(ref payload) = self { @@ -89,11 +94,15 @@ impl NotificationTrait for Notification { } } - fn apply_utxos_changed_subscription(&self, subscription: &UtxosChangedSubscription) -> Option { + fn apply_utxos_changed_subscription( + &self, + subscription: &UtxosChangedSubscription, + context: &SubscriptionContext, + ) -> Option { match subscription.active() { true => { let Self::UtxosChanged(notification) = self else { return None }; - notification.apply_utxos_changed_subscription(subscription).map(Self::UtxosChanged) + notification.apply_utxos_changed_subscription(subscription, context).map(Self::UtxosChanged) } false => None, } diff --git a/rpc/core/src/error.rs b/rpc/core/src/error.rs index ed0a7965a..21525db63 100644 --- a/rpc/core/src/error.rs +++ b/rpc/core/src/error.rs @@ -68,7 +68,7 @@ pub enum RpcError { #[error("Requested window size {0} is larger than pruning point depth {1}.")] WindowSizeExceedingPruningDepth(u32, u64), - #[error("Method unavailable in safe mode. Run the node with --unsafe argument.")] + #[error("Method unavailable in safe mode. 
Run the node with --unsaferpc argument.")] UnavailableInSafeMode, #[error("Cannot ban IP {0} because it has some permanent connection.")] diff --git a/rpc/core/src/model/message.rs b/rpc/core/src/model/message.rs index 5c55e6fa0..8538558e5 100644 --- a/rpc/core/src/model/message.rs +++ b/rpc/core/src/model/message.rs @@ -2,7 +2,7 @@ use crate::model::*; use borsh::{BorshDeserialize, BorshSerialize}; use kaspa_consensus_core::api::stats::BlockCount; use kaspa_core::debug; -use kaspa_notify::subscription::{single::UtxosChangedSubscription, Command}; +use kaspa_notify::subscription::{context::SubscriptionContext, single::UtxosChangedSubscription, Command}; use serde::{Deserialize, Serialize}; use std::{ fmt::{Display, Formatter}, @@ -984,23 +984,32 @@ pub struct UtxosChangedNotification { } impl UtxosChangedNotification { - pub(crate) fn apply_utxos_changed_subscription(&self, subscription: &UtxosChangedSubscription) -> Option { + pub(crate) fn apply_utxos_changed_subscription( + &self, + subscription: &UtxosChangedSubscription, + context: &SubscriptionContext, + ) -> Option { if subscription.to_all() { Some(self.clone()) } else { - let added = Self::filter_utxos(&self.added, subscription); - let removed = Self::filter_utxos(&self.removed, subscription); - debug!("CRPC, Creating UtxosChanged notifications with {} added and {} removed utxos", added.len(), removed.len()); + let added = Self::filter_utxos(&self.added, subscription, context); + let removed = Self::filter_utxos(&self.removed, subscription, context); if added.is_empty() && removed.is_empty() { None } else { + debug!("CRPC, Creating UtxosChanged notifications with {} added and {} removed utxos", added.len(), removed.len()); Some(Self { added: Arc::new(added), removed: Arc::new(removed) }) } } } - fn filter_utxos(utxo_set: &[RpcUtxosByAddressesEntry], subscription: &UtxosChangedSubscription) -> Vec { - utxo_set.iter().filter(|x| subscription.addresses().contains_key(&x.utxo_entry.script_public_key)).cloned().collect() + fn filter_utxos( + utxo_set: &[RpcUtxosByAddressesEntry], + subscription: &UtxosChangedSubscription, + context: &SubscriptionContext, + ) -> Vec { + let subscription_data = subscription.data(); + utxo_set.iter().filter(|x| subscription_data.contains(&x.utxo_entry.script_public_key, context)).cloned().collect() } } diff --git a/rpc/grpc/client/Cargo.toml b/rpc/grpc/client/Cargo.toml index 65d99e69a..f4be5818c 100644 --- a/rpc/grpc/client/Cargo.toml +++ b/rpc/grpc/client/Cargo.toml @@ -23,7 +23,9 @@ async-trait.workspace = true faster-hex.workspace = true futures.workspace = true h2.workspace = true +itertools.workspace = true log.workspace = true +parking_lot.workspace = true paste.workspace = true prost.workspace = true rand.workspace = true @@ -39,3 +41,6 @@ tokio-stream.workspace = true tonic = { workspace = true, features = ["gzip"] } triggered.workspace = true futures-util.workspace = true + +[features] +heap = [] diff --git a/rpc/grpc/client/src/client_pool.rs b/rpc/grpc/client/src/client_pool.rs index bc60e1c0e..497d293c5 100644 --- a/rpc/grpc/client/src/client_pool.rs +++ b/rpc/grpc/client/src/client_pool.rs @@ -1,29 +1,50 @@ use super::GrpcClient; use async_channel::{SendError, Sender}; use futures_util::Future; +use itertools::Itertools; use kaspa_core::trace; -use kaspa_utils::{any::type_name_short, channel::Channel}; -use std::sync::Arc; +use kaspa_utils::{any::type_name_short, channel::Channel, triggers::SingleTrigger}; +use std::sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, +}; use 
tokio::task::JoinHandle; pub struct ClientPool { + clients: Vec>, distribution_channel: Channel, - pub join_handles: Vec>, + running_tasks: Arc, + started: SingleTrigger, + shutdown: SingleTrigger, } impl ClientPool { - pub fn new(clients: Vec>, distribution_channel_capacity: usize, client_op: F) -> Self + pub fn new(clients: Vec>, distribution_channel_capacity: usize) -> Self { + let distribution_channel = Channel::bounded(distribution_channel_capacity); + let running_tasks = Arc::new(AtomicUsize::new(0)); + let started = SingleTrigger::new(); + let shutdown = SingleTrigger::new(); + Self { clients, distribution_channel, running_tasks, started, shutdown } + } + + pub fn start(&self, client_op: F) -> Vec> where F: Fn(Arc, T) -> R + Sync + Send + Copy + 'static, R: Future + Send, { - let distribution_channel = Channel::bounded(distribution_channel_capacity); - let join_handles = clients - .into_iter() + let tasks = self + .clients + .iter() + .cloned() .enumerate() .map(|(index, client)| { - let rx = distribution_channel.receiver(); + let running_tasks = self.running_tasks.clone(); + let started_listener = self.started_listener(); + let shutdown_trigger = self.shutdown.trigger.clone(); + let rx = self.distribution_channel.receiver(); tokio::spawn(async move { + let _ = running_tasks.fetch_add(1, Ordering::SeqCst); + started_listener.await; while let Ok(msg) = rx.recv().await { if client_op(client.clone(), msg).await { rx.close(); @@ -32,11 +53,21 @@ impl ClientPool { } client.disconnect().await.unwrap(); trace!("Client pool {} task {} exited", type_name_short::(), index); + if running_tasks.fetch_sub(1, Ordering::SeqCst) == 1 { + shutdown_trigger.trigger(); + } }) }) - .collect(); + .collect_vec(); + self.started.trigger.trigger(); + if tasks.is_empty() { + self.shutdown.trigger.trigger(); + } + tasks + } - Self { distribution_channel, join_handles } + pub fn clients(&self) -> &[Arc] { + &self.clients } pub async fn send_via_available_client(&self, msg: T) -> Result<(), SendError> { @@ -50,4 +81,12 @@ impl ClientPool { pub fn close(&self) { self.distribution_channel.close() } + + pub fn shutdown_listener(&self) -> triggered::Listener { + self.shutdown.listener.clone() + } + + pub fn started_listener(&self) -> triggered::Listener { + self.started.listener.clone() + } } diff --git a/rpc/grpc/client/src/lib.rs b/rpc/grpc/client/src/lib.rs index f1fa26db0..c7eebd8d1 100644 --- a/rpc/grpc/client/src/lib.rs +++ b/rpc/grpc/client/src/lib.rs @@ -7,7 +7,7 @@ use async_trait::async_trait; pub use client_pool::ClientPool; use connection_event::ConnectionEvent; use futures::{future::FutureExt, pin_mut, select}; -use kaspa_core::{debug, trace}; +use kaspa_core::{debug, error, trace}; use kaspa_grpc_core::{ channel::NotificationChannel, ops::KaspadPayloadOps, @@ -18,11 +18,14 @@ use kaspa_notify::{ collector::{Collector, CollectorFrom}, error::{Error as NotifyError, Result as NotifyResult}, events::{EventArray, EventType, EVENT_TYPE_ARRAY}, - listener::ListenerId, + listener::{ListenerId, ListenerLifespan}, notifier::{DynNotify, Notifier}, scope::Scope, subscriber::{Subscriber, SubscriptionManager}, - subscription::{array::ArrayBuilder, Command, Mutation, SingleSubscription}, + subscription::{ + array::ArrayBuilder, context::SubscriptionContext, Command, DynSubscription, MutateSingle, Mutation, MutationPolicies, + UtxosChangedMutationPolicy, + }, }; use kaspa_rpc_core::{ api::rpc::RpcApi, @@ -62,25 +65,58 @@ pub type GrpcClientCollector = CollectorFrom; pub type GrpcClientNotify = DynNotify; pub type 
GrpcClientNotifier = Notifier; -type DirectSubscriptions = Mutex>; +type DirectSubscriptions = Mutex>; #[derive(Debug, Clone)] pub struct GrpcClient { inner: Arc, /// In multi listener mode, a full-featured Notifier notifier: Option>, - /// In direct mode, a Collector relaying incoming notifications to any provided DynNotify + /// In direct mode, a Collector relaying incoming notifications via a channel (see `self.notification_channel_receiver()`) collector: Option>, subscriptions: Option>, + subscription_context: SubscriptionContext, + policies: MutationPolicies, notification_mode: NotificationMode, } const GRPC_CLIENT: &str = "grpc-client"; impl GrpcClient { - pub async fn connect( + pub const DIRECT_MODE_LISTENER_ID: ListenerId = 0; + + pub async fn connect(url: String) -> Result { + Self::connect_with_args(NotificationMode::Direct, url, None, false, None, false, None, Default::default()).await + } + + /// Connects to a gRPC server. + /// + /// `notification_mode` determines how notifications are handled: + /// + /// - `MultiListeners` => Multiple listeners are supported via the [`RpcApi`] implementation. + /// Registering listeners is needed before subscribing to notifications. + /// - `Direct` => A single listener receives the notification via a channel (see `self.notification_channel_receiver()`). + /// Registering a listener is pointless and ignored. + /// Subscribing to notifications ignores the listener ID. + /// + /// `url`: the server to connect to + /// + /// `subscription_context`: it is advised to provide a clone of the same instance if multiple clients dealing with + /// [`UtxosChangedNotifications`] are connected concurrently in order to optimize the memory footprint. + /// + /// `reconnect`: features an automatic reconnection to the server, reactivating all subscriptions on success. + /// + /// `connection_event_sender`: when provided will notify of connection and disconnection events via the channel. + /// + /// `override_handle_stop_notify`: legacy, should be removed in near future, always set to `false`. 
+ /// + /// `timeout_duration`: request timeout duration + /// + /// `counters`: collects some bandwidth metrics + pub async fn connect_with_args( notification_mode: NotificationMode, url: String, + subscription_context: Option, reconnect: bool, connection_event_sender: Option>, override_handle_stop_notify: bool, @@ -100,27 +136,37 @@ impl GrpcClient { ) .await?; let converter = Arc::new(RpcCoreConverter::new()); + let policies = MutationPolicies::new(UtxosChangedMutationPolicy::AddressSet); + let subscription_context = subscription_context.unwrap_or_default(); let (notifier, collector, subscriptions) = match notification_mode { NotificationMode::MultiListeners => { let enabled_events = EVENT_TYPE_ARRAY[..].into(); let collector = Arc::new(GrpcClientCollector::new(GRPC_CLIENT, inner.notification_channel_receiver(), converter)); let subscriber = Arc::new(Subscriber::new(GRPC_CLIENT, enabled_events, inner.clone(), 0)); - let notifier: GrpcClientNotifier = Notifier::new(GRPC_CLIENT, enabled_events, vec![collector], vec![subscriber], 10); + let notifier: GrpcClientNotifier = Notifier::new( + GRPC_CLIENT, + enabled_events, + vec![collector], + vec![subscriber], + subscription_context.clone(), + 3, + policies, + ); (Some(Arc::new(notifier)), None, None) } NotificationMode::Direct => { let collector = GrpcClientCollector::new(GRPC_CLIENT, inner.notification_channel_receiver(), converter); - let subscriptions = ArrayBuilder::single(); + let subscriptions = ArrayBuilder::single(Self::DIRECT_MODE_LISTENER_ID, None); (None, Some(Arc::new(collector)), Some(Arc::new(Mutex::new(subscriptions)))) } }; if reconnect { // Start the connection monitor - inner.clone().spawn_connection_monitor(notifier.clone(), subscriptions.clone()); + inner.clone().spawn_connection_monitor(notifier.clone(), subscriptions.clone(), subscription_context.clone()); } - Ok(Self { inner, notifier, collector, subscriptions, notification_mode }) + Ok(Self { inner, notifier, collector, subscriptions, subscription_context, policies, notification_mode }) } #[inline(always)] @@ -232,8 +278,11 @@ impl RpcApi for GrpcClient { /// Register a new listener and returns an id identifying it. 
fn register_new_listener(&self, connection: ChannelConnection) -> ListenerId { match self.notification_mode { - NotificationMode::MultiListeners => self.notifier.as_ref().unwrap().register_new_listener(connection), - NotificationMode::Direct => ListenerId::default(), + NotificationMode::MultiListeners => { + self.notifier.as_ref().unwrap().register_new_listener(connection, ListenerLifespan::Dynamic) + } + // In direct mode, listener registration/unregistration is ignored + NotificationMode::Direct => Self::DIRECT_MODE_LISTENER_ID, } } @@ -245,6 +294,7 @@ impl RpcApi for GrpcClient { NotificationMode::MultiListeners => { self.notifier.as_ref().unwrap().unregister_listener(id)?; } + // In direct mode, listener registration/unregistration is ignored NotificationMode::Direct => {} } Ok(()) @@ -257,8 +307,14 @@ impl RpcApi for GrpcClient { self.notifier.clone().unwrap().try_start_notify(id, scope)?; } NotificationMode::Direct => { - let event: EventType = (&scope).into(); - self.subscriptions.as_ref().unwrap().lock().await[event].mutate(Mutation::new(Command::Start, scope.clone())); + if self.inner.will_reconnect() { + let event = scope.event_type(); + self.subscriptions.as_ref().unwrap().lock().await[event].mutate( + Mutation::new(Command::Start, scope.clone()), + self.policies, + &self.subscription_context, + )?; + } self.inner.start_notify_to_client(scope).await?; } } @@ -273,8 +329,14 @@ impl RpcApi for GrpcClient { self.notifier.clone().unwrap().try_stop_notify(id, scope)?; } NotificationMode::Direct => { - let event: EventType = (&scope).into(); - self.subscriptions.as_ref().unwrap().lock().await[event].mutate(Mutation::new(Command::Stop, scope.clone())); + if self.inner.will_reconnect() { + let event = scope.event_type(); + self.subscriptions.as_ref().unwrap().lock().await[event].mutate( + Mutation::new(Command::Stop, scope.clone()), + self.policies, + &self.subscription_context, + )?; + } self.inner.stop_notify_to_client(scope).await?; } } @@ -286,7 +348,6 @@ impl RpcApi for GrpcClient { } pub const CONNECT_TIMEOUT_DURATION: u64 = 20_000; -pub const KEEP_ALIVE_DURATION: u64 = 5_000; pub const REQUEST_TIMEOUT_DURATION: u64 = 5_000; pub const TIMEOUT_MONITORING_INTERVAL: u64 = 10_000; pub const RECONNECT_INTERVAL: u64 = 2_000; @@ -450,6 +511,7 @@ impl Inner { Ok(inner) } + #[allow(unused_variables)] async fn try_connect( url: String, request_sender: KaspadRequestSender, @@ -458,11 +520,17 @@ impl Inner { counters: Arc, ) -> Result<(Streaming, ServerFeatures)> { // gRPC endpoint + #[cfg(not(feature = "heap"))] let channel = tonic::transport::Channel::builder(url.parse::().map_err(|e| Error::String(e.to_string()))?) .timeout(tokio::time::Duration::from_millis(request_timeout)) .connect_timeout(tokio::time::Duration::from_millis(CONNECT_TIMEOUT_DURATION)) - .tcp_keepalive(Some(tokio::time::Duration::from_millis(KEEP_ALIVE_DURATION))) + .connect() + .await?; + + #[cfg(feature = "heap")] + let channel = + tonic::transport::Channel::builder(url.parse::().map_err(|e| Error::String(e.to_string()))?) 
.connect() .await?; @@ -474,15 +542,24 @@ impl Inner { body.map_err(|e| tonic::Status::from_error(Box::new(e))).boxed_unsync() })) .service(channel); - let mut client = RpcClient::new(channel) + + // Build the gRPC client with an interceptor setting the request timeout + #[cfg(not(feature = "heap"))] + let request_timeout = tokio::time::Duration::from_millis(request_timeout); + #[cfg(not(feature = "heap"))] + let mut client = RpcClient::with_interceptor(channel, move |mut req: tonic::Request<()>| { + req.set_timeout(request_timeout); + Ok(req) + }); + + #[cfg(feature = "heap")] + let mut client = RpcClient::new(channel); + + client = client .send_compressed(CompressionEncoding::Gzip) .accept_compressed(CompressionEncoding::Gzip) .max_decoding_message_size(RPC_MAX_MESSAGE_SIZE); - // Force the opening of the stream when connected to a go kaspad server. - // This is also needed for querying server capabilities. - request_sender.send(GetInfoRequestMessage {}.into()).await?; - // Prepare a request receiver stream let stream_receiver = request_receiver.clone(); let request_stream = async_stream::stream! { @@ -496,6 +573,7 @@ impl Inner { // Collect server capabilities as stated in GetInfoResponse let mut server_features = ServerFeatures::default(); + request_sender.send(GetInfoRequestMessage {}.into()).await?; match stream.message().await? { Some(ref msg) => { trace!("GRPC client: try_connect - GetInfo got a response"); @@ -506,7 +584,7 @@ impl Inner { } } None => { - trace!("GRPC client: try_connect - stream closed by the server"); + debug!("GRPC client: try_connect - stream closed by the server"); return Err(Error::String("GRPC stream was closed by the server".to_string())); } } @@ -518,6 +596,7 @@ impl Inner { self: Arc, notifier: Option>, subscriptions: Option>, + subscription_context: &SubscriptionContext, ) -> RpcResult<()> { assert_ne!( notifier.is_some(), @@ -549,12 +628,12 @@ impl Inner { let subscriptions = subscriptions.lock().await; for event in EVENT_TYPE_ARRAY { if subscriptions[event].active() { - self.clone().start_notify_to_client(subscriptions[event].scope()).await?; + self.clone().start_notify_to_client(subscriptions[event].scope(subscription_context)).await?; } } } - trace!("GRPC client: reconnected"); + debug!("GRPC client: reconnected"); Ok(()) } @@ -675,9 +754,12 @@ impl Inner { pin_mut!(shutdown); tokio::select! { + biased; + _ = shutdown => { break; } + message = stream.message() => { match message { Ok(msg) => { @@ -686,7 +768,7 @@ impl Inner { self.handle_response(response); }, None =>{ - trace!("GRPC client: response receiver task - the connection to the server is closed"); + debug!("GRPC client: response receiver task - the connection to the server is closed"); // A reconnection is needed break; @@ -694,7 +776,12 @@ impl Inner { } }, Err(err) => { - trace!("GRPC client: response receiver task - the response receiver gets an error from the server: {:?}", err); + debug!("GRPC client: response receiver task - the response receiver gets an error from the server: {:?}", err); + + // TODO: ignore cases not requiring a reconnection + + // A reconnection is needed + break; } } } @@ -723,6 +810,7 @@ impl Inner { self: Arc, notifier: Option>, subscriptions: Option>, + subscription_context: SubscriptionContext, ) { // Note: self is a cloned Arc here so that it can be used in the spawned task. 
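// Illustrative sketch, assuming a locally running node at the hypothetical URL
// grpc://localhost:16110: two clients sharing one `SubscriptionContext` so that tracked
// UtxosChanged addresses are stored once rather than duplicated per client.
//
//     let context = SubscriptionContext::new();
//     let direct_client = GrpcClient::connect_with_args(
//         NotificationMode::Direct,
//         "grpc://localhost:16110".to_string(),
//         Some(context.clone()),   // shared subscription context
//         true,                    // reconnect
//         None,                    // connection_event_sender
//         false,                   // override_handle_stop_notify (legacy)
//         Some(500_000),           // timeout_duration (ms)
//         Default::default(),      // counters
//     )
//     .await?;
//     let multi_client = GrpcClient::connect_with_args(
//         NotificationMode::MultiListeners,
//         "grpc://localhost:16110".to_string(),
//         Some(context),
//         true, None, false, Some(500_000), Default::default(),
//     )
//     .await?;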
@@ -745,7 +833,7 @@ impl Inner { _ = delay => { trace!("GRPC client: connection monitor task - running"); if !self.is_connected() { - match self.clone().reconnect(notifier.clone(), subscriptions.clone()).await { + match self.clone().reconnect(notifier.clone(), subscriptions.clone(), &subscription_context).await { Ok(_) => { trace!("GRPC client: reconnection to server succeeded"); }, @@ -775,12 +863,12 @@ impl Inner { match self.notification_channel.try_send(notification) { Ok(_) => {} Err(err) => { - trace!("GRPC client: error while trying to send a notification to the notifier: {:?}", err); + error!("GRPC client: error while trying to send a notification to the notifier: {:?}", err); } } } Err(err) => { - trace!("GRPC client: handle_response error converting response into notification: {:?}", err); + error!("GRPC client: handle_response error converting response into notification: {:?}", err); } } } else if response.payload.is_some() { diff --git a/rpc/grpc/server/src/adaptor.rs b/rpc/grpc/server/src/adaptor.rs index e8b97ebc3..c5536c6b6 100644 --- a/rpc/grpc/server/src/adaptor.rs +++ b/rpc/grpc/server/src/adaptor.rs @@ -1,6 +1,6 @@ use crate::{connection_handler::ConnectionHandler, manager::Manager}; use kaspa_core::debug; -use kaspa_notify::notifier::Notifier; +use kaspa_notify::{notifier::Notifier, subscription::context::SubscriptionContext}; use kaspa_rpc_core::{api::rpc::DynRpcService, notify::connection::ChannelConnection, Notification, RpcResult}; use kaspa_utils::networking::NetAddress; use kaspa_utils_tower::counters::TowerConnectionCounters; @@ -37,10 +37,20 @@ impl Adaptor { manager: Manager, core_service: DynRpcService, core_notifier: Arc>, + subscription_context: SubscriptionContext, + broadcasters: usize, counters: Arc, ) -> Arc { let (manager_sender, manager_receiver) = mpsc_channel(Self::manager_channel_size()); - let connection_handler = ConnectionHandler::new(network_bps, manager_sender, core_service.clone(), core_notifier, counters); + let connection_handler = ConnectionHandler::new( + network_bps, + manager_sender, + core_service.clone(), + core_notifier, + subscription_context, + broadcasters, + counters, + ); let server_termination = connection_handler.serve(serve_address); let adaptor = Arc::new(Adaptor::new(Some(server_termination), connection_handler, manager, serve_address)); adaptor.manager.clone().start_event_loop(manager_receiver); @@ -59,7 +69,7 @@ impl Adaptor { /// Terminates all connections and cleans up any additional async resources pub async fn stop(&self) -> RpcResult<()> { debug!("GRPC, Stopping the adaptor"); - self.terminate_all_connections(); + self.terminate_all_connections().await; self.connection_handler.stop().await?; Ok(()) } diff --git a/rpc/grpc/server/src/connection.rs b/rpc/grpc/server/src/connection.rs index bafb6f113..442eb2f42 100644 --- a/rpc/grpc/server/src/connection.rs +++ b/rpc/grpc/server/src/connection.rs @@ -16,7 +16,10 @@ use kaspa_grpc_core::{ protowire::{KaspadRequest, KaspadResponse}, }; use kaspa_notify::{ - connection::Connection as ConnectionT, error::Error as NotificationError, listener::ListenerId, notifier::Notifier, + connection::Connection as ConnectionT, + error::Error as NotificationError, + listener::{ListenerId, ListenerLifespan}, + notifier::Notifier, }; use kaspa_rpc_core::Notification; use parking_lot::Mutex; @@ -318,7 +321,8 @@ impl Connection { pub fn get_or_register_listener_id(&self) -> GrpcServerResult { match self.is_closed() { false => 
Ok(*self.inner.mutable_state.lock().listener_id.get_or_insert_with(|| { - let listener_id = self.inner.server_context.notifier.as_ref().register_new_listener(self.clone()); + let listener_id = + self.inner.server_context.notifier.as_ref().register_new_listener(self.clone(), ListenerLifespan::Dynamic); debug!("GRPC, Connection {} registered as notification listener {}", self, listener_id); listener_id })), diff --git a/rpc/grpc/server/src/connection_handler.rs b/rpc/grpc/server/src/connection_handler.rs index 47e5df9d6..8dba5b103 100644 --- a/rpc/grpc/server/src/connection_handler.rs +++ b/rpc/grpc/server/src/connection_handler.rs @@ -13,7 +13,14 @@ use kaspa_grpc_core::{ }, RPC_MAX_MESSAGE_SIZE, }; -use kaspa_notify::{connection::ChannelType, events::EVENT_TYPE_ARRAY, notifier::Notifier, subscriber::Subscriber}; +use kaspa_notify::{ + connection::ChannelType, + events::EVENT_TYPE_ARRAY, + listener::ListenerLifespan, + notifier::Notifier, + subscriber::Subscriber, + subscription::{context::SubscriptionContext, MutationPolicies, UtxosChangedMutationPolicy}, +}; use kaspa_rpc_core::{ api::rpc::DynRpcService, notify::{channel::NotificationChannel, connection::ChannelConnection}, @@ -79,20 +86,34 @@ impl ConnectionHandler { manager_sender: MpscSender, core_service: DynRpcService, core_notifier: Arc>, + subscription_context: SubscriptionContext, + broadcasters: usize, counters: Arc, ) -> Self { + // This notifier UTXOs subscription granularity to rpc-core notifier + let policies = MutationPolicies::new(UtxosChangedMutationPolicy::AddressSet); + // Prepare core objects let core_channel = NotificationChannel::default(); - let core_listener_id = - core_notifier.register_new_listener(ChannelConnection::new(core_channel.sender(), ChannelType::Closable)); + let core_listener_id = core_notifier.register_new_listener( + ChannelConnection::new(GRPC_SERVER, core_channel.sender(), ChannelType::Closable), + ListenerLifespan::Static(policies), + ); // Prepare internals let core_events = EVENT_TYPE_ARRAY[..].into(); let converter = Arc::new(GrpcServiceConverter::new()); let collector = Arc::new(GrpcServiceCollector::new(GRPC_SERVER, core_channel.receiver(), converter)); let subscriber = Arc::new(Subscriber::new(GRPC_SERVER, core_events, core_notifier, core_listener_id)); - let notifier: Arc> = - Arc::new(Notifier::new(GRPC_SERVER, core_events, vec![collector], vec![subscriber], 10)); + let notifier: Arc> = Arc::new(Notifier::new( + GRPC_SERVER, + core_events, + vec![collector], + vec![subscriber], + subscription_context, + broadcasters, + policies, + )); let server_context = ServerContext::new(core_service, notifier); let interface = Arc::new(Factory::new_interface(server_context.clone(), network_bps)); let running = Default::default(); @@ -118,11 +139,11 @@ impl ConnectionHandler { .max_decoding_message_size(RPC_MAX_MESSAGE_SIZE); // TODO: check whether we should set tcp_keepalive - const GRPC_KEEP_ALIVE_PING_INTERVAL: Duration = Duration::from_secs(3); - const GRPC_KEEP_ALIVE_PING_TIMEOUT: Duration = Duration::from_secs(10); + // const GRPC_KEEP_ALIVE_PING_INTERVAL: Duration = Duration::from_secs(5); + // const GRPC_KEEP_ALIVE_PING_TIMEOUT: Duration = Duration::from_secs(120); let serve_result = TonicServer::builder() - .http2_keepalive_interval(Some(GRPC_KEEP_ALIVE_PING_INTERVAL)) - .http2_keepalive_timeout(Some(GRPC_KEEP_ALIVE_PING_TIMEOUT)) + // .http2_keepalive_interval(Some(GRPC_KEEP_ALIVE_PING_INTERVAL)) + // .http2_keepalive_timeout(Some(GRPC_KEEP_ALIVE_PING_TIMEOUT)) 
.layer(measure_request_body_size_layer(bytes_rx, |b| b)) .layer(MapResponseBodyLayer::new(move |body| CountBytesBody::new(body, bytes_tx.clone()))) .add_service(protowire_server) diff --git a/rpc/grpc/server/src/manager.rs b/rpc/grpc/server/src/manager.rs index 4212fdefb..cae37bc37 100644 --- a/rpc/grpc/server/src/manager.rs +++ b/rpc/grpc/server/src/manager.rs @@ -1,15 +1,15 @@ use crate::connection::{Connection, ConnectionId}; -use itertools::Itertools; use kaspa_core::{debug, info, warn}; use kaspa_notify::connection::Connection as ConnectionT; use parking_lot::RwLock; use std::{ - collections::{hash_map::Entry::Occupied, HashMap}, + collections::{hash_map::Entry::Occupied, HashMap, HashSet}, sync::Arc, + time::Duration, }; use thiserror::Error; -use tokio::sync::mpsc::Receiver as MpscReceiver; use tokio::sync::oneshot::Sender as OneshotSender; +use tokio::{sync::mpsc::Receiver as MpscReceiver, time::sleep}; #[derive(Debug, Error)] pub(crate) enum RegistrationError { @@ -108,13 +108,24 @@ impl Manager { } /// Terminate all connections - pub fn terminate_all_connections(&self) { - // Note that using drain here prevents unregister() to successfully find the entry... - let connections = self.connections.write().drain().map(|(_, cx)| cx).collect_vec(); - for (i, connection) in connections.into_iter().enumerate().rev() { - connection.close(); - // ... so we log explicitly here - info!("GRPC, end connection {} #{}", connection, i + 1); + pub async fn terminate_all_connections(&self) { + let mut closed_connections = HashSet::with_capacity(self.connections.read().len()); + loop { + if let Some((id, connection)) = self + .connections + .read() + .iter() + .filter(|(id, _)| !closed_connections.contains(*id)) + .map(|(id, cx)| (*id, cx.clone())) + .next() + { + closed_connections.insert(id); + connection.close(); + continue; + } else if self.connections.read().is_empty() { + break; + } + sleep(Duration::from_millis(10)).await; } } diff --git a/rpc/grpc/server/src/service.rs b/rpc/grpc/server/src/service.rs index 5d584ca40..7d810bf97 100644 --- a/rpc/grpc/server/src/service.rs +++ b/rpc/grpc/server/src/service.rs @@ -9,37 +9,54 @@ use kaspa_rpc_service::service::RpcCoreService; use kaspa_utils::{networking::NetAddress, triggers::SingleTrigger}; use kaspa_utils_tower::counters::TowerConnectionCounters; use std::sync::Arc; - -const GRPC_SERVICE: &str = "grpc-service"; +use triggered::Listener; pub struct GrpcService { net_address: NetAddress, config: Arc, core_service: Arc, rpc_max_clients: usize, + broadcasters: usize, + started: SingleTrigger, shutdown: SingleTrigger, counters: Arc, } impl GrpcService { + pub const IDENT: &'static str = "grpc-service"; + pub fn new( address: NetAddress, config: Arc, core_service: Arc, rpc_max_clients: usize, + broadcasters: usize, counters: Arc, ) -> Self { - Self { net_address: address, config, core_service, rpc_max_clients, shutdown: Default::default(), counters } + Self { + net_address: address, + config, + core_service, + rpc_max_clients, + broadcasters, + started: Default::default(), + shutdown: Default::default(), + counters, + } + } + + pub fn started(&self) -> Listener { + self.started.listener.clone() } } impl AsyncService for GrpcService { fn ident(self: Arc) -> &'static str { - GRPC_SERVICE + Self::IDENT } fn start(self: Arc) -> AsyncServiceFuture { - trace!("{} starting", GRPC_SERVICE); + trace!("{} starting", Self::IDENT); // Prepare a shutdown signal receiver let shutdown_signal = self.shutdown.listener.clone(); @@ -51,9 +68,14 @@ impl AsyncService 
for GrpcService { manager, self.core_service.clone(), self.core_service.notifier(), + self.core_service.subscription_context(), + self.broadcasters, self.counters.clone(), ); + // Signal the server was started + self.started.trigger.trigger(); + // Launch the service and wait for a shutdown signal Box::pin(async move { // Keep the gRPC server running until a service shutdown signal is received @@ -65,7 +87,7 @@ impl AsyncService for GrpcService { debug!("GRPC, Adaptor terminated successfully"); } Err(err) => { - warn!("{} error while stopping the connection handler: {}", GRPC_SERVICE, err); + warn!("{} error while stopping the connection handler: {}", Self::IDENT, err); } } @@ -75,13 +97,13 @@ impl AsyncService for GrpcService { } fn signal_exit(self: Arc) { - trace!("sending an exit signal to {}", GRPC_SERVICE); + trace!("sending an exit signal to {}", Self::IDENT); self.shutdown.trigger.trigger(); } fn stop(self: Arc) -> AsyncServiceFuture { Box::pin(async move { - trace!("{} stopped", GRPC_SERVICE); + trace!("{} stopped", Self::IDENT); Ok(()) }) } diff --git a/rpc/grpc/server/src/tests/client_server.rs b/rpc/grpc/server/src/tests/client_server.rs index f3bb208dc..99cdf6392 100644 --- a/rpc/grpc/server/src/tests/client_server.rs +++ b/rpc/grpc/server/src/tests/client_server.rs @@ -3,7 +3,7 @@ use crate::{adaptor::Adaptor, manager::Manager}; use kaspa_core::info; use kaspa_grpc_client::GrpcClient; use kaspa_notify::scope::{NewBlockTemplateScope, Scope}; -use kaspa_rpc_core::{api::rpc::RpcApi, notify::mode::NotificationMode}; +use kaspa_rpc_core::api::rpc::RpcApi; use kaspa_utils::networking::{ContextualNetAddress, NetAddress}; use std::sync::Arc; @@ -21,6 +21,7 @@ async fn test_client_server_sanity_check() { let client = create_client(server.serve_address()).await; assert_eq!(server.active_connections().len(), 1, "the client failed to connect to the server"); + assert!(client.handle_message_id() && client.handle_stop_notify(), "the client failed to collect server features"); // Stop the fake service rpc_core_service.join().await; @@ -91,7 +92,7 @@ async fn test_client_server_connections() { // Terminate connections server-side if self.terminate_clients { - server.terminate_all_connections(); + server.terminate_all_connections().await; tokio::time::sleep(std::time::Duration::from_millis(25)).await; for (i, client) in clients.iter().enumerate() { assert!(!client.is_connected(), "server failed to disconnect client {}", i); @@ -189,12 +190,21 @@ async fn test_client_server_notifications() { fn create_server(core_service: Arc) -> Arc { let manager = Manager::new(128); - Adaptor::server(get_free_net_address(), 1, manager, core_service.clone(), core_service.core_notifier(), Default::default()) + Adaptor::server( + get_free_net_address(), + 1, + manager, + core_service.clone(), + core_service.core_notifier(), + core_service.subscription_context(), + 3, + Default::default(), + ) } async fn create_client(server_address: NetAddress) -> GrpcClient { let server_url = format!("grpc://localhost:{}", server_address.port); - GrpcClient::connect(NotificationMode::Direct, server_url, false, None, false, None, Default::default()).await.unwrap() + GrpcClient::connect(server_url).await.unwrap() } fn get_free_net_address() -> NetAddress { diff --git a/rpc/grpc/server/src/tests/rpc_core_mock.rs b/rpc/grpc/server/src/tests/rpc_core_mock.rs index 5d142fb9f..ddf78ccbd 100644 --- a/rpc/grpc/server/src/tests/rpc_core_mock.rs +++ b/rpc/grpc/server/src/tests/rpc_core_mock.rs @@ -1,9 +1,11 @@ use 
async_channel::{unbounded, Receiver}; use async_trait::async_trait; use kaspa_notify::events::EVENT_TYPE_ARRAY; -use kaspa_notify::listener::ListenerId; +use kaspa_notify::listener::{ListenerId, ListenerLifespan}; use kaspa_notify::notifier::{Notifier, Notify}; use kaspa_notify::scope::Scope; +use kaspa_notify::subscription::context::SubscriptionContext; +use kaspa_notify::subscription::{MutationPolicies, UtxosChangedMutationPolicy}; use kaspa_rpc_core::{api::rpc::RpcApi, *}; use kaspa_rpc_core::{notify::connection::ChannelConnection, RpcResult}; use std::sync::Arc; @@ -17,13 +19,30 @@ pub(super) struct RpcCoreMock { impl RpcCoreMock { pub(super) fn new() -> Self { - Self::default() + let (sync_sender, sync_receiver) = unbounded(); + let policies = MutationPolicies::new(UtxosChangedMutationPolicy::AddressSet); + let subscription_context = SubscriptionContext::new(); + let core_notifier: Arc = Arc::new(Notifier::with_sync( + "rpc-core", + EVENT_TYPE_ARRAY[..].into(), + vec![], + vec![], + subscription_context, + 10, + policies, + Some(sync_sender), + )); + Self { core_notifier, _sync_receiver: sync_receiver } } pub(super) fn core_notifier(&self) -> Arc { self.core_notifier.clone() } + pub(super) fn subscription_context(&self) -> SubscriptionContext { + self.core_notifier.subscription_context().clone() + } + #[allow(dead_code)] pub(super) fn notify_new_block_template(&self) -> kaspa_notify::error::Result<()> { let notification = Notification::NewBlockTemplate(NewBlockTemplateNotification {}); @@ -44,15 +63,6 @@ impl RpcCoreMock { } } -impl Default for RpcCoreMock { - fn default() -> Self { - let (sync_sender, sync_receiver) = unbounded(); - let core_notifier: Arc = - Arc::new(Notifier::with_sync("rpc-core", EVENT_TYPE_ARRAY[..].into(), vec![], vec![], 10, Some(sync_sender))); - Self { core_notifier, _sync_receiver: sync_receiver } - } -} - #[async_trait] impl RpcApi for RpcCoreMock { // This fn needs to succeed while the client connects @@ -63,8 +73,8 @@ impl RpcApi for RpcCoreMock { server_version: "mock".to_string(), is_utxo_indexed: false, is_synced: false, - has_notify_command: false, - has_message_id: false, + has_notify_command: true, + has_message_id: true, }) } @@ -222,7 +232,7 @@ impl RpcApi for RpcCoreMock { // Notification API fn register_new_listener(&self, connection: ChannelConnection) -> ListenerId { - self.core_notifier.register_new_listener(connection) + self.core_notifier.register_new_listener(connection, ListenerLifespan::Dynamic) } async fn unregister_listener(&self, id: ListenerId) -> RpcResult<()> { diff --git a/rpc/service/Cargo.toml b/rpc/service/Cargo.toml index 2be71a8b7..d606d5153 100644 --- a/rpc/service/Cargo.toml +++ b/rpc/service/Cargo.toml @@ -32,4 +32,5 @@ kaspa-utxoindex.workspace = true async-trait.workspace = true log.workspace = true tokio.workspace = true +triggered.workspace = true workflow-rpc.workspace = true diff --git a/rpc/service/src/service.rs b/rpc/service/src/service.rs index eb5eea083..00cc0d082 100644 --- a/rpc/service/src/service.rs +++ b/rpc/service/src/service.rs @@ -36,6 +36,9 @@ use kaspa_index_core::{ }; use kaspa_mining::model::tx_query::TransactionQuery; use kaspa_mining::{manager::MiningManagerProxy, mempool::tx::Orphan}; +use kaspa_notify::listener::ListenerLifespan; +use kaspa_notify::subscription::context::SubscriptionContext; +use kaspa_notify::subscription::{MutationPolicies, UtxosChangedMutationPolicy}; use kaspa_notify::{ collector::DynCollector, connection::ChannelType, @@ -102,6 +105,7 @@ pub struct RpcCoreService { 
wrpc_borsh_counters: Arc, wrpc_json_counters: Arc, shutdown: SingleTrigger, + core_shutdown_request: SingleTrigger, perf_monitor: Arc>>, p2p_tower_counters: Arc, grpc_tower_counters: Arc, @@ -110,6 +114,8 @@ pub struct RpcCoreService { const RPC_CORE: &str = "rpc-core"; impl RpcCoreService { + pub const IDENT: &'static str = "rpc-core-service"; + #[allow(clippy::too_many_arguments)] pub fn new( consensus_manager: Arc, @@ -117,6 +123,7 @@ impl RpcCoreService { index_notifier: Option>, mining_manager: MiningManagerProxy, flow_context: Arc, + subscription_context: SubscriptionContext, utxoindex: Option, config: Arc, core: Arc, @@ -127,10 +134,18 @@ impl RpcCoreService { p2p_tower_counters: Arc, grpc_tower_counters: Arc, ) -> Self { + // This notifier UTXOs subscription granularity to index-processor or consensus notifier + let policies = match index_notifier { + Some(_) => MutationPolicies::new(UtxosChangedMutationPolicy::AddressSet), + None => MutationPolicies::new(UtxosChangedMutationPolicy::Wildcard), + }; + // Prepare consensus-notify objects let consensus_notify_channel = Channel::::default(); - let consensus_notify_listener_id = consensus_notifier - .register_new_listener(ConsensusChannelConnection::new(consensus_notify_channel.sender(), ChannelType::Closable)); + let consensus_notify_listener_id = consensus_notifier.register_new_listener( + ConsensusChannelConnection::new(RPC_CORE, consensus_notify_channel.sender(), ChannelType::Closable), + ListenerLifespan::Static(Default::default()), + ); // Prepare the rpc-core notifier objects let mut consensus_events: EventSwitches = EVENT_TYPE_ARRAY[..].into(); @@ -152,9 +167,10 @@ impl RpcCoreService { let index_converter = Arc::new(IndexConverter::new(config.clone())); if let Some(ref index_notifier) = index_notifier { let index_notify_channel = Channel::::default(); - let index_notify_listener_id = index_notifier - .clone() - .register_new_listener(IndexChannelConnection::new(index_notify_channel.sender(), ChannelType::Closable)); + let index_notify_listener_id = index_notifier.clone().register_new_listener( + IndexChannelConnection::new(RPC_CORE, index_notify_channel.sender(), ChannelType::Closable), + ListenerLifespan::Static(policies), + ); let index_events: EventSwitches = [EventType::UtxosChanged, EventType::PruningPointUtxoSetOverride].as_ref().into(); let index_collector = @@ -170,7 +186,8 @@ impl RpcCoreService { let protocol_converter = Arc::new(ProtocolConverter::new(flow_context.clone())); // Create the rcp-core notifier - let notifier = Arc::new(Notifier::new(RPC_CORE, EVENT_TYPE_ARRAY[..].into(), collectors, subscribers, 1)); + let notifier = + Arc::new(Notifier::new(RPC_CORE, EVENT_TYPE_ARRAY[..].into(), collectors, subscribers, subscription_context, 1, policies)); Self { consensus_manager, @@ -187,6 +204,7 @@ impl RpcCoreService { wrpc_borsh_counters, wrpc_json_counters, shutdown: SingleTrigger::default(), + core_shutdown_request: SingleTrigger::default(), perf_monitor, p2p_tower_counters, grpc_tower_counters, @@ -198,7 +216,7 @@ impl RpcCoreService { } pub async fn join(&self) -> RpcResult<()> { - trace!("{} joining notifier", RPC_CORE_SERVICE); + trace!("{} joining notifier", Self::IDENT); self.notifier().join().await?; Ok(()) } @@ -208,6 +226,15 @@ impl RpcCoreService { self.notifier.clone() } + #[inline(always)] + pub fn subscription_context(&self) -> SubscriptionContext { + self.notifier.subscription_context().clone() + } + + pub fn core_shutdown_request_listener(&self) -> triggered::Listener { + 
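// Whoever holds this listener (e.g. the integration-test `Daemon` wrapper further down in this
// diff) is woken as soon as an RPC Shutdown request triggers `core_shutdown_request`, roughly
// one second before the core itself is asked to shut down.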
self.core_shutdown_request.listener.clone() + } + async fn get_utxo_set_by_script_public_key<'a>( &self, addresses: impl Iterator, @@ -740,7 +767,11 @@ NOTE: This error usually indicates an RPC conversion error between the node and } warn!("Shutdown RPC command was called, shutting down in 1 second..."); - // Wait a second before shutting down, to allow time to return the response to the caller + // Signal the shutdown request + self.core_shutdown_request.trigger.trigger(); + + // Wait for a second before shutting down, + // giving time for the response to be sent to the caller. let core = self.core.clone(); tokio::spawn(async move { tokio::time::sleep(std::time::Duration::from_secs(1)).await; @@ -868,7 +899,7 @@ NOTE: This error usually indicates an RPC conversion error between the node and /// Register a new listener and returns an id identifying it. fn register_new_listener(&self, connection: ChannelConnection) -> ListenerId { - self.notifier.register_new_listener(connection) + self.notifier.register_new_listener(connection, ListenerLifespan::Dynamic) } /// Unregister an existing listener. @@ -906,17 +937,15 @@ NOTE: This error usually indicates an RPC conversion error between the node and } } -const RPC_CORE_SERVICE: &str = "rpc-core-service"; - // It might be necessary to opt this out in the context of wasm32 impl AsyncService for RpcCoreService { fn ident(self: Arc) -> &'static str { - RPC_CORE_SERVICE + Self::IDENT } fn start(self: Arc) -> AsyncServiceFuture { - trace!("{} starting", RPC_CORE_SERVICE); + trace!("{} starting", Self::IDENT); let service = self.clone(); // Prepare a shutdown signal receiver @@ -929,7 +958,7 @@ impl AsyncService for RpcCoreService { match service.join().await { Ok(_) => Ok(()), Err(err) => { - warn!("Error while stopping {}: {}", RPC_CORE_SERVICE, err); + warn!("Error while stopping {}: {}", Self::IDENT, err); Err(AsyncServiceError::Service(err.to_string())) } } @@ -937,13 +966,13 @@ impl AsyncService for RpcCoreService { } fn signal_exit(self: Arc) { - trace!("sending an exit signal to {}", RPC_CORE_SERVICE); + trace!("sending an exit signal to {}", Self::IDENT); self.shutdown.trigger.trigger(); } fn stop(self: Arc) -> AsyncServiceFuture { Box::pin(async move { - trace!("{} stopped", RPC_CORE_SERVICE); + trace!("{} stopped", Self::IDENT); Ok(()) }) } diff --git a/rpc/wrpc/client/src/client.rs b/rpc/wrpc/client/src/client.rs index e8bda7e01..2b6d85646 100644 --- a/rpc/wrpc/client/src/client.rs +++ b/rpc/wrpc/client/src/client.rs @@ -2,6 +2,10 @@ use crate::error::Error; use crate::imports::*; use crate::parse::parse_host; use kaspa_consensus_core::network::NetworkType; +use kaspa_notify::{ + listener::ListenerLifespan, + subscription::{context::SubscriptionContext, MutationPolicies, UtxosChangedMutationPolicy}, +}; use kaspa_rpc_core::{ api::ctl::RpcCtl, notify::collector::{RpcCoreCollector, RpcCoreConverter}, @@ -172,19 +176,34 @@ pub struct KaspaRpcClient { impl KaspaRpcClient { /// Create a new `KaspaRpcClient` with the given Encoding and URL - pub fn new(encoding: Encoding, url: &str) -> Result { - Self::new_with_args(encoding, NotificationMode::Direct, url) + // FIXME + pub fn new(encoding: Encoding, url: &str, subscription_context: Option) -> Result { + Self::new_with_args(encoding, NotificationMode::Direct, url, subscription_context) } /// Extended constructor that accepts [`NotificationMode`] argument. 
- pub fn new_with_args(encoding: Encoding, notification_mode: NotificationMode, url: &str) -> Result { + pub fn new_with_args( + encoding: Encoding, + notification_mode: NotificationMode, + url: &str, + subscription_context: Option, + ) -> Result { let inner = Arc::new(Inner::new(encoding, url)?); let notifier = if matches!(notification_mode, NotificationMode::MultiListeners) { let enabled_events = EVENT_TYPE_ARRAY[..].into(); let converter = Arc::new(RpcCoreConverter::new()); let collector = Arc::new(RpcCoreCollector::new(WRPC_CLIENT, inner.notification_channel_receiver(), converter)); let subscriber = Arc::new(Subscriber::new(WRPC_CLIENT, enabled_events, inner.clone(), 0)); - Some(Arc::new(Notifier::new(WRPC_CLIENT, enabled_events, vec![collector], vec![subscriber], 3))) + let policies = MutationPolicies::new(UtxosChangedMutationPolicy::AddressSet); + Some(Arc::new(Notifier::new( + WRPC_CLIENT, + enabled_events, + vec![collector], + vec![subscriber], + subscription_context.unwrap_or_default(), + 3, + policies, + ))) } else { None }; @@ -444,7 +463,9 @@ impl RpcApi for KaspaRpcClient { /// Register a new listener and returns an id and a channel receiver. fn register_new_listener(&self, connection: ChannelConnection) -> ListenerId { match self.notification_mode { - NotificationMode::MultiListeners => self.notifier.as_ref().unwrap().register_new_listener(connection), + NotificationMode::MultiListeners => { + self.notifier.as_ref().unwrap().register_new_listener(connection, ListenerLifespan::Dynamic) + } NotificationMode::Direct => ListenerId::default(), } } diff --git a/rpc/wrpc/client/src/wasm.rs b/rpc/wrpc/client/src/wasm.rs index cd9f7e13c..9718ee5a4 100644 --- a/rpc/wrpc/client/src/wasm.rs +++ b/rpc/wrpc/client/src/wasm.rs @@ -41,7 +41,7 @@ impl RpcClient { let url = if let Some(network_type) = network_type { Self::parse_url(url, encoding, network_type)? 
} else { url.to_string() }; let rpc_client = RpcClient { - client: Arc::new(KaspaRpcClient::new(encoding, url.as_str()).unwrap_or_else(|err| panic!("{err}"))), + client: Arc::new(KaspaRpcClient::new(encoding, url.as_str(), None).unwrap_or_else(|err| panic!("{err}"))), inner: Arc::new(Inner { notification_task: AtomicBool::new(false), notification_ctl: DuplexChannel::oneshot(), @@ -218,7 +218,7 @@ impl RpcClient { .into_iter() .map(|jsv| from_value(jsv).map_err(|err| JsError::new(&err.to_string()))) .collect::, JsError>>()?; - self.client.start_notify(ListenerId::default(), Scope::UtxosChanged(UtxosChangedScope { addresses })).await?; + self.client.start_notify(ListenerId::default(), UtxosChangedScope::new(addresses).into()).await?; Ok(()) } @@ -230,7 +230,7 @@ impl RpcClient { .into_iter() .map(|jsv| from_value(jsv).map_err(|err| JsError::new(&err.to_string()))) .collect::, JsError>>()?; - self.client.stop_notify(ListenerId::default(), Scope::UtxosChanged(UtxosChangedScope { addresses })).await?; + self.client.stop_notify(ListenerId::default(), UtxosChangedScope::new(addresses).into()).await?; Ok(()) } diff --git a/rpc/wrpc/server/src/connection.rs b/rpc/wrpc/server/src/connection.rs index 64bcff49e..86345e5d5 100644 --- a/rpc/wrpc/server/src/connection.rs +++ b/rpc/wrpc/server/src/connection.rs @@ -8,7 +8,7 @@ use kaspa_notify::{ }; use kaspa_rpc_core::{api::ops::RpcApiOps, notify::mode::NotificationMode, Notification}; use std::{ - fmt::Debug, + fmt::{Debug, Display}, sync::{Arc, Mutex}, }; use workflow_log::log_trace; @@ -65,6 +65,12 @@ impl Notify for ConnectionInner { } } +impl Display for ConnectionInner { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}@{}", self.id, self.peer) + } +} + /// [`Connection`] represents a currently connected WebSocket RPC channel. /// This struct owns a [`Messenger`] that has [`Messenger::notify`] /// function that can be used to post notifications to the connection. 
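// With the two Display impls added in this file, a connection renders as "{id}@{peer}"
// (Connection simply delegates to ConnectionInner), presumably so log lines can identify
// the remote peer of a WebSocket RPC channel.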
@@ -132,6 +138,12 @@ impl Connection { } } +impl Display for Connection { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.inner) + } +} + #[async_trait::async_trait] impl ConnectionT for Connection { type Notification = Notification; diff --git a/rpc/wrpc/server/src/server.rs b/rpc/wrpc/server/src/server.rs index ca0188316..562bbf34b 100644 --- a/rpc/wrpc/server/src/server.rs +++ b/rpc/wrpc/server/src/server.rs @@ -5,7 +5,15 @@ use crate::{ service::Options, }; use kaspa_grpc_client::GrpcClient; -use kaspa_notify::{connection::ChannelType, events::EVENT_TYPE_ARRAY, notifier::Notifier, scope::Scope, subscriber::Subscriber}; +use kaspa_notify::{ + connection::ChannelType, + events::EVENT_TYPE_ARRAY, + listener::ListenerLifespan, + notifier::Notifier, + scope::Scope, + subscriber::Subscriber, + subscription::{MutationPolicies, UtxosChangedMutationPolicy}, +}; use kaspa_rpc_core::{ api::rpc::{DynRpcService, RpcApi}, notify::{channel::NotificationChannel, connection::ChannelConnection, mode::NotificationMode}, @@ -46,6 +54,9 @@ const WRPC_SERVER: &str = "wrpc-server"; impl Server { pub fn new(tasks: usize, encoding: Encoding, core_service: Option>, options: Arc) -> Self { + // This notifier UTXOs subscription granularity to rpc-core notifier + let policies = MutationPolicies::new(UtxosChangedMutationPolicy::AddressSet); + // Either get a core service or be called from the proxy and rely each connection having its own gRPC client assert_eq!( core_service.is_none(), @@ -56,15 +67,25 @@ impl Server { let rpc_core = if let Some(service) = core_service { // Prepare rpc service objects let notification_channel = NotificationChannel::default(); - let listener_id = - service.notifier().register_new_listener(ChannelConnection::new(notification_channel.sender(), ChannelType::Closable)); + let listener_id = service.notifier().register_new_listener( + ChannelConnection::new(WRPC_SERVER, notification_channel.sender(), ChannelType::Closable), + ListenerLifespan::Static(policies), + ); // Prepare notification internals let enabled_events = EVENT_TYPE_ARRAY[..].into(); let converter = Arc::new(WrpcServiceConverter::new()); let collector = Arc::new(WrpcServiceCollector::new(WRPC_SERVER, notification_channel.receiver(), converter)); let subscriber = Arc::new(Subscriber::new(WRPC_SERVER, enabled_events, service.notifier(), listener_id)); - let wrpc_notifier = Arc::new(Notifier::new(WRPC_SERVER, enabled_events, vec![collector], vec![subscriber], tasks)); + let wrpc_notifier = Arc::new(Notifier::new( + WRPC_SERVER, + enabled_events, + vec![collector], + vec![subscriber], + service.subscription_context(), + tasks, + policies, + )); Some(RpcCore { service, wrpc_notifier }) } else { None @@ -96,9 +117,10 @@ impl Server { // Provider::GrpcClient log_info!("Routing wrpc://{peer} -> {grpc_proxy_address}"); - let grpc_client = GrpcClient::connect( + let grpc_client = GrpcClient::connect_with_args( NotificationMode::Direct, grpc_proxy_address.to_owned(), + None, false, None, true, @@ -163,7 +185,7 @@ impl Server { // is always set to Some(ListenerId::default()) by the connection ctor. 
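// One reading of the new ListenerLifespan argument, consistent with its use across this diff:
// Dynamic listeners come and go with individual client connections, while Static(policies)
// marks the internal listeners a server registers once at startup and keeps for its lifetime.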
let notifier = self.notifier().unwrap_or_else(|| panic!("Incorrect use: `server::Server` does not carry an internal notifier")); - let listener_id = notifier.register_new_listener(connection.clone()); + let listener_id = notifier.register_new_listener(connection.clone(), ListenerLifespan::Dynamic); connection.register_notification_listener(listener_id); listener_id }; diff --git a/testing/integration/Cargo.toml b/testing/integration/Cargo.toml index 2d9d2ce25..7d9dd99af 100644 --- a/testing/integration/Cargo.toml +++ b/testing/integration/Cargo.toml @@ -11,38 +11,45 @@ repository.workspace = true [dependencies] kaspa-alloc.workspace = true # This changes the global allocator for all of the next dependencies so should be kept first + kaspa-addresses.workspace = true +kaspa-bip32.workspace = true kaspa-consensus-core.workspace = true kaspa-consensus-notify.workspace = true kaspa-consensus.workspace = true kaspa-consensusmanager.workspace = true kaspa-core.workspace = true +kaspa-database.workspace = true kaspa-grpc-client.workspace = true kaspa-grpc-core.workspace = true +kaspa-grpc-server.workspace = true kaspa-hashes.workspace = true +kaspa-index-processor.workspace = true kaspa-math.workspace = true kaspa-merkle.workspace = true +kaspa-muhash.workspace = true kaspa-notify.workspace = true kaspa-pow.workspace = true kaspa-rpc-core.workspace = true +kaspa-rpc-service.workspace = true +kaspa-txscript.workspace = true +kaspa-utils.workspace = true +kaspa-utxoindex.workspace = true +kaspa-wrpc-server.workspace = true kaspad.workspace = true async-channel.workspace = true +async-trait.workspace = true bincode.workspace = true +clap.workspace = true +chrono.workspace = true crossbeam-channel.workspace = true +dhat = { workspace = true, optional = true } faster-hex.workspace = true flate2.workspace = true futures-util.workspace = true indexmap.workspace = true itertools.workspace = true -kaspa-bip32.workspace = true -kaspa-database.workspace = true -kaspa-index-processor.workspace = true -kaspa-muhash.workspace = true -kaspa-txscript.workspace = true -kaspa-utils.workspace = true -kaspa-utxoindex.workspace = true -kaspa-wrpc-server.workspace = true log.workspace = true parking_lot.workspace = true rand_distr.workspace = true @@ -56,13 +63,15 @@ smallvec.workspace = true tempfile.workspace = true thiserror.workspace = true tokio.workspace = true +workflow-perf-monitor.workspace = true [dev-dependencies] criterion.workspace = true rand = { workspace = true, features = ["small_rng"] } -tokio = { workspace = true, features = ["rt", "macros"] } +tokio = { workspace = true, features = ["rt", "macros", "process"] } kaspa-txscript-errors.workspace = true [features] +heap = ["dhat"] html_reports = [] devnet-prealloc = ["kaspad/devnet-prealloc"] diff --git a/testing/integration/src/common/args.rs b/testing/integration/src/common/args.rs new file mode 100644 index 000000000..faebd5d20 --- /dev/null +++ b/testing/integration/src/common/args.rs @@ -0,0 +1,80 @@ +use crate::tasks::daemon::DaemonArgs; +#[cfg(feature = "devnet-prealloc")] +use kaspa_addresses::Address; +use kaspad_lib::args::Args; + +pub struct ArgsBuilder { + args: Args, +} + +impl ArgsBuilder { + #[cfg(feature = "devnet-prealloc")] + pub fn simnet(num_prealloc_utxos: u64, prealloc_amount: u64) -> Self { + let args = Args { + simnet: true, + disable_upnp: true, // UPnP registration might take some time and is not needed for this test + enable_unsynced_mining: true, + num_prealloc_utxos: Some(num_prealloc_utxos), + prealloc_amount: 
prealloc_amount * kaspa_consensus_core::constants::SOMPI_PER_KASPA, + block_template_cache_lifetime: Some(0), + rpc_max_clients: 2500, + unsafe_rpc: true, + ..Default::default() + }; + + Self { args } + } + + #[cfg(not(feature = "devnet-prealloc"))] + pub fn simnet() -> Self { + let args = Args { + simnet: true, + disable_upnp: true, // UPnP registration might take some time and is not needed for this test + enable_unsynced_mining: true, + block_template_cache_lifetime: Some(0), + rpc_max_clients: 2500, + unsafe_rpc: true, + ..Default::default() + }; + + Self { args } + } + + #[cfg(feature = "devnet-prealloc")] + pub fn prealloc_address(mut self, prealloc_address: Address) -> Self { + self.args.prealloc_address = Some(prealloc_address.to_string()); + self + } + + pub fn rpc_max_clients(mut self, rpc_max_clients: usize) -> Self { + self.args.rpc_max_clients = rpc_max_clients; + self + } + + pub fn max_tracked_addresses(mut self, max_tracked_addresses: usize) -> Self { + self.args.max_tracked_addresses = max_tracked_addresses; + self + } + + pub fn utxoindex(mut self, utxoindex: bool) -> Self { + self.args.utxoindex = utxoindex; + self + } + + pub fn apply_args(mut self, edit_func: F) -> Self + where + F: Fn(&mut Args), + { + edit_func(&mut self.args); + self + } + + pub fn apply_daemon_args(mut self, daemon_args: &DaemonArgs) -> Self { + daemon_args.apply_to(&mut self.args); + self + } + + pub fn build(self) -> Args { + self.args + } +} diff --git a/testing/integration/src/common/client.rs b/testing/integration/src/common/client.rs new file mode 100644 index 000000000..0de974c0b --- /dev/null +++ b/testing/integration/src/common/client.rs @@ -0,0 +1,67 @@ +use super::{daemon::Daemon, listener::Listener}; +use kaspa_grpc_client::GrpcClient; +use kaspa_notify::{events::EventType, scope::Scope, subscription::Command}; +use kaspa_rpc_core::RpcResult; +use std::{ + collections::{hash_map::Entry, HashMap}, + ops::Deref, +}; + +/// A multi-listener gRPC client with event type dedicated listeners +pub struct ListeningClient { + client: GrpcClient, + listeners: HashMap, +} + +impl ListeningClient { + pub async fn connect(kaspad: &Daemon) -> Self { + let client = kaspad.new_multi_listener_client().await; + client.start(None).await; + let listeners = Default::default(); + ListeningClient { client, listeners } + } + + pub async fn start_notify(&mut self, scope: Scope) -> RpcResult<()> { + let event = scope.event_type(); + match self.listeners.entry(event) { + Entry::Occupied(e) => e.get().execute_subscribe_command(scope, Command::Start).await, + Entry::Vacant(e) => { + e.insert(Listener::subscribe(self.client.clone(), scope).await?); + Ok(()) + } + } + } + + #[allow(dead_code)] + pub async fn stop_notify(&mut self, scope: Scope) -> RpcResult<()> { + let event = scope.event_type(); + match self.listeners.entry(event) { + Entry::Occupied(e) => e.get().execute_subscribe_command(scope, Command::Stop).await, + Entry::Vacant(_) => Ok(()), + } + } + + pub fn listener(&self, event: EventType) -> Option { + self.listeners.get(&event).cloned() + } + + pub fn block_added_listener(&self) -> Option { + self.listener(EventType::BlockAdded) + } + + pub fn utxos_changed_listener(&self) -> Option { + self.listener(EventType::UtxosChanged) + } + + pub fn virtual_daa_score_changed_listener(&self) -> Option { + self.listener(EventType::VirtualDaaScoreChanged) + } +} + +impl Deref for ListeningClient { + type Target = GrpcClient; + + fn deref(&self) -> &Self::Target { + &self.client + } +} diff --git 
a/testing/integration/src/common/daemon.rs b/testing/integration/src/common/daemon.rs index 8adbea773..44cde46a2 100644 --- a/testing/integration/src/common/daemon.rs +++ b/testing/integration/src/common/daemon.rs @@ -1,42 +1,108 @@ -use futures_util::Future; use kaspa_consensus_core::network::NetworkId; -use kaspa_core::{core::Core, signals::Shutdown}; +use kaspa_core::{core::Core, signals::Shutdown, task::runtime::AsyncRuntime}; use kaspa_database::utils::get_kaspa_tempdir; use kaspa_grpc_client::GrpcClient; +use kaspa_grpc_server::service::GrpcService; +use kaspa_notify::subscription::context::SubscriptionContext; use kaspa_rpc_core::notify::mode::NotificationMode; +use kaspa_rpc_service::service::RpcCoreService; +use kaspa_utils::triggers::Listener; use kaspad_lib::{args::Args, daemon::create_core_with_runtime}; -use std::{sync::Arc, time::Duration}; +use parking_lot::RwLock; +use std::{ops::Deref, sync::Arc, time::Duration}; use tempfile::TempDir; use kaspa_grpc_client::ClientPool; -pub struct Daemon { - // Type and suffix of the daemon network +pub struct ClientManager { + pub args: RwLock, + + /// Type and suffix of the daemon network pub network: NetworkId, + /// Clients subscription context + pub context: SubscriptionContext, + // Daemon ports pub rpc_port: u16, pub p2p_port: u16, +} + +impl ClientManager { + pub fn new(args: Args) -> Self { + let network = args.network(); + let context = SubscriptionContext::with_options(None); + let rpc_port = args.rpclisten.unwrap().normalize(0).port; + let p2p_port = args.listen.unwrap().normalize(0).port; + let args = RwLock::new(args); + Self { args, network, context, rpc_port, p2p_port } + } + + pub async fn new_client(&self) -> GrpcClient { + GrpcClient::connect_with_args( + NotificationMode::Direct, + format!("grpc://localhost:{}", self.rpc_port), + Some(self.context.clone()), + false, + None, + false, + Some(500_000), + Default::default(), + ) + .await + .unwrap() + } + + pub async fn new_clients(&self, count: usize) -> Vec { + let mut clients = Vec::with_capacity(count); + for _ in 0..count { + clients.push(self.new_client().await); + } + clients + } + + pub async fn new_multi_listener_client(&self) -> GrpcClient { + GrpcClient::connect_with_args( + NotificationMode::MultiListeners, + format!("grpc://localhost:{}", self.rpc_port), + Some(self.context.clone()), + true, + None, + false, + Some(500_000), + Default::default(), + ) + .await + .unwrap() + } + + pub async fn new_client_pool(&self, pool_size: usize, distribution_channel_capacity: usize) -> ClientPool { + let mut clients = Vec::with_capacity(pool_size); + for _ in 0..pool_size { + clients.push(Arc::new(self.new_client().await)); + } + ClientPool::new(clients, distribution_channel_capacity) + } +} + +pub struct Daemon { + client_manager: Arc, pub core: Arc, + grpc_server_started: Listener, + shutdown_requested: Listener, workers: Option>>, _appdir_tempdir: TempDir, } impl Daemon { - pub fn new_random(fd_total_budget: i32) -> Daemon { - // UPnP registration might take some time and is not needed for usual daemon tests - let args = Args { devnet: true, disable_upnp: true, ..Default::default() }; - Self::new_random_with_args(args, fd_total_budget) - } - - pub fn new_random_with_args(mut args: Args, fd_total_budget: i32) -> Daemon { + pub fn fill_args_with_random_ports(args: &mut Args) { // This should ask the OS to allocate free port for socket 1 to 4. 
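// Binding to port 0 lets the OS pick any free port; the reworked code below instead reuses the
// port already carried by `args.rpclisten` / `args.listen` when one was set, presumably so a
// pre-built `DaemonArgs` keeps stable ports across daemon restarts.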
- let socket1 = std::net::TcpListener::bind("127.0.0.1:0").unwrap(); + let socket1 = std::net::TcpListener::bind(format!("127.0.0.1:{}", args.rpclisten.map_or(0, |x| x.normalize(0).port))).unwrap(); let rpc_port = socket1.local_addr().unwrap().port(); - let socket2 = std::net::TcpListener::bind("127.0.0.1:0").unwrap(); + let socket2 = std::net::TcpListener::bind(format!("127.0.0.1:{}", args.listen.map_or(0, |x| x.normalize(0).port))).unwrap(); let p2p_port = socket2.local_addr().unwrap().port(); let socket3 = std::net::TcpListener::bind("127.0.0.1:0").unwrap(); @@ -54,57 +120,72 @@ impl Daemon { args.listen = Some(format!("0.0.0.0:{p2p_port}").try_into().unwrap()); args.rpclisten_json = Some(format!("0.0.0.0:{rpc_json_port}").parse().unwrap()); args.rpclisten_borsh = Some(format!("0.0.0.0:{rpc_borsh_port}").parse().unwrap()); + } + + pub fn new_random(fd_total_budget: i32) -> Daemon { + // UPnP registration might take some time and is not needed for usual daemon tests + let args = Args { devnet: true, disable_upnp: true, ..Default::default() }; + Self::new_random_with_args(args, fd_total_budget) + } + + pub fn new_random_with_args(mut args: Args, fd_total_budget: i32) -> Daemon { + Self::fill_args_with_random_ports(&mut args); + let client_manager = Arc::new(ClientManager::new(args)); + Self::with_manager(client_manager, fd_total_budget) + } + + pub fn with_manager(client_manager: Arc, fd_total_budget: i32) -> Daemon { let appdir_tempdir = get_kaspa_tempdir(); - args.appdir = Some(appdir_tempdir.path().to_str().unwrap().to_owned()); + client_manager.args.write().appdir = Some(appdir_tempdir.path().to_str().unwrap().to_owned()); + let (core, _) = create_core_with_runtime(&Default::default(), &client_manager.args.read(), fd_total_budget); + let async_service = &Arc::downcast::(core.find(AsyncRuntime::IDENT).unwrap().arc_any()).unwrap(); + let rpc_core_service = &Arc::downcast::(async_service.find(RpcCoreService::IDENT).unwrap().arc_any()).unwrap(); + let shutdown_requested = rpc_core_service.core_shutdown_request_listener(); + let grpc_server = &Arc::downcast::(async_service.find(GrpcService::IDENT).unwrap().arc_any()).unwrap(); + let grpc_server_started = grpc_server.started(); + Daemon { client_manager, core, grpc_server_started, shutdown_requested, workers: None, _appdir_tempdir: appdir_tempdir } + } - let network = args.network(); - let (core, _) = create_core_with_runtime(&Default::default(), &args, fd_total_budget); - Daemon { network, rpc_port, p2p_port, core, workers: None, _appdir_tempdir: appdir_tempdir } + pub fn client_manager(&self) -> Arc { + self.client_manager.clone() } - pub async fn start(&mut self) -> GrpcClient { + pub fn grpc_server_started(&self) -> Listener { + self.grpc_server_started.clone() + } + + pub fn shutdown_requested(&self) -> Listener { + self.shutdown_requested.clone() + } + + pub fn run(&mut self) { self.workers = Some(self.core.start()); - // Wait for the node to initialize before connecting to RPC - tokio::time::sleep(Duration::from_secs(1)).await; - self.new_client().await } - pub fn shutdown(&mut self) { + pub fn join(&mut self) { if let Some(workers) = self.workers.take() { - self.core.shutdown(); self.core.join(workers); } } - pub async fn new_client(&self) -> GrpcClient { - GrpcClient::connect( - NotificationMode::Direct, - format!("grpc://localhost:{}", self.rpc_port), - true, - None, - false, - Some(500_000), - Default::default(), - ) - .await - .unwrap() + pub async fn start(&mut self) -> GrpcClient { + self.run(); + // Wait for the node to 
initialize before connecting to RPC + tokio::time::sleep(Duration::from_secs(1)).await; + self.new_client().await } - pub async fn new_client_pool( - &self, - pool_size: usize, - distribution_channel_capacity: usize, - client_op: F, - ) -> ClientPool - where - F: Fn(Arc, T) -> R + Sync + Send + Copy + 'static, - R: Future + Send, - { - let mut clients = Vec::with_capacity(pool_size); - for _ in 0..pool_size { - clients.push(Arc::new(self.new_client().await)); - } - ClientPool::new(clients, distribution_channel_capacity, client_op) + pub fn shutdown(&mut self) { + self.core.shutdown(); + self.join(); + } +} + +impl Deref for Daemon { + type Target = ClientManager; + + fn deref(&self) -> &Self::Target { + &self.client_manager } } diff --git a/testing/integration/src/common/listener.rs b/testing/integration/src/common/listener.rs new file mode 100644 index 000000000..2bde0b333 --- /dev/null +++ b/testing/integration/src/common/listener.rs @@ -0,0 +1,36 @@ +use async_channel::Receiver; +use kaspa_grpc_client::GrpcClient; +use kaspa_notify::{connection::ChannelType, events::EventType, listener::ListenerId, scope::Scope, subscription::Command}; +use kaspa_rpc_core::{api::rpc::RpcApi, notify::connection::ChannelConnection, Notification, RpcResult}; + +/// An event type bound notification listener +#[derive(Clone)] +pub struct Listener { + client: GrpcClient, + id: ListenerId, + event: EventType, + pub receiver: Receiver, +} + +impl Listener { + pub async fn subscribe(client: GrpcClient, scope: Scope) -> RpcResult { + let (sender, receiver) = async_channel::unbounded(); + let connection = ChannelConnection::new("client listener", sender, ChannelType::Closable); + let id = client.register_new_listener(connection); + let event = scope.event_type(); + client.start_notify(id, scope).await?; + let listener = Listener { client, id, event, receiver }; + Ok(listener) + } + + pub async fn execute_subscribe_command(&self, scope: Scope, command: Command) -> RpcResult<()> { + assert_eq!(self.event, (&scope).into()); + self.client.execute_subscribe_command(self.id, scope, command).await + } + + pub fn drain(&self) { + while !self.receiver.is_empty() { + let _ = self.receiver.try_recv().unwrap(); + } + } +} diff --git a/testing/integration/src/common/mod.rs b/testing/integration/src/common/mod.rs index 663390eb9..1ee8b2659 100644 --- a/testing/integration/src/common/mod.rs +++ b/testing/integration/src/common/mod.rs @@ -4,8 +4,11 @@ use std::{ path::Path, }; +pub mod args; +pub mod client; pub mod client_notify; pub mod daemon; +pub mod listener; pub mod utils; pub fn open_file(file_path: &Path) -> File { diff --git a/testing/integration/src/common/utils.rs b/testing/integration/src/common/utils.rs index cbde1bd53..ebe812f08 100644 --- a/testing/integration/src/common/utils.rs +++ b/testing/integration/src/common/utils.rs @@ -1,30 +1,41 @@ +use super::client::ListeningClient; use itertools::Itertools; +use kaspa_addresses::Address; use kaspa_consensus_core::{ constants::TX_VERSION, sign::sign, subnets::SUBNETWORK_ID_NATIVE, - tx::{ScriptPublicKey, SignableTransaction, Transaction, TransactionId, TransactionInput, TransactionOutput}, + tx::{ + MutableTransaction, ScriptPublicKey, SignableTransaction, Transaction, TransactionId, TransactionInput, TransactionOutpoint, + TransactionOutput, UtxoEntry, + }, utxo::{ utxo_collection::{UtxoCollection, UtxoCollectionExtensions}, utxo_diff::UtxoDiff, }, }; use kaspa_core::info; +use kaspa_grpc_client::GrpcClient; +use kaspa_rpc_core::{api::rpc::RpcApi, 
BlockAddedNotification, Notification, VirtualDaaScoreChangedNotification}; +use kaspa_txscript::pay_to_address_script; use rayon::prelude::{IntoParallelIterator, ParallelIterator}; use secp256k1::KeyPair; use std::{ collections::{hash_map::Entry::Occupied, HashMap, HashSet}, + future::Future, sync::Arc, + time::Duration, }; +use tokio::time::timeout; pub(crate) const EXPAND_FACTOR: u64 = 1; pub(crate) const CONTRACT_FACTOR: u64 = 1; -fn estimated_mass(num_inputs: usize, num_outputs: u64) -> u64 { +const fn estimated_mass(num_inputs: usize, num_outputs: u64) -> u64 { 200 + 34 * num_outputs + 1000 * (num_inputs as u64) } -fn required_fee(num_inputs: usize, num_outputs: u64) -> u64 { +pub const fn required_fee(num_inputs: usize, num_outputs: u64) -> u64 { const FEE_PER_MASS: u64 = 10; FEE_PER_MASS * estimated_mass(num_inputs, num_outputs) } @@ -105,3 +116,102 @@ pub fn verify_tx_dag(initial_utxoset: &UtxoCollection, txs: &[Arc]) assert!(prev_txs.insert(tx.id(), tx.clone()).is_none()); } } + +pub async fn wait_for(sleep_millis: u64, max_iterations: u64, success: impl Fn() -> Fut, panic_message: &'static str) +where + Fut: Future, +{ + let mut i: u64 = 0; + loop { + i += 1; + tokio::time::sleep(Duration::from_millis(sleep_millis)).await; + if success().await { + break; + } else if i >= max_iterations { + panic!("{}", panic_message); + } + } +} + +pub fn generate_tx( + schnorr_key: KeyPair, + utxos: &[(TransactionOutpoint, UtxoEntry)], + amount: u64, + num_outputs: u64, + address: &Address, +) -> Transaction { + let total_in = utxos.iter().map(|x| x.1.amount).sum::(); + assert!(amount <= total_in - required_fee(utxos.len(), num_outputs)); + let script_public_key = pay_to_address_script(address); + let inputs = utxos + .iter() + .map(|(op, _)| TransactionInput { previous_outpoint: *op, signature_script: vec![], sequence: 0, sig_op_count: 1 }) + .collect_vec(); + + let outputs = (0..num_outputs) + .map(|_| TransactionOutput { value: amount / num_outputs, script_public_key: script_public_key.clone() }) + .collect_vec(); + let unsigned_tx = Transaction::new(TX_VERSION, inputs, outputs, 0, SUBNETWORK_ID_NATIVE, 0, vec![]); + let signed_tx = + sign(MutableTransaction::with_entries(unsigned_tx, utxos.iter().map(|(_, entry)| entry.clone()).collect_vec()), schnorr_key); + signed_tx.tx +} + +pub async fn fetch_spendable_utxos( + client: &GrpcClient, + address: Address, + coinbase_maturity: u64, +) -> Vec<(TransactionOutpoint, UtxoEntry)> { + let resp = client.get_utxos_by_addresses(vec![address.clone()]).await.unwrap(); + let virtual_daa_score = client.get_server_info().await.unwrap().virtual_daa_score; + let mut utxos = Vec::with_capacity(resp.len()); + for resp_entry in + resp.into_iter().filter(|resp_entry| is_utxo_spendable(&resp_entry.utxo_entry, virtual_daa_score, coinbase_maturity)) + { + assert!(resp_entry.address.is_some()); + assert_eq!(*resp_entry.address.as_ref().unwrap(), address); + utxos.push((resp_entry.outpoint, resp_entry.utxo_entry)); + } + utxos.sort_by(|a, b| b.1.amount.cmp(&a.1.amount)); + utxos +} + +pub fn is_utxo_spendable(entry: &UtxoEntry, virtual_daa_score: u64, coinbase_maturity: u64) -> bool { + let needed_confirmations = if !entry.is_coinbase { 10 } else { coinbase_maturity }; + entry.block_daa_score + needed_confirmations <= virtual_daa_score +} + +pub async fn mine_block(pay_address: Address, submitting_client: &GrpcClient, listening_clients: &[ListeningClient]) { + // Discard all unreceived block added notifications in each listening client + 
listening_clients.iter().for_each(|x| x.block_added_listener().unwrap().drain()); + + // Mine a block + let template = submitting_client.get_block_template(pay_address.clone(), vec![]).await.unwrap(); + let block_hash = template.block.header.hash; + submitting_client.submit_block(template.block, false).await.unwrap(); + + // Wait for each listening client to get notified the submitted block was added to the DAG + for client in listening_clients.iter() { + let block_daa_score: u64 = match timeout(Duration::from_millis(500), client.block_added_listener().unwrap().receiver.recv()) + .await + .unwrap() + .unwrap() + { + Notification::BlockAdded(BlockAddedNotification { block }) => { + assert_eq!(block.header.hash, block_hash); + block.header.daa_score + } + _ => panic!("wrong notification type"), + }; + match timeout(Duration::from_millis(500), client.virtual_daa_score_changed_listener().unwrap().receiver.recv()) + .await + .unwrap() + .unwrap() + { + Notification::VirtualDaaScoreChanged(VirtualDaaScoreChangedNotification { virtual_daa_score }) => { + assert_eq!(virtual_daa_score, block_daa_score + 1); + } + _ => panic!("wrong notification type"), + } + } +} diff --git a/testing/integration/src/consensus_integration_tests.rs b/testing/integration/src/consensus_integration_tests.rs index 819951883..e66baaf69 100644 --- a/testing/integration/src/consensus_integration_tests.rs +++ b/testing/integration/src/consensus_integration_tests.rs @@ -53,6 +53,7 @@ use kaspa_database::prelude::{CachePolicy, ConnBuilder}; use kaspa_index_processor::service::IndexService; use kaspa_math::Uint256; use kaspa_muhash::MuHash; +use kaspa_notify::subscription::context::SubscriptionContext; use kaspa_txscript::caches::TxScriptCacheCounters; use kaspa_utxoindex::api::{UtxoIndexApi, UtxoIndexProxy}; use kaspa_utxoindex::UtxoIndex; @@ -939,8 +940,9 @@ async fn json_test(file_path: &str, concurrency: bool) { let tick_service = Arc::new(TickService::default()); let (notification_send, notification_recv) = unbounded(); - let tc = Arc::new(TestConsensus::with_notifier(&config, notification_send)); - let notify_service = Arc::new(NotifyService::new(tc.notification_root(), notification_recv)); + let subscription_context = SubscriptionContext::new(); + let tc = Arc::new(TestConsensus::with_notifier(&config, notification_send, subscription_context.clone())); + let notify_service = Arc::new(NotifyService::new(tc.notification_root(), notification_recv, subscription_context.clone())); // External storage for storing block bodies. 
This allows separating header and body processing phases let (_external_db_lifetime, external_storage) = create_temp_db!(ConnBuilder::default().with_files_limit(10)); @@ -948,7 +950,11 @@ async fn json_test(file_path: &str, concurrency: bool) { let (_utxoindex_db_lifetime, utxoindex_db) = create_temp_db!(ConnBuilder::default().with_files_limit(10)); let consensus_manager = Arc::new(ConsensusManager::new(Arc::new(TestConsensusFactory::new(tc.clone())))); let utxoindex = UtxoIndex::new(consensus_manager.clone(), utxoindex_db).unwrap(); - let index_service = Arc::new(IndexService::new(¬ify_service.notifier(), Some(UtxoIndexProxy::new(utxoindex.clone())))); + let index_service = Arc::new(IndexService::new( + ¬ify_service.notifier(), + subscription_context.clone(), + Some(UtxoIndexProxy::new(utxoindex.clone())), + )); let async_runtime = Arc::new(AsyncRuntime::new(2)); async_runtime.register(tick_service.clone()); diff --git a/testing/integration/src/daemon_integration_tests.rs b/testing/integration/src/daemon_integration_tests.rs index f8d0a2c1f..9c0e11718 100644 --- a/testing/integration/src/daemon_integration_tests.rs +++ b/testing/integration/src/daemon_integration_tests.rs @@ -1,12 +1,20 @@ +use crate::common::{ + client::ListeningClient, + client_notify::ChannelNotify, + daemon::Daemon, + utils::{fetch_spendable_utxos, generate_tx, mine_block, wait_for}, +}; use kaspa_addresses::Address; use kaspa_alloc::init_allocator_with_default_settings; +use kaspa_consensus::params::SIMNET_PARAMS; use kaspa_consensusmanager::ConsensusManager; -use kaspa_core::task::runtime::AsyncRuntime; -use kaspa_notify::scope::{Scope, VirtualDaaScoreChangedScope}; -use kaspa_rpc_core::{api::rpc::RpcApi, Notification}; +use kaspa_core::{task::runtime::AsyncRuntime, trace}; +use kaspa_grpc_client::GrpcClient; +use kaspa_notify::scope::{BlockAddedScope, UtxosChangedScope, VirtualDaaScoreChangedScope}; +use kaspa_rpc_core::{api::rpc::RpcApi, Notification, RpcTransactionId}; +use kaspa_txscript::pay_to_address_script; use kaspad_lib::args::Args; - -use crate::common::{client_notify::ChannelNotify, daemon::Daemon}; +use rand::thread_rng; use std::{sync::Arc, time::Duration}; #[tokio::test(flavor = "multi_thread", worker_threads = 1)] @@ -18,9 +26,11 @@ async fn daemon_sanity_test() { let total_fd_limit = 10; let mut kaspad1 = Daemon::new_random(total_fd_limit); let rpc_client1 = kaspad1.start().await; + assert!(rpc_client1.handle_message_id() && rpc_client1.handle_stop_notify(), "the client failed to collect server features"); let mut kaspad2 = Daemon::new_random(total_fd_limit); let rpc_client2 = kaspad2.start().await; + assert!(rpc_client2.handle_message_id() && rpc_client2.handle_stop_notify(), "the client failed to collect server features"); tokio::time::sleep(Duration::from_secs(1)).await; rpc_client1.disconnect().await.unwrap(); @@ -58,7 +68,7 @@ async fn daemon_mining_test() { let (sender, event_receiver) = async_channel::unbounded(); rpc_client1.start(Some(Arc::new(ChannelNotify::new(sender)))).await; - rpc_client1.start_notify(Default::default(), Scope::VirtualDaaScoreChanged(VirtualDaaScoreChangedScope {})).await.unwrap(); + rpc_client1.start_notify(Default::default(), VirtualDaaScoreChangedScope {}.into()).await.unwrap(); // Mine 10 blocks to daemon #1 let mut last_block_hash = None; @@ -103,6 +113,215 @@ async fn daemon_mining_test() { } } +/// `cargo test --release --package kaspa-testing-integration --lib -- daemon_integration_tests::daemon_utxos_propagation_test` +#[tokio::test(flavor = "multi_thread", 
worker_threads = 1)] +async fn daemon_utxos_propagation_test() { + #[cfg(feature = "heap")] + let _profiler = dhat::Profiler::builder().file_name("kaspa-testing-integration-heap.json").build(); + + kaspa_core::log::try_init_logger( + "INFO,kaspa_testing_integration=trace,kaspa_notify=debug,kaspa_rpc_core=debug,kaspa_grpc_client=debug", + ); + + let args = Args { + simnet: true, + unsafe_rpc: true, + enable_unsynced_mining: true, + disable_upnp: true, // UPnP registration might take some time and is not needed for this test + utxoindex: true, + ..Default::default() + }; + let total_fd_limit = 10; + + let coinbase_maturity = SIMNET_PARAMS.coinbase_maturity; + let mut kaspad1 = Daemon::new_random_with_args(args.clone(), total_fd_limit); + let mut kaspad2 = Daemon::new_random_with_args(args, total_fd_limit); + let rpc_client1 = kaspad1.start().await; + let rpc_client2 = kaspad2.start().await; + + // Let rpc_client1 receive virtual DAA score changed notifications + let (sender1, event_receiver1) = async_channel::unbounded(); + rpc_client1.start(Some(Arc::new(ChannelNotify::new(sender1)))).await; + rpc_client1.start_notify(Default::default(), VirtualDaaScoreChangedScope {}.into()).await.unwrap(); + + // Connect kaspad2 to kaspad1 + rpc_client2.add_peer(format!("127.0.0.1:{}", kaspad1.p2p_port).try_into().unwrap(), true).await.unwrap(); + let check_client = rpc_client2.clone(); + wait_for( + 50, + 20, + move || { + async fn peer_connected(client: GrpcClient) -> bool { + client.get_connected_peer_info().await.unwrap().peer_info.len() == 1 + } + Box::pin(peer_connected(check_client.clone())) + }, + "the nodes did not connect to each other", + ) + .await; + + // Mining key and address + let (miner_sk, miner_pk) = secp256k1::generate_keypair(&mut thread_rng()); + let miner_address = + Address::new(kaspad1.network.into(), kaspa_addresses::Version::PubKey, &miner_pk.x_only_public_key().0.serialize()); + let miner_schnorr_key = secp256k1::KeyPair::from_secret_key(secp256k1::SECP256K1, &miner_sk); + let miner_spk = pay_to_address_script(&miner_address); + + // User key and address + let (_user_sk, user_pk) = secp256k1::generate_keypair(&mut thread_rng()); + let user_address = + Address::new(kaspad1.network.into(), kaspa_addresses::Version::PubKey, &user_pk.x_only_public_key().0.serialize()); + + // Some dummy non-monitored address + let blank_address = Address::new(kaspad1.network.into(), kaspa_addresses::Version::PubKey, &[0; 32]); + + // Mine 1000 blocks to daemon #1 + let initial_blocks = coinbase_maturity; + let mut last_block_hash = None; + for i in 0..initial_blocks { + let template = rpc_client1.get_block_template(miner_address.clone(), vec![]).await.unwrap(); + last_block_hash = Some(template.block.header.hash); + rpc_client1.submit_block(template.block, false).await.unwrap(); + + while let Ok(notification) = match tokio::time::timeout(Duration::from_secs(1), event_receiver1.recv()).await { + Ok(res) => res, + Err(elapsed) => panic!("expected virtual event before {}", elapsed), + } { + match notification { + Notification::VirtualDaaScoreChanged(msg) if msg.virtual_daa_score == i + 1 => { + break; + } + Notification::VirtualDaaScoreChanged(msg) if msg.virtual_daa_score > i + 1 => { + panic!("DAA score too high for number of submitted blocks") + } + Notification::VirtualDaaScoreChanged(_) => {} + _ => panic!("expected only DAA score notifications"), + } + } + } + + let check_client = rpc_client2.clone(); + wait_for( + 50, + 20, + move || { + async fn daa_score_reached(client: GrpcClient) -> bool { 
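                // `wait_for` (added in common/utils.rs above) polls this predicate every
                // `sleep_millis` ms for at most `max_iterations` attempts and panics with the
                // given message if the condition never holds; 50 ms x 20 gives the relay
                // roughly a one-second budget here.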
+ let virtual_daa_score = client.get_server_info().await.unwrap().virtual_daa_score; + trace!("Virtual DAA score: {}", virtual_daa_score); + virtual_daa_score == SIMNET_PARAMS.coinbase_maturity + } + Box::pin(daa_score_reached(check_client.clone())) + }, + "the nodes did not add and relay all the initial blocks", + ) + .await; + + // Expect the blocks to be relayed to daemon #2 + let dag_info = rpc_client2.get_block_dag_info().await.unwrap(); + assert_eq!(dag_info.block_count, initial_blocks); + assert_eq!(dag_info.sink, last_block_hash.unwrap()); + + // Check that acceptance data contains the expected coinbase tx ids + let vc = rpc_client2.get_virtual_chain_from_block(kaspa_consensus::params::SIMNET_GENESIS.hash, true).await.unwrap(); + assert_eq!(vc.removed_chain_block_hashes.len(), 0); + assert_eq!(vc.added_chain_block_hashes.len() as u64, initial_blocks); + assert_eq!(vc.accepted_transaction_ids.len() as u64, initial_blocks); + for accepted_txs_pair in vc.accepted_transaction_ids { + assert_eq!(accepted_txs_pair.accepted_transaction_ids.len(), 1); + } + + // Create a multi-listener RPC client on each node... + let mut clients = vec![ListeningClient::connect(&kaspad2).await, ListeningClient::connect(&kaspad1).await]; + + // ...and subscribe each to some notifications + for x in clients.iter_mut() { + x.start_notify(BlockAddedScope {}.into()).await.unwrap(); + x.start_notify(UtxosChangedScope::new(vec![miner_address.clone(), user_address.clone()]).into()).await.unwrap(); + x.start_notify(VirtualDaaScoreChangedScope {}.into()).await.unwrap(); + } + + // Mine some extra blocks so the latest miner reward is added to its balance and some UTXOs reach maturity + const EXTRA_BLOCKS: usize = 10; + for _ in 0..EXTRA_BLOCKS { + mine_block(blank_address.clone(), &rpc_client1, &clients).await; + } + + // Check the balance of the miner address + let miner_balance = rpc_client2.get_balance_by_address(miner_address.clone()).await.unwrap(); + assert_eq!(miner_balance, initial_blocks * SIMNET_PARAMS.pre_deflationary_phase_base_subsidy); + let miner_balance = rpc_client1.get_balance_by_address(miner_address.clone()).await.unwrap(); + assert_eq!(miner_balance, initial_blocks * SIMNET_PARAMS.pre_deflationary_phase_base_subsidy); + + // Get the miner UTXOs + let utxos = fetch_spendable_utxos(&rpc_client1, miner_address.clone(), coinbase_maturity).await; + assert_eq!(utxos.len(), EXTRA_BLOCKS - 1); + for utxo in utxos.iter() { + assert!(utxo.1.is_coinbase); + assert_eq!(utxo.1.amount, SIMNET_PARAMS.pre_deflationary_phase_base_subsidy); + assert_eq!(utxo.1.script_public_key, miner_spk); + } + + // Drain UTXOs and Virtual DAA score changed notification channels + clients.iter().for_each(|x| x.utxos_changed_listener().unwrap().drain()); + clients.iter().for_each(|x| x.virtual_daa_score_changed_listener().unwrap().drain()); + + // Spend some coins + const NUMBER_INPUTS: u64 = 2; + const NUMBER_OUTPUTS: u64 = 2; + const TX_AMOUNT: u64 = SIMNET_PARAMS.pre_deflationary_phase_base_subsidy * (NUMBER_INPUTS * 5 - 1) / 5; + let transaction = generate_tx(miner_schnorr_key, &utxos[0..NUMBER_INPUTS as usize], TX_AMOUNT, NUMBER_OUTPUTS, &user_address); + rpc_client1.submit_transaction((&transaction).into(), false).await.unwrap(); + + let check_client = rpc_client1.clone(); + let transaction_id = transaction.id(); + wait_for( + 50, + 20, + move || { + async fn transaction_in_mempool(client: GrpcClient, transaction_id: RpcTransactionId) -> bool { + let entry = client.get_mempool_entry(transaction_id, false, false).await; + 
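                // `get_mempool_entry` errors while the transaction is still unknown to the
                // node's mempool, so `is_ok()` below doubles as the propagation predicate
                // that `wait_for` keeps polling.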
entry.is_ok() + } + Box::pin(transaction_in_mempool(check_client.clone(), transaction_id)) + }, + "the transaction was not added to the mempool", + ) + .await; + + mine_block(blank_address.clone(), &rpc_client1, &clients).await; + + // Check UTXOs changed notifications + for x in clients.iter() { + let Notification::UtxosChanged(uc) = x.utxos_changed_listener().unwrap().receiver.recv().await.unwrap() else { + panic!("wrong notification type") + }; + assert!(uc.removed.iter().all(|x| x.address.is_some() && *x.address.as_ref().unwrap() == miner_address)); + assert!(uc.added.iter().all(|x| x.address.is_some() && *x.address.as_ref().unwrap() == user_address)); + assert_eq!(uc.removed.len() as u64, NUMBER_INPUTS); + assert_eq!(uc.added.len() as u64, NUMBER_OUTPUTS); + assert_eq!( + uc.removed.iter().map(|x| x.utxo_entry.amount).sum::(), + SIMNET_PARAMS.pre_deflationary_phase_base_subsidy * NUMBER_INPUTS + ); + assert_eq!(uc.added.iter().map(|x| x.utxo_entry.amount).sum::(), TX_AMOUNT); + } + + // Check the balance of both miner and user addresses + for x in clients.iter() { + let miner_balance = x.get_balance_by_address(miner_address.clone()).await.unwrap(); + assert_eq!(miner_balance, (initial_blocks - NUMBER_INPUTS) * SIMNET_PARAMS.pre_deflationary_phase_base_subsidy); + + let user_balance = x.get_balance_by_address(user_address.clone()).await.unwrap(); + assert_eq!(user_balance, TX_AMOUNT); + } + + // Terminate multi-listener clients + for x in clients.iter() { + x.disconnect().await.unwrap(); + x.join().await.unwrap(); + } +} + // The following test runtime parameters are required for a graceful shutdown of the gRPC server #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn daemon_cleaning_test() { diff --git a/testing/integration/src/lib.rs b/testing/integration/src/lib.rs index 95a21e5d5..a5be1b23d 100644 --- a/testing/integration/src/lib.rs +++ b/testing/integration/src/lib.rs @@ -1,4 +1,10 @@ +#[cfg(feature = "heap")] +#[global_allocator] +#[cfg(not(feature = "heap"))] +static ALLOC: dhat::Alloc = dhat::Alloc; + pub mod common; +pub mod tasks; #[cfg(test)] pub mod consensus_integration_tests; @@ -13,5 +19,9 @@ pub mod daemon_integration_tests; #[cfg(feature = "devnet-prealloc")] pub mod mempool_benchmarks; +#[cfg(test)] +#[cfg(feature = "devnet-prealloc")] +pub mod subscribe_benchmarks; + #[cfg(test)] pub mod rpc_tests; diff --git a/testing/integration/src/mempool_benchmarks.rs b/testing/integration/src/mempool_benchmarks.rs index dc2cc06ce..feddfbba4 100644 --- a/testing/integration/src/mempool_benchmarks.rs +++ b/testing/integration/src/mempool_benchmarks.rs @@ -1,4 +1,13 @@ -use crate::common::{self, client_notify::ChannelNotify, daemon::Daemon, utils::CONTRACT_FACTOR}; +use crate::{ + common::{ + self, + args::ArgsBuilder, + client_notify::ChannelNotify, + daemon::{ClientManager, Daemon}, + utils::CONTRACT_FACTOR, + }, + tasks::{block::group::MinerGroupTask, daemon::DaemonTask, tx::group::TxSenderGroupTask, Stopper, TasksRunner}, +}; use futures_util::future::join_all; use kaspa_addresses::Address; use kaspa_consensus::params::Params; @@ -112,29 +121,27 @@ async fn bench_bbt_latency() { bbt_client.start(Some(Arc::new(ChannelNotify::new(sender)))).await; bbt_client.start_notify(ListenerId::default(), Scope::NewBlockTemplate(NewBlockTemplateScope {})).await.unwrap(); - let submit_block_pool = daemon - .new_client_pool(SUBMIT_BLOCK_CLIENTS, 100, |c, block| async move { - let _sw = kaspa_core::time::Stopwatch::<500>::with_threshold("sb"); - let response = 
c.submit_block(block, false).await.unwrap(); - assert_eq!(response.report, kaspa_rpc_core::SubmitBlockReport::Success); - false - }) - .await; - - let submit_tx_pool = daemon - .new_client_pool::<(usize, Arc), _, _>(SUBMIT_TX_CLIENTS, 100, |c, (i, tx)| async move { - match c.submit_transaction(tx.as_ref().into(), false).await { - Ok(_) => {} - Err(RpcError::General(msg)) if msg.contains("orphan") => { - kaspa_core::warn!("\n\n\n{msg}\n\n"); - kaspa_core::warn!("Submitted {} transactions, exiting tx submit loop", i); - return true; - } - Err(e) => panic!("{e}"), + let submit_block_pool = daemon.new_client_pool(SUBMIT_BLOCK_CLIENTS, 100).await; + let submit_block_pool_tasks = submit_block_pool.start(|c, block| async move { + let _sw = kaspa_core::time::Stopwatch::<500>::with_threshold("sb"); + let response = c.submit_block(block, false).await.unwrap(); + assert_eq!(response.report, kaspa_rpc_core::SubmitBlockReport::Success); + false + }); + + let submit_tx_pool = daemon.new_client_pool::<(usize, Arc)>(SUBMIT_TX_CLIENTS, 100).await; + let submit_tx_pool_tasks = submit_tx_pool.start(|c, (i, tx)| async move { + match c.submit_transaction(tx.as_ref().into(), false).await { + Ok(_) => {} + Err(RpcError::General(msg)) if msg.contains("orphan") => { + kaspa_core::warn!("\n\n\n{msg}\n\n"); + kaspa_core::warn!("Submitted {} transactions, exiting tx submit loop", i); + return true; } - false - }) - .await; + Err(e) => panic!("{e}"), + } + false + }); let cc = bbt_client.clone(); let exec = executing.clone(); @@ -263,8 +270,8 @@ async fn bench_bbt_latency() { submit_block_pool.close(); submit_tx_pool.close(); - join_all(submit_block_pool.join_handles).await; - join_all(submit_tx_pool.join_handles).await; + join_all(submit_block_pool_tasks).await; + join_all(submit_tx_pool_tasks).await; // // Fold-up @@ -273,3 +280,86 @@ async fn bench_bbt_latency() { drop(client); daemon.shutdown(); } + +/// Run this benchmark with the following command line: +/// `cargo test --release --package kaspa-testing-integration --lib --features devnet-prealloc -- mempool_benchmarks::bench_bbt_latency_2 --exact --nocapture --ignored` +#[tokio::test] +#[ignore = "bmk"] +async fn bench_bbt_latency_2() { + kaspa_core::panic::configure_panic(); + kaspa_core::log::try_init_logger("info,kaspa_core::time=debug,kaspa_mining::monitor=debug"); + + // Constants + const BLOCK_COUNT: usize = usize::MAX; + + const MEMPOOL_TARGET: u64 = 600_000; + const TX_COUNT: usize = 1_400_000; + const TX_LEVEL_WIDTH: usize = 20_000; + const TPS_PRESSURE: u64 = u64::MAX; + + const SUBMIT_BLOCK_CLIENTS: usize = 20; + const SUBMIT_TX_CLIENTS: usize = 2; + + if TX_COUNT < TX_LEVEL_WIDTH { + panic!() + } + + /* + Logic: + 1. Use the new feature for preallocating utxos + 2. Set up a dataset with a DAG of signed txs over the preallocated utxoset + 3. Create constant mempool pressure by submitting txs (via rpc for now) + 4. Mine to the node (simulated) + 5. Measure bbt latency, real-time bps, real-time throughput, mempool draining rate (tbd) + + TODO: + 1. More measurements with statistical aggregation + 2. Save TX DAG dataset in a file for benchmark replication and stability + 3. 
Add P2P TX traffic by implementing a custom P2P peer which only broadcasts txs + */ + + // + // Setup + // + let (prealloc_sk, prealloc_pk) = secp256k1::generate_keypair(&mut thread_rng()); + let prealloc_address = + Address::new(NetworkType::Simnet.into(), kaspa_addresses::Version::PubKey, &prealloc_pk.x_only_public_key().0.serialize()); + let schnorr_key = secp256k1::KeyPair::from_secret_key(secp256k1::SECP256K1, &prealloc_sk); + let spk = pay_to_address_script(&prealloc_address); + + let args = ArgsBuilder::simnet(TX_LEVEL_WIDTH as u64 * CONTRACT_FACTOR, 500) + .prealloc_address(prealloc_address) + .apply_args(Daemon::fill_args_with_random_ports) + .build(); + + let network = args.network(); + let params: Params = network.into(); + + let utxoset = args.generate_prealloc_utxos(args.num_prealloc_utxos.unwrap()); + let txs = common::utils::generate_tx_dag(utxoset.clone(), schnorr_key, spk, TX_COUNT / TX_LEVEL_WIDTH, TX_LEVEL_WIDTH); + common::utils::verify_tx_dag(&utxoset, &txs); + info!("Generated overall {} txs", txs.len()); + + let client_manager = Arc::new(ClientManager::new(args)); + let mut tasks = TasksRunner::new(Some(DaemonTask::build(client_manager.clone()))) + .launch() + .await + .task( + MinerGroupTask::build(network, client_manager.clone(), SUBMIT_BLOCK_CLIENTS, params.bps(), BLOCK_COUNT, Stopper::Signal) + .await, + ) + .task( + TxSenderGroupTask::build( + client_manager.clone(), + SUBMIT_TX_CLIENTS, + false, + txs, + TPS_PRESSURE, + MEMPOOL_TARGET, + Stopper::Signal, + ) + .await, + ); + tasks.run().await; + tasks.join().await; +} diff --git a/testing/integration/src/rpc_tests.rs b/testing/integration/src/rpc_tests.rs index d8fc9a180..4fbb4155b 100644 --- a/testing/integration/src/rpc_tests.rs +++ b/testing/integration/src/rpc_tests.rs @@ -56,7 +56,7 @@ async fn sanity_test() { let mut daemon = Daemon::new_random_with_args(args, fd_total_budget); let client = daemon.start().await; let (sender, _) = async_channel::unbounded(); - let connection = ChannelConnection::new(sender, ChannelType::Closable); + let connection = ChannelConnection::new("test", sender, ChannelType::Closable); let listener_id = client.register_new_listener(connection); let mut tasks: Vec> = Vec::new(); @@ -64,6 +64,7 @@ async fn sanity_test() { // is to force any implementor of a new RpcApi method to add a matching arm here and to strongly incentivize // the adding of an actual sanity test of said new method. 
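    // The exhaustive `match op { .. }` inside this loop is the compile-time forcing function
    // referred to above: a newly added `KaspadPayloadOps` variant will not compile until a
    // matching arm (and thus a sanity test) is written for it.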
for op in KaspadPayloadOps::list() { + let network_id = daemon.network; let task: JoinHandle<()> = match op { KaspadPayloadOps::SubmitBlock => { let rpc_client = client.clone(); @@ -157,7 +158,7 @@ async fn sanity_test() { let rpc_client = client.clone(); tst!(op, { let response = rpc_client.get_current_network_call(GetCurrentNetworkRequest {}).await.unwrap(); - assert_eq!(response.network, daemon.network.network_type); + assert_eq!(response.network, network_id.network_type); }) } @@ -322,7 +323,7 @@ async fn sanity_test() { let rpc_client = client.clone(); tst!(op, { let response = rpc_client.get_block_dag_info_call(GetBlockDagInfoRequest {}).await.unwrap(); - assert_eq!(response.network, daemon.network); + assert_eq!(response.network, network_id); }) } @@ -508,7 +509,7 @@ async fn sanity_test() { tst!(op, { let response = rpc_client.get_server_info_call(GetServerInfoRequest {}).await.unwrap(); assert!(response.has_utxo_index); // we set utxoindex above - assert_eq!(response.network_id, daemon.network); + assert_eq!(response.network_id, network_id); }) } @@ -571,7 +572,7 @@ async fn sanity_test() { let rpc_client = client.clone(); let id = listener_id; tst!(op, { - rpc_client.start_notify(id, UtxosChangedScope { addresses: vec![] }.into()).await.unwrap(); + rpc_client.start_notify(id, UtxosChangedScope::new(vec![]).into()).await.unwrap(); }) } KaspadPayloadOps::NotifySinkBlueScoreChanged => { @@ -609,7 +610,7 @@ async fn sanity_test() { let rpc_client = client.clone(); let id = listener_id; tst!(op, { - rpc_client.stop_notify(id, UtxosChangedScope { addresses: vec![] }.into()).await.unwrap(); + rpc_client.stop_notify(id, UtxosChangedScope::new(vec![]).into()).await.unwrap(); }) } KaspadPayloadOps::StopNotifyingPruningPointUtxoSetOverride => { diff --git a/testing/integration/src/subscribe_benchmarks.rs b/testing/integration/src/subscribe_benchmarks.rs new file mode 100644 index 000000000..95279b176 --- /dev/null +++ b/testing/integration/src/subscribe_benchmarks.rs @@ -0,0 +1,314 @@ +use crate::{ + common::{ + self, + args::ArgsBuilder, + daemon::{ClientManager, Daemon}, + utils::CONTRACT_FACTOR, + }, + tasks::{ + block::group::MinerGroupTask, + daemon::{DaemonArgs, DaemonTask}, + memory_monitor::MemoryMonitorTask, + stat_recorder::StatRecorderTask, + subscription::group::SubscriberGroupTask, + tick::TickTask, + tx::group::TxSenderGroupTask, + Stopper, TasksRunner, + }, +}; +use itertools::Itertools; +use kaspa_addresses::Address; +use kaspa_alloc::init_allocator_with_default_settings; +use kaspa_consensus::params::Params; +use kaspa_consensus_core::network::{NetworkId, NetworkType}; +use kaspa_core::{info, task::tick::TickService, trace}; +use kaspa_math::Uint256; +use kaspa_notify::scope::VirtualDaaScoreChangedScope; +use kaspa_rpc_core::api::rpc::RpcApi; +use kaspa_txscript::pay_to_address_script; +use rand::thread_rng; +use std::{sync::Arc, time::Duration}; + +// Constants +const BLOCK_COUNT: usize = usize::MAX; + +const MEMPOOL_TARGET: u64 = 650; +const TX_COUNT: usize = 1_500_000; +const TX_LEVEL_WIDTH: usize = 20_000; +const TPS_PRESSURE: u64 = 150; // 100 +const PREALLOC_AMOUNT: u64 = 500; + +const DAEMON_LAUNCH_SECS_DELAY: u64 = 5; +const SUBMIT_BLOCK_CLIENTS: usize = 20; +const SUBMIT_TX_CLIENTS: usize = 1; +const SUBSCRIBE_WORKERS: usize = 20; + +#[cfg(feature = "heap")] +const MAX_MEMORY: u64 = 22_000_000_000; +#[cfg(not(feature = "heap"))] +const MAX_MEMORY: u64 = 31_000_000_000; + +const NOTIFY_CLIENTS: usize = 500; +const MAX_ADDRESSES: usize = 1_000_000; +const 
WALLET_ADDRESSES: usize = 800; + +const STAT_FOLDER: &str = "../../../analyze/mem-logs"; + +fn create_client_addresses(index: usize, network_id: &NetworkId) -> Vec
{ + // Process in heaviest to lightest requests order, maximizing messages memory footprint + // between notifiers and from notifier to broadcasters at grpc server and rpc core levels + let max_address = ((NOTIFY_CLIENTS - index) * MAX_ADDRESSES / NOTIFY_CLIENTS) + 1; + let min_address = if (NOTIFY_CLIENTS - index) % (NOTIFY_CLIENTS / 5) == 0 { + // Create a typical UTXOs monitoring service subscription scope + 0 + } else { + // Create a typical wallet subscription scope + max_address.max(WALLET_ADDRESSES) - WALLET_ADDRESSES + }; + (min_address..max_address) + .map(|x| Address::new((*network_id).into(), kaspa_addresses::Version::PubKey, &Uint256::from_u64(x as u64).to_le_bytes())) + .collect_vec() +} + +/// `cargo test --package kaspa-testing-integration --lib --features devnet-prealloc -- subscribe_benchmarks::utxos_changed_subscriptions_sanity_check --exact --nocapture --ignored` +#[tokio::test] +#[ignore = "bmk"] +async fn utxos_changed_subscriptions_sanity_check() { + init_allocator_with_default_settings(); + kaspa_core::panic::configure_panic(); + kaspa_core::log::try_init_logger( + "INFO, kaspa_core::time=debug, kaspa_rpc_core=debug, kaspa_grpc_client=debug, kaspa_notify=info, kaspa_notify::address::tracker=debug, kaspa_notify::listener=debug, kaspa_notify::subscription::single=debug, kaspa_mining::monitor=debug, kaspa_testing_integration::subscribe_benchmarks=trace", + ); + + let (prealloc_sk, _) = secp256k1::generate_keypair(&mut thread_rng()); + let args = ArgsBuilder::simnet(TX_LEVEL_WIDTH as u64 * CONTRACT_FACTOR, PREALLOC_AMOUNT) + .apply_args(Daemon::fill_args_with_random_ports) + .build(); + + // Start the daemon + info!("Launching the daemon..."); + let daemon_args = DaemonArgs::new( + args.rpclisten.map(|x| x.normalize(0).port).unwrap(), + args.listen.map(|x| x.normalize(0).port).unwrap(), + prealloc_sk.display_secret().to_string(), + Some("ucs-server".to_owned()), + 100, + true, + ); + let server_start_time = std::time::Instant::now(); + let mut daemon_process = tokio::process::Command::new("cargo") + .args(daemon_args.to_command_args("subscribe_benchmarks::bench_utxos_changed_subscriptions_daemon")) + .spawn() + .expect("failed to start daemon process"); + + // Make sure that the server was given enough time to start + let client_start_time = server_start_time + Duration::from_secs(DAEMON_LAUNCH_SECS_DELAY); + if client_start_time > std::time::Instant::now() { + tokio::time::sleep(client_start_time - std::time::Instant::now()).await; + } + + let client_manager = Arc::new(ClientManager::new(args)); + let client = client_manager.new_client().await; + + // + // Fold-up + // + kaspa_core::info!("Signal the daemon to shutdown"); + client.shutdown().await.unwrap(); + kaspa_core::warn!("Disconnect the main client"); + client.disconnect().await.unwrap(); + drop(client); + + kaspa_core::warn!("Waiting for the daemon to exit..."); + daemon_process.wait().await.expect("failed to wait for the daemon process"); +} + +/// `cargo test --package kaspa-testing-integration --lib --features devnet-prealloc -- subscribe_benchmarks::bench_utxos_changed_subscriptions_daemon --exact --nocapture --ignored -- --rpc=16610 --p2p=16611 --private-key=a2760251adb5b6e8d4514d23397f1631893e168c33f92ff8a7a24f397d355d62 --max-tracked-addresses=1000000 --utxoindex` +/// +/// This test is designed to be run as a child process, with the parent process eventually shutting it down. +/// Do not run it directly. 
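/// The parent builds this command line with `DaemonArgs::to_command_args` (see
/// tasks/daemon.rs below) and spawns it via `cargo test`; on this side the flags are
/// recovered through `DaemonArgs::from_env_args`, which discards the standard libtest
/// arguments and keeps only what follows the trailing `--` before handing it to clap.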
+#[tokio::test] +#[ignore = "bmk"] +async fn bench_utxos_changed_subscriptions_daemon() { + init_allocator_with_default_settings(); + kaspa_core::panic::configure_panic(); + kaspa_core::log::try_init_logger( + "INFO, kaspa_core::core=trace, kaspa_core::time=debug, kaspa_rpc_core=debug, kaspa_grpc_client=debug, kaspa_notify=info, kaspa_notify::address::tracker=debug, kaspa_notify::listener=debug, kaspa_notify::subscription::single=debug, kaspa_mining::monitor=debug, kaspa_testing_integration::subscribe_benchmarks=trace", + ); + + let daemon_args = DaemonArgs::from_env_args(); + let args = ArgsBuilder::simnet(TX_LEVEL_WIDTH as u64 * CONTRACT_FACTOR, PREALLOC_AMOUNT).apply_daemon_args(&daemon_args).build(); + let tick_service = Arc::new(TickService::new()); + + let mut tasks = TasksRunner::new(Some(DaemonTask::with_args(args.clone()))) + .task(TickTask::build(tick_service.clone())) + .task(MemoryMonitorTask::build(tick_service, "daemon", Duration::from_secs(5), MAX_MEMORY)) + .optional_task(StatRecorderTask::optional( + Duration::from_secs(5), + STAT_FOLDER.to_owned(), + daemon_args.stat_file_prefix.clone(), + true, + )); + tasks.run().await; + tasks.join().await; + + trace!("Daemon was successfully shut down"); +} + +async fn utxos_changed_subscriptions_client(address_cycle_seconds: u64, address_max_cycles: usize) { + init_allocator_with_default_settings(); + kaspa_core::panic::configure_panic(); + kaspa_core::log::try_init_logger( + "INFO, kaspa_core::time=debug, kaspa_rpc_core=debug, kaspa_grpc_client=debug, kaspa_notify=info, kaspa_notify::address::tracker=debug, kaspa_notify::listener=debug, kaspa_notify::subscription::single=debug, kaspa_mining::monitor=debug, kaspa_testing_integration::subscribe_benchmarks=trace", + ); + + assert!(address_cycle_seconds >= 60); + if TX_COUNT < TX_LEVEL_WIDTH { + panic!() + } + + // + // Setup + // + let (prealloc_sk, prealloc_pk) = secp256k1::generate_keypair(&mut thread_rng()); + let prealloc_address = + Address::new(NetworkType::Simnet.into(), kaspa_addresses::Version::PubKey, &prealloc_pk.x_only_public_key().0.serialize()); + let schnorr_key = secp256k1::KeyPair::from_secret_key(secp256k1::SECP256K1, &prealloc_sk); + let spk = pay_to_address_script(&prealloc_address); + + let args = ArgsBuilder::simnet(TX_LEVEL_WIDTH as u64 * CONTRACT_FACTOR, PREALLOC_AMOUNT) + .prealloc_address(prealloc_address) + .max_tracked_addresses(MAX_ADDRESSES) + .utxoindex(true) + .apply_args(Daemon::fill_args_with_random_ports) + .build(); + let network = args.network(); + let params: Params = network.into(); + + let utxoset = args.generate_prealloc_utxos(args.num_prealloc_utxos.unwrap()); + let txs = common::utils::generate_tx_dag( + utxoset.clone(), + schnorr_key, + spk, + (TX_COUNT + TX_LEVEL_WIDTH - 1) / TX_LEVEL_WIDTH, + TX_LEVEL_WIDTH, + ); + common::utils::verify_tx_dag(&utxoset, &txs); + info!("Generated overall {} txs", txs.len()); + + // Start the daemon + info!("Launching the daemon..."); + let daemon_args = DaemonArgs::new( + args.rpclisten.map(|x| x.normalize(0).port).unwrap(), + args.listen.map(|x| x.normalize(0).port).unwrap(), + prealloc_sk.display_secret().to_string(), + Some("ucs-server".to_owned()), + MAX_ADDRESSES, + true, + ); + let server_start_time = std::time::Instant::now(); + let mut daemon_process = tokio::process::Command::new("cargo") + .args(daemon_args.to_command_args("subscribe_benchmarks::bench_utxos_changed_subscriptions_daemon")) + .spawn() + .expect("failed to start daemon process"); + + // Make sure that the server was given enough time 
to start + let client_start_time = server_start_time + Duration::from_secs(DAEMON_LAUNCH_SECS_DELAY); + if client_start_time > std::time::Instant::now() { + tokio::time::sleep(client_start_time - std::time::Instant::now()).await; + } + + // Initial objects + let subscribing_addresses = (0..NOTIFY_CLIENTS).map(|i| Arc::new(create_client_addresses(i, ¶ms.net))).collect_vec(); + let client_manager = Arc::new(ClientManager::new(args)); + let client = client_manager.new_client().await; + let tick_service = Arc::new(TickService::new()); + + let mut tasks = TasksRunner::new(None) + .task(TickTask::build(tick_service.clone())) + .task(MemoryMonitorTask::build(tick_service.clone(), "client", Duration::from_secs(5), MAX_MEMORY)) + .task( + MinerGroupTask::build(network, client_manager.clone(), SUBMIT_BLOCK_CLIENTS, params.bps(), BLOCK_COUNT, Stopper::Signal) + .await, + ) + .task( + TxSenderGroupTask::build( + client_manager.clone(), + SUBMIT_TX_CLIENTS, + true, + txs, + TPS_PRESSURE, + MEMPOOL_TARGET, + Stopper::Signal, + ) + .await, + ) + .task( + SubscriberGroupTask::build( + client_manager, + SUBSCRIBE_WORKERS, + params.bps(), + vec![VirtualDaaScoreChangedScope {}.into()], + 3, + subscribing_addresses, + 5, + address_cycle_seconds, + address_max_cycles, + ) + .await, + ); + tasks.run().await; + tasks.join().await; + + // + // Fold-up + // + kaspa_core::info!("Signal the daemon to shutdown"); + client.shutdown().await.unwrap(); + kaspa_core::warn!("Disconnect the main client"); + client.disconnect().await.unwrap(); + drop(client); + + kaspa_core::warn!("Waiting for the daemon to exit..."); + daemon_process.wait().await.expect("failed to wait for the daemon process"); +} + +/// `cargo test --package kaspa-testing-integration --lib --features devnet-prealloc -- subscribe_benchmarks::bench_utxos_changed_subscriptions_footprint_a --exact --nocapture --ignored` +#[tokio::test] +#[ignore = "bmk"] +async fn bench_utxos_changed_subscriptions_footprint_a() { + // No subscriptions + utxos_changed_subscriptions_client(1200, 0).await; +} + +/// `cargo test --package kaspa-testing-integration --lib --features devnet-prealloc -- subscribe_benchmarks::bench_utxos_changed_subscriptions_footprint_b --exact --nocapture --ignored` +#[tokio::test] +#[ignore = "bmk"] +async fn bench_utxos_changed_subscriptions_footprint_b() { + // Single initial subscriptions, no cycles + utxos_changed_subscriptions_client(60, 1).await; +} + +/// `cargo test --package kaspa-testing-integration --lib --features devnet-prealloc -- subscribe_benchmarks::bench_utxos_changed_subscriptions_footprint_c --exact --nocapture --ignored` +#[tokio::test] +#[ignore = "bmk"] +async fn bench_utxos_changed_subscriptions_footprint_c() { + // 2 hours subscription cycles + utxos_changed_subscriptions_client(7200, usize::MAX).await; +} + +/// `cargo test --package kaspa-testing-integration --lib --features devnet-prealloc -- subscribe_benchmarks::bench_utxos_changed_subscriptions_footprint_d --exact --nocapture --ignored` +#[tokio::test] +#[ignore = "bmk"] +async fn bench_utxos_changed_subscriptions_footprint_d() { + // 30 minutes subscription cycles + utxos_changed_subscriptions_client(1800, usize::MAX).await; +} + +/// `cargo test --package kaspa-testing-integration --lib --features devnet-prealloc -- subscribe_benchmarks::bench_utxos_changed_subscriptions_footprint_e --exact --nocapture --ignored` +#[tokio::test] +#[ignore = "bmk"] +async fn bench_utxos_changed_subscriptions_footprint_e() { + // 3 minutes subscription cycles + 
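    // (address_cycle_seconds = 180, address_max_cycles = usize::MAX: each subscriber cycles
    // its UtxosChanged address subscriptions every 3 minutes, with no bound on cycle count)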
utxos_changed_subscriptions_client(180, usize::MAX).await; +} diff --git a/testing/integration/src/tasks/block/group.rs b/testing/integration/src/tasks/block/group.rs new file mode 100644 index 000000000..05eb60fb7 --- /dev/null +++ b/testing/integration/src/tasks/block/group.rs @@ -0,0 +1,68 @@ +use crate::{ + common::daemon::ClientManager, + tasks::{ + block::{miner::BlockMinerTask, submitter::BlockSubmitterTask, template_receiver::BlockTemplateReceiverTask}, + Stopper, Task, + }, +}; +use async_trait::async_trait; +use itertools::chain; +use kaspa_addresses::Address; +use kaspa_consensus_core::network::NetworkId; +use kaspa_core::debug; +use kaspa_utils::triggers::SingleTrigger; +use rand::thread_rng; +use std::sync::Arc; +use tokio::task::JoinHandle; + +pub struct MinerGroupTask { + submitter: Arc, + receiver: Arc, + miner: Arc, +} + +impl MinerGroupTask { + pub fn new(submitter: Arc, receiver: Arc, miner: Arc) -> Self { + Self { submitter, receiver, miner } + } + + pub async fn build( + network: NetworkId, + client_manager: Arc, + submitter_pool_size: usize, + bps: u64, + block_count: usize, + stopper: Stopper, + ) -> Arc { + // Block submitter + let submitter = BlockSubmitterTask::build(client_manager.clone(), submitter_pool_size, stopper).await; + + // Mining key and address + let (sk, pk) = &secp256k1::generate_keypair(&mut thread_rng()); + let pay_address = + Address::new(network.network_type().into(), kaspa_addresses::Version::PubKey, &pk.x_only_public_key().0.serialize()); + debug!("Generated private key {} and address {}", sk.display_secret(), pay_address); + + // Block template receiver + let client = Arc::new(client_manager.new_client().await); + let receiver = BlockTemplateReceiverTask::build(client.clone(), pay_address.clone(), stopper).await; + + // Miner + let miner = + BlockMinerTask::build(client, bps, block_count, submitter.sender(), receiver.template(), pay_address, stopper).await; + + Arc::new(Self::new(submitter, receiver, miner)) + } +} + +#[async_trait] +impl Task for MinerGroupTask { + fn start(&self, stop_signal: SingleTrigger) -> Vec> { + chain![ + self.submitter.start(stop_signal.clone()), + self.receiver.start(stop_signal.clone()), + self.miner.start(stop_signal.clone()) + ] + .collect() + } +} diff --git a/testing/integration/src/tasks/block/miner.rs b/testing/integration/src/tasks/block/miner.rs new file mode 100644 index 000000000..ae759801e --- /dev/null +++ b/testing/integration/src/tasks/block/miner.rs @@ -0,0 +1,139 @@ +use crate::tasks::{Stopper, Task}; +use async_channel::Sender; +use async_trait::async_trait; +use kaspa_addresses::Address; +use kaspa_core::warn; +use kaspa_grpc_client::GrpcClient; +use kaspa_rpc_core::{api::rpc::RpcApi, GetBlockTemplateResponse, RpcBlock}; +use kaspa_utils::triggers::SingleTrigger; +use parking_lot::Mutex; +use rand::thread_rng; +use rand_distr::{Distribution, Exp}; +use std::{ + cmp::max, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + time::Duration, +}; +use tokio::{task::JoinHandle, time::sleep}; + +pub const COMMUNICATION_DELAY: u64 = 1_000; + +pub struct BlockMinerTask { + client: Arc, + bps: u64, + block_count: usize, + sender: Sender, + template: Arc>, + pay_address: Address, + tx_counter: Arc, + comm_delay: u64, + stopper: Stopper, +} + +impl BlockMinerTask { + pub fn new( + client: Arc, + bps: u64, + block_count: usize, + sender: Sender, + template: Arc>, + pay_address: Address, + stopper: Stopper, + ) -> Self { + Self { + client, + bps, + block_count, + sender, + template, + pay_address, + 
tx_counter: Default::default(), + comm_delay: COMMUNICATION_DELAY, + stopper, + } + } + + pub async fn build( + client: Arc, + bps: u64, + block_count: usize, + sender: Sender, + template: Arc>, + pay_address: Address, + stopper: Stopper, + ) -> Arc { + Arc::new(Self::new(client, bps, block_count, sender, template, pay_address, stopper)) + } + + pub fn sender(&self) -> Sender { + self.sender.clone() + } + + pub fn template(&self) -> Arc> { + self.template.clone() + } + + pub fn tx_counter(&self) -> Arc { + self.tx_counter.clone() + } +} + +#[async_trait] +impl Task for BlockMinerTask { + fn start(&self, stop_signal: SingleTrigger) -> Vec> { + let client = self.client.clone(); + let block_count = self.block_count; + let sender = self.sender(); + let template = self.template(); + let pay_address = self.pay_address.clone(); + let tx_counter = self.tx_counter(); + let dist: Exp = Exp::new(self.bps as f64).unwrap(); + let comm_delay = self.comm_delay; + let stopper = self.stopper; + let task = tokio::spawn(async move { + warn!("Block miner task starting..."); + for i in 0..block_count { + // Simulate mining time + let timeout = max((dist.sample(&mut thread_rng()) * 1000.0) as u64, 1); + tokio::select! { + biased; + _ = stop_signal.listener.clone() => { + break; + } + _ = sleep(Duration::from_millis(timeout))=> {} + } + + // Read the most up-to-date block template + let mut block = template.lock().block.clone(); + // Use index as nonce to avoid duplicate blocks + block.header.nonce = i as u64; + + let c_template = template.clone(); + let c_client = client.clone(); + let c_pay_address = pay_address.clone(); + tokio::spawn(async move { + // We used the current template so let's refetch a new template with new txs + let response = c_client.get_block_template(c_pay_address, vec![]).await.unwrap(); + *c_template.lock() = response; + }); + + let c_sender = sender.clone(); + tx_counter.fetch_add(block.transactions.len() - 1, Ordering::SeqCst); + tokio::spawn(async move { + // Simulate communication delay. 
TODO: consider adding gaussian noise + tokio::time::sleep(Duration::from_millis(comm_delay)).await; + let _ = c_sender.send(block).await; + }); + } + if stopper == Stopper::Signal { + stop_signal.trigger.trigger(); + } + sender.close(); + warn!("Block miner task exited"); + }); + vec![task] + } +} diff --git a/testing/integration/src/tasks/block/mod.rs b/testing/integration/src/tasks/block/mod.rs new file mode 100644 index 000000000..1af79b6b3 --- /dev/null +++ b/testing/integration/src/tasks/block/mod.rs @@ -0,0 +1,4 @@ +pub mod group; +pub mod miner; +pub mod submitter; +pub mod template_receiver; diff --git a/testing/integration/src/tasks/block/submitter.rs b/testing/integration/src/tasks/block/submitter.rs new file mode 100644 index 000000000..b57d03269 --- /dev/null +++ b/testing/integration/src/tasks/block/submitter.rs @@ -0,0 +1,73 @@ +use crate::{ + common::daemon::ClientManager, + tasks::{Stopper, Task}, +}; +use async_channel::Sender; +use async_trait::async_trait; +use kaspa_core::warn; +use kaspa_grpc_client::ClientPool; +use kaspa_rpc_core::{api::rpc::RpcApi, RpcBlock}; +use kaspa_utils::triggers::SingleTrigger; +use std::{sync::Arc, time::Duration}; +use tokio::{task::JoinHandle, time::sleep}; + +pub struct BlockSubmitterTask { + pool: ClientPool, + stopper: Stopper, +} + +impl BlockSubmitterTask { + pub fn new(pool: ClientPool, stopper: Stopper) -> Self { + Self { pool, stopper } + } + + pub async fn build(client_manager: Arc, pool_size: usize, stopper: Stopper) -> Arc { + let pool = client_manager.new_client_pool(pool_size, 100).await; + Arc::new(Self::new(pool, stopper)) + } + + pub fn sender(&self) -> Sender { + self.pool.sender() + } +} + +#[async_trait] +impl Task for BlockSubmitterTask { + fn start(&self, stop_signal: SingleTrigger) -> Vec> { + warn!("Block submitter task starting..."); + let mut tasks = self.pool.start(|c, block: RpcBlock| async move { + loop { + match c.submit_block(block.clone(), false).await { + Ok(response) => { + assert_eq!(response.report, kaspa_rpc_core::SubmitBlockReport::Success); + break; + } + Err(_) => { + sleep(Duration::from_millis(50)).await; + } + } + } + false + }); + + let pool_shutdown_listener = self.pool.shutdown_listener(); + let sender = self.sender(); + let stopper = self.stopper; + let shutdown_task = tokio::spawn(async move { + tokio::select! 
{ + _ = stop_signal.listener.clone() => {} + _ = pool_shutdown_listener.clone() => { + if stopper == Stopper::Signal { + stop_signal.trigger.trigger(); + } + } + } + let _ = sender.close(); + pool_shutdown_listener.await; + warn!("Block submitter task exited"); + }); + tasks.push(shutdown_task); + + tasks + } +} diff --git a/testing/integration/src/tasks/block/template_receiver.rs b/testing/integration/src/tasks/block/template_receiver.rs new file mode 100644 index 000000000..daa4cceaa --- /dev/null +++ b/testing/integration/src/tasks/block/template_receiver.rs @@ -0,0 +1,94 @@ +use crate::{ + common::client_notify::ChannelNotify, + tasks::{Stopper, Task}, +}; +use async_trait::async_trait; +use kaspa_addresses::Address; +use kaspa_core::warn; +use kaspa_grpc_client::GrpcClient; +use kaspa_notify::{listener::ListenerId, scope::NewBlockTemplateScope}; +use kaspa_rpc_core::{api::rpc::RpcApi, GetBlockTemplateResponse, Notification}; +use kaspa_utils::{channel::Channel, triggers::SingleTrigger}; +use parking_lot::Mutex; +use std::sync::Arc; +use tokio::task::JoinHandle; + +pub struct BlockTemplateReceiverTask { + client: Arc, + channel: Channel, + template: Arc>, + pay_address: Address, + stopper: Stopper, +} + +impl BlockTemplateReceiverTask { + pub fn new( + client: Arc, + channel: Channel, + response: GetBlockTemplateResponse, + pay_address: Address, + stopper: Stopper, + ) -> Self { + let template = Arc::new(Mutex::new(response)); + Self { client, channel, template, pay_address, stopper } + } + + pub async fn build(client: Arc, pay_address: Address, stopper: Stopper) -> Arc { + let channel = Channel::default(); + client.start(Some(Arc::new(ChannelNotify::new(channel.sender())))).await; + client.start_notify(ListenerId::default(), NewBlockTemplateScope {}.into()).await.unwrap(); + let response = client.get_block_template(pay_address.clone(), vec![]).await.unwrap(); + Arc::new(Self::new(client, channel, response, pay_address, stopper)) + } + + pub fn template(&self) -> Arc> { + self.template.clone() + } +} + +#[async_trait] +impl Task for BlockTemplateReceiverTask { + fn start(&self, stop_signal: SingleTrigger) -> Vec> { + let client = self.client.clone(); + let receiver = self.channel.receiver(); + let pay_address = self.pay_address.clone(); + let template = self.template(); + let stopper = self.stopper; + let task = tokio::spawn(async move { + warn!("Block template receiver task starting..."); + loop { + tokio::select! 
{ + biased; + _ = stop_signal.listener.clone() => { + break; + } + result = receiver.recv() => { + match result { + Ok(notification) => { + match notification { + Notification::NewBlockTemplate(_) => { + // Drain the channel + while receiver.try_recv().is_ok() {} + let response = client.get_block_template(pay_address.clone(), vec![]).await.unwrap(); + *template.lock() = response; + } + _ => panic!(), + } + } + Err(_) => { + break; + } + } + } + } + } + if stopper == Stopper::Signal { + stop_signal.trigger.trigger(); + } + client.stop_notify(ListenerId::default(), NewBlockTemplateScope {}.into()).await.unwrap(); + client.disconnect().await.unwrap(); + warn!("Block template receiver task exited"); + }); + vec![task] + } +} diff --git a/testing/integration/src/tasks/daemon.rs b/testing/integration/src/tasks/daemon.rs new file mode 100644 index 000000000..b800a8b9f --- /dev/null +++ b/testing/integration/src/tasks/daemon.rs @@ -0,0 +1,181 @@ +use crate::{ + common::daemon::{ClientManager, Daemon}, + tasks::Task, +}; +use async_trait::async_trait; +use clap::Parser; +use kaspa_addresses::Address; +use kaspa_consensus_core::network::NetworkType; +use kaspa_core::{trace, warn}; +use kaspa_utils::{fd_budget, triggers::SingleTrigger}; +use kaspad_lib::args::Args; +use std::{iter::once, sync::Arc}; +use tokio::task::JoinHandle; + +/// Arguments for configuring a [`DaemonTask`] +#[derive(Parser, Debug)] +pub struct DaemonArgs { + /// Port of gRPC server + #[arg(long)] + pub rpc: u16, + + /// Port of P2P server + #[arg(long)] + pub p2p: u16, + + /// Preallocated UTXOs private key + #[arg(long, name = "private-key")] + pub private_key: String, + + #[arg(long, name = "stat-file-prefix")] + pub stat_file_prefix: Option, + + #[arg(long, name = "max-tracked-addresses")] + pub max_tracked_addresses: usize, + + #[arg(long)] + pub utxoindex: bool, +} + +impl DaemonArgs { + pub fn new( + rpc: u16, + p2p: u16, + private_key: String, + stat_file_prefix: Option, + max_tracked_addresses: usize, + utxoindex: bool, + ) -> Self { + Self { rpc, p2p, private_key, stat_file_prefix, max_tracked_addresses, utxoindex } + } + + pub fn from_env_args() -> Self { + let mut collect_started: bool = false; + let args = once("test".to_string()).chain(std::env::args().filter(|arg| { + if *arg == "--" { + collect_started = true; + false + } else { + collect_started + } + })); + DaemonArgs::parse_from(args) + } + + pub fn to_command_args(&self, test_name: &str) -> Vec { + let mut args = vec![ + "test".to_owned(), + "--package".to_owned(), + "kaspa-testing-integration".to_owned(), + "--lib".to_owned(), + "--features".to_owned(), + "devnet-prealloc".to_owned(), + "--".to_owned(), + test_name.to_owned(), + "--exact".to_owned(), + "--nocapture".to_owned(), + "--ignored".to_owned(), + "--".to_owned(), + "--rpc".to_owned(), + format!("{}", self.rpc), + "--p2p".to_owned(), + format!("{}", self.p2p), + "--private-key".to_owned(), + format!("{}", self.private_key), + "--max-tracked-addresses".to_owned(), + format!("{}", self.max_tracked_addresses), + ]; + if let Some(ref stat_file_prefix) = self.stat_file_prefix { + args.push("--stat-file-prefix".to_owned()); + args.push(stat_file_prefix.clone()); + } + if self.utxoindex { + args.push("--utxoindex".to_owned()); + } + args + } + + pub fn prealloc_address(&self) -> Address { + let mut private_key_bytes = [0u8; 32]; + faster_hex::hex_decode(self.private_key.as_bytes(), &mut private_key_bytes).unwrap(); + let schnorr_key = secp256k1::KeyPair::from_seckey_slice(secp256k1::SECP256K1, 
&private_key_bytes).unwrap(); + Address::new( + NetworkType::Simnet.into(), + kaspa_addresses::Version::PubKey, + &schnorr_key.public_key().x_only_public_key().0.serialize(), + ) + } + + #[cfg(feature = "devnet-prealloc")] + pub fn apply_to(&self, args: &mut Args) { + args.rpclisten = Some(format!("0.0.0.0:{}", self.rpc).try_into().unwrap()); + args.listen = Some(format!("0.0.0.0:{}", self.p2p).try_into().unwrap()); + args.prealloc_address = Some(self.prealloc_address().to_string()); + args.max_tracked_addresses = self.max_tracked_addresses; + args.utxoindex = self.utxoindex; + } + + #[cfg(not(feature = "devnet-prealloc"))] + pub fn apply_to(&self, args: &mut Args) { + args.rpclisten = Some(format!("0.0.0.0:{}", self.rpc).try_into().unwrap()); + args.listen = Some(format!("0.0.0.0:{}", self.p2p).try_into().unwrap()); + args.max_tracked_addresses = self.max_tracked_addresses; + args.utxoindex = self.utxoindex; + } +} + +pub struct DaemonTask { + client_manager: Arc, + ready_signal: SingleTrigger, +} + +impl DaemonTask { + pub fn build(client_manager: Arc) -> Arc { + Arc::new(Self { client_manager, ready_signal: SingleTrigger::new() }) + } + + pub fn with_args(args: Args) -> Arc { + let client_manager = Arc::new(ClientManager::new(args)); + Self::build(client_manager) + } + + async fn ready(&self) { + self.ready_signal.listener.clone().await; + } +} + +#[async_trait] +impl Task for DaemonTask { + fn start(&self, stop_signal: SingleTrigger) -> Vec> { + let ready_signal = self.ready_signal.trigger.clone(); + let fd_total_budget = fd_budget::limit(); + let mut daemon = Daemon::with_manager(self.client_manager.clone(), fd_total_budget); + let task = tokio::spawn(async move { + warn!("Daemon task starting..."); + daemon.run(); + + // Wait for the node to initialize before connecting to RPC + daemon.grpc_server_started().await; + ready_signal.trigger(); + + tokio::select! 
{ + biased; + _ = daemon.shutdown_requested() => { + trace!("Daemon core shutdown was requested"); + warn!("Daemon task signaling to stop"); + stop_signal.trigger.trigger(); + } + _ = stop_signal.listener.clone() => { + trace!("Daemon task got a stop signal"); + } + } + daemon.shutdown(); + warn!("Daemon task exited"); + }); + vec![task] + } + + async fn ready(&self) { + self.ready().await + } +} diff --git a/testing/integration/src/tasks/memory_monitor.rs b/testing/integration/src/tasks/memory_monitor.rs new file mode 100644 index 000000000..0f8331837 --- /dev/null +++ b/testing/integration/src/tasks/memory_monitor.rs @@ -0,0 +1,117 @@ +use crate::tasks::Task; +use async_trait::async_trait; +use kaspa_core::{ + info, + task::tick::{TickReason, TickService}, + warn, +}; +use kaspa_utils::triggers::SingleTrigger; +use std::{sync::Arc, time::Duration}; +use tokio::task::JoinHandle; +use workflow_perf_monitor::mem::{get_process_memory_info, ProcessMemoryInfo}; + +pub struct MemoryMonitorTask { + tick_service: Arc, + name: String, + fetch_interval: Duration, + max_memory: u64, +} + +impl MemoryMonitorTask { + pub fn new(tick_service: Arc, name: &str, fetch_interval: Duration, max_memory: u64) -> Self { + Self { tick_service, name: name.to_owned(), fetch_interval, max_memory } + } + + pub fn build(tick_service: Arc, name: &str, fetch_interval: Duration, max_memory: u64) -> Arc { + Arc::new(Self::new(tick_service, name, fetch_interval, max_memory)) + } + + async fn worker(&self) { + #[cfg(feature = "heap")] + let _profiler = dhat::Profiler::builder().file_name("kaspad-heap.json").build(); + + warn!( + "Starting Memory monitor {} with fetch interval of {} and maximum memory of {}", + self.name, + self.fetch_interval.as_secs(), + self.max_memory + ); + while let TickReason::Wakeup = self.tick_service.as_ref().tick(self.fetch_interval).await { + let ProcessMemoryInfo { resident_set_size, virtual_memory_size, .. } = get_process_memory_info().unwrap(); + + if resident_set_size > self.max_memory { + warn!(">>> Resident set memory {} exceeded threshold of {}", resident_set_size, self.max_memory); + #[cfg(feature = "heap")] + { + warn!(">>> Dumping heap profiling data..."); + drop(_profiler); + panic!("Resident set memory {} exceeded threshold of {}", resident_set_size, self.max_memory); + } + } else { + info!( + ">>> Memory monitor {}: virtual image mem {}, resident set mem {}", + self.name, virtual_memory_size, resident_set_size + ); + } + } + warn!("Memory monitor {} with fetch interval of {} exited", self.name, self.fetch_interval.as_secs()); + + // Let the system print final logs before exiting + tokio::time::sleep(Duration::from_millis(500)).await; + } + + pub fn start(self) -> JoinHandle<()> { + tokio::spawn(Box::pin(async move { + self.worker().await; + })) + } +} + +#[async_trait] +impl Task for MemoryMonitorTask { + fn start(&self, stop_signal: SingleTrigger) -> Vec> { + let tick_service = self.tick_service.clone(); + let name = self.name.clone(); + let fetch_interval = self.fetch_interval; + let max_memory = self.max_memory; + let task = tokio::spawn(async move { + #[cfg(feature = "heap")] + let _profiler = dhat::Profiler::builder().file_name("kaspad-heap.json").build(); + + warn!( + "Starting Memory monitor task {} with fetch interval of {} and maximum memory of {}", + name, + fetch_interval.as_secs(), + max_memory + ); + while let TickReason::Wakeup = tick_service.as_ref().tick(fetch_interval).await { + let ProcessMemoryInfo { resident_set_size, virtual_memory_size, .. 
} = get_process_memory_info().unwrap(); + + if resident_set_size > max_memory { + warn!(">>> Resident set memory {} exceeded threshold of {}", resident_set_size, max_memory); + #[cfg(feature = "heap")] + { + warn!(">>> Dumping heap profiling data..."); + drop(_profiler); + panic!("Resident set memory {} exceeded threshold of {}", resident_set_size, max_memory); + } + } else { + info!( + ">>> Memory monitor {}: virtual image mem {}, resident set mem {}", + name, virtual_memory_size, resident_set_size + ); + } + + if stop_signal.listener.is_triggered() { + break; + } + } + warn!("Memory monitor task {} exited", name); + stop_signal.trigger.trigger(); + + // Let the system print final logs before exiting + tokio::time::sleep(Duration::from_millis(500)).await; + }); + vec![task] + } +} diff --git a/testing/integration/src/tasks/mod.rs b/testing/integration/src/tasks/mod.rs new file mode 100644 index 000000000..e527052a4 --- /dev/null +++ b/testing/integration/src/tasks/mod.rs @@ -0,0 +1,108 @@ +use self::stop::StopTask; +use async_trait::async_trait; +use futures_util::future::join_all; +use itertools::Itertools; +use kaspa_utils::triggers::SingleTrigger; +use std::sync::Arc; +use tokio::task::JoinHandle; + +pub mod block; +pub mod daemon; +pub mod memory_monitor; +pub mod notify; +pub mod stat_recorder; +pub mod stop; +pub mod subscription; +pub mod tick; +pub mod tx; + +#[derive(Clone, Copy, PartialEq, Eq)] +pub enum Stopper { + /// Trigger a stop signal on exit + Signal, + + /// Do nothing on exit + Ignore, +} + +#[async_trait] +pub trait Task: Sync + Send { + fn start(&self, stop_signal: SingleTrigger) -> Vec>; + async fn ready(&self) {} +} + +pub type DynTask = Arc; + +#[derive(Default)] +pub struct TasksRunner { + main: Option, + tasks: Vec, + main_handles: Option>>, + handles: Option>>, + main_stop_signal: SingleTrigger, + stop_signal: SingleTrigger, +} + +impl TasksRunner { + pub fn new(main: Option) -> Self { + Self { + main, + tasks: vec![], + main_handles: None, + handles: None, + main_stop_signal: SingleTrigger::new(), + stop_signal: SingleTrigger::new(), + } + } + + pub fn task(mut self, task: DynTask) -> Self { + self.tasks.push(task); + self + } + + pub fn optional_task(mut self, task: Option) -> Self { + if let Some(task) = task { + self.tasks.push(task); + } + self + } + + /// Start the main task + /// + /// Use this before adding tasks relying on a started main task. 
+ pub async fn launch(mut self) -> Self { + self.run_main().await; + self + } + + async fn run_main(&mut self) { + if let Some(ref main) = self.main { + if self.main_handles.is_none() { + self.tasks.push(StopTask::build(self.main_stop_signal.clone())); + self.main_handles = Some(main.start(self.main_stop_signal.clone())); + main.ready().await; + } + } + } + pub async fn run(&mut self) { + self.run_main().await; + let handles = self.tasks.iter().cloned().flat_map(|x| x.start(self.stop_signal.clone())).collect_vec(); + self.handles = Some(handles); + } + + pub fn stop(&self) { + self.stop_signal.trigger.trigger() + } + + pub async fn join(&mut self) { + if let Some(handles) = self.handles.take() { + join_all(handles).await; + } + + // Send a stop signal to the main task and wait for it to exit + self.main_stop_signal.trigger.trigger(); + if let Some(main_handles) = self.main_handles.take() { + join_all(main_handles).await; + } + } +} diff --git a/testing/integration/src/tasks/notify/mod.rs b/testing/integration/src/tasks/notify/mod.rs new file mode 100644 index 000000000..04bdf80a0 --- /dev/null +++ b/testing/integration/src/tasks/notify/mod.rs @@ -0,0 +1 @@ +pub mod notification_drainer; diff --git a/testing/integration/src/tasks/notify/notification_drainer.rs b/testing/integration/src/tasks/notify/notification_drainer.rs new file mode 100644 index 000000000..cf50b3429 --- /dev/null +++ b/testing/integration/src/tasks/notify/notification_drainer.rs @@ -0,0 +1,43 @@ +use crate::tasks::Task; +use async_trait::async_trait; +use kaspa_core::warn; +use kaspa_grpc_client::GrpcClient; +use kaspa_utils::triggers::SingleTrigger; +use std::{sync::Arc, time::Duration}; +use tokio::{task::JoinHandle, time::sleep}; + +pub struct NotificationDrainerTask { + clients: Vec>, +} + +impl NotificationDrainerTask { + pub fn new(clients: Vec>) -> Self { + Self { clients } + } + + pub fn build(clients: Vec>) -> Arc { + Arc::new(Self::new(clients)) + } +} + +#[async_trait] +impl Task for NotificationDrainerTask { + fn start(&self, stop_signal: SingleTrigger) -> Vec> { + let clients = self.clients.clone(); + let task = tokio::spawn(async move { + warn!("Notification drainer task starting..."); + loop { + tokio::select! 
{ + biased; + _ = stop_signal.listener.clone() => { + break; + } + _ = sleep(Duration::from_secs(1)) => {} + } + clients.iter().for_each(|client| while client.notification_channel_receiver().try_recv().is_ok() {}); + } + warn!("Notification drainer task exited"); + }); + vec![task] + } +} diff --git a/testing/integration/src/tasks/stat_recorder.rs b/testing/integration/src/tasks/stat_recorder.rs new file mode 100644 index 000000000..bacfe82de --- /dev/null +++ b/testing/integration/src/tasks/stat_recorder.rs @@ -0,0 +1,72 @@ +use crate::tasks::{DynTask, Task}; +use async_trait::async_trait; +use kaspa_utils::triggers::SingleTrigger; +use std::{ + io::{BufWriter, Write}, + path::PathBuf, + sync::Arc, + time::{Duration, Instant}, +}; +use tokio::{task::JoinHandle, time::sleep}; +use workflow_perf_monitor::mem::{get_process_memory_info, ProcessMemoryInfo}; + +pub struct StatRecorderTask { + tick: Duration, + folder: String, + file_prefix: String, + timestamp: bool, +} + +impl StatRecorderTask { + pub fn build(tick: Duration, folder: String, file_prefix: String, timestamp: bool) -> Arc { + Arc::new(Self { tick, folder, file_prefix, timestamp }) + } + + pub fn optional(tick: Duration, folder: String, file_prefix: Option, timestamp: bool) -> Option { + file_prefix.map(|file_prefix| Self::build(tick, folder, file_prefix, timestamp) as DynTask) + } + + pub fn file_name(&self) -> String { + match self.timestamp { + true => format!("{}-{}.csv", self.file_prefix, chrono::Local::now().format("%Y-%m-%d %H-%M-%S")), + false => format!("{}.csv", self.file_prefix), + } + } +} + +#[async_trait] +impl Task for StatRecorderTask { + fn start(&self, stop_signal: SingleTrigger) -> Vec> { + let folder = self.folder.clone(); + let file_name = self.file_name(); + let tick = self.tick; + let task = tokio::spawn(async move { + kaspa_core::warn!("Stat recorder task starting..."); + std::fs::create_dir_all(PathBuf::from(&folder)).unwrap(); + { + let file_path = PathBuf::from(&folder).join(file_name).into_os_string(); + kaspa_core::warn!("Recording memory metrics into file {}", file_path.to_str().unwrap()); + let f = std::fs::File::create(file_path).unwrap(); + let mut f = BufWriter::new(f); + let start_time = Instant::now(); + let mut stopwatch = start_time; + loop { + tokio::select! { + biased; + _ = stop_signal.listener.clone() => { + kaspa_core::trace!("Leaving stat recorder loop"); + break; + } + _ = sleep(stopwatch + tick - Instant::now()) => {} + } + stopwatch = Instant::now(); + let ProcessMemoryInfo { resident_set_size, .. 
} = get_process_memory_info().unwrap(); + writeln!(f, "{}, {}", (stopwatch - start_time).as_millis() as f64 / 1000.0 / 60.0, resident_set_size).unwrap(); + f.flush().unwrap(); + } + } + kaspa_core::warn!("Stat recorder task exited"); + }); + vec![task] + } +} diff --git a/testing/integration/src/tasks/stop.rs b/testing/integration/src/tasks/stop.rs new file mode 100644 index 000000000..ee1b381f1 --- /dev/null +++ b/testing/integration/src/tasks/stop.rs @@ -0,0 +1,44 @@ +use crate::tasks::Task; +use async_trait::async_trait; +use kaspa_core::{trace, warn}; +use kaspa_utils::triggers::SingleTrigger; +use std::sync::Arc; +use tokio::task::JoinHandle; + +pub(super) struct StopTask { + main_stop_signal: SingleTrigger, +} + +impl StopTask { + pub fn new(main_stop_signal: SingleTrigger) -> Self { + Self { main_stop_signal } + } + + pub fn build(main_stop_signal: SingleTrigger) -> Arc { + Arc::new(Self::new(main_stop_signal)) + } +} + +#[async_trait] +impl Task for StopTask { + fn start(&self, stop_signal: SingleTrigger) -> Vec> { + let main_stop_signal = self.main_stop_signal.clone(); + let task = tokio::spawn(async move { + warn!("Stop propagator task starting..."); + tokio::select! { + _ = main_stop_signal.listener.clone() => { + trace!("The main stop signal has been triggered"); + if !stop_signal.listener.is_triggered() { + warn!("Stop propagator sending a stop signal to the sub-tasks..."); + } + stop_signal.trigger.trigger(); + } + _ = stop_signal.listener.clone() => { + trace!("The stop signal has been triggered, no need to propagate from main to sub-tasks"); + } + } + warn!("Stop propagator task exited"); + }); + vec![task] + } +} diff --git a/testing/integration/src/tasks/subscription/address_subscriber.rs b/testing/integration/src/tasks/subscription/address_subscriber.rs new file mode 100644 index 000000000..824bec355 --- /dev/null +++ b/testing/integration/src/tasks/subscription/address_subscriber.rs @@ -0,0 +1,142 @@ +use crate::tasks::{subscription::submitter::SubscribeCommand, Task}; +use async_channel::Sender; +use async_trait::async_trait; +use kaspa_addresses::Address; +use kaspa_core::warn; +use kaspa_grpc_client::GrpcClient; +use kaspa_utils::triggers::SingleTrigger; +use std::{ + sync::Arc, + time::{Duration, Instant}, +}; +use tokio::{sync::oneshot::channel, task::JoinHandle, time::sleep}; + +pub struct AddressSubscriberTask { + clients: Vec>, + addresses: Vec>>, + command_sender: Sender, + initial_secs_delay: u64, + cycle_seconds: u64, + max_cycles: usize, +} + +impl AddressSubscriberTask { + pub fn new( + clients: Vec>, + addresses: Vec>>, + command_sender: Sender, + initial_secs_delay: u64, + cycle_seconds: u64, + max_cycles: usize, + ) -> Self { + assert_eq!(clients.len(), addresses.len()); + Self { clients, addresses, command_sender, initial_secs_delay, cycle_seconds, max_cycles } + } + + pub fn build( + clients: Vec>, + addresses: Vec>>, + command_sender: Sender, + initial_secs_delay: u64, + cycle_seconds: u64, + max_cycles: usize, + ) -> Arc { + Arc::new(Self::new(clients, addresses, command_sender, initial_secs_delay, cycle_seconds, max_cycles)) + } + + pub fn clients(&self) -> &[Arc] { + &self.clients + } +} + +#[async_trait] +impl Task for AddressSubscriberTask { + fn start(&self, stop_signal: SingleTrigger) -> Vec> { + let clients = self.clients.clone(); + let addresses = self.addresses.clone(); + let sender = self.command_sender.clone(); + let initial_secs_delay = self.initial_secs_delay; + let cycle_seconds = self.cycle_seconds; + let max_cycles = self.max_cycles; 
+ let task = tokio::spawn(async move { + warn!("Address subscriber task starting..."); + let mut cycle: usize = 0; + let mut stopwatch = Instant::now(); + loop { + if cycle == 0 { + tokio::select! { + biased; + _ = stop_signal.listener.clone() => { + break; + } + _ = sleep(stopwatch + Duration::from_secs(initial_secs_delay) - Instant::now()) => {} + } + stopwatch = Instant::now(); + } + cycle += 1; + + if cycle <= max_cycles { + warn!("Cycle {cycle} - Starting UTXOs notifications..."); + let (tx, rx) = channel(); + sender.send(SubscribeCommand::RegisterJob((clients.len(), tx))).await.unwrap(); + let registration = rx.await.unwrap(); + for (i, client) in clients.iter().cloned().enumerate() { + sender + .send(SubscribeCommand::StartUtxosChanged((registration.id, client, addresses[i].clone()))) + .await + .unwrap(); + } + tokio::select! { + biased; + _ = stop_signal.listener.clone() => { + break; + } + _ = registration.complete => {} + } + warn!("Cycle {cycle} - UTXOs notifications started"); + } + + tokio::select! { + biased; + _ = stop_signal.listener.clone() => { + break; + } + _ = sleep(stopwatch + Duration::from_secs(cycle_seconds - (cycle_seconds / 3)) - Instant::now()) => {} + } + stopwatch = Instant::now(); + + if cycle < max_cycles { + warn!("Cycle {cycle} - Stopping UTXOs notifications..."); + let (tx, rx) = channel(); + sender.send(SubscribeCommand::RegisterJob((clients.len(), tx))).await.unwrap(); + let registration = rx.await.unwrap(); + for client in clients.iter().cloned() { + sender.send(SubscribeCommand::StopUtxosChanged((registration.id, client))).await.unwrap(); + } + tokio::select! { + biased; + _ = stop_signal.listener.clone() => { + break; + } + _ = registration.complete => {} + } + warn!("Cycle {cycle} - UTXOs notifications stopped"); + } + + tokio::select! 
{ + biased; + _ = stop_signal.listener.clone() => { + break; + } + _ = sleep(stopwatch + Duration::from_secs(cycle_seconds / 3) - Instant::now()) => {} + } + stopwatch = Instant::now(); + } + for client in clients.iter() { + client.disconnect().await.unwrap(); + } + warn!("Address subscriber task exited"); + }); + vec![task] + } +} diff --git a/testing/integration/src/tasks/subscription/basic_subscriber.rs b/testing/integration/src/tasks/subscription/basic_subscriber.rs new file mode 100644 index 000000000..3e4aebd3c --- /dev/null +++ b/testing/integration/src/tasks/subscription/basic_subscriber.rs @@ -0,0 +1,80 @@ +use crate::tasks::{subscription::submitter::SubscribeCommand, Task}; +use async_channel::Sender; +use async_trait::async_trait; +use kaspa_core::warn; +use kaspa_grpc_client::GrpcClient; +use kaspa_notify::scope::Scope; +use kaspa_utils::triggers::SingleTrigger; +use std::{sync::Arc, time::Duration}; +use tokio::{sync::oneshot::channel, task::JoinHandle, time::sleep}; + +pub struct BasicSubscriberTask { + clients: Vec>, + subscriptions: Vec, + command_sender: Sender, + initial_secs_delay: u64, +} + +impl BasicSubscriberTask { + pub fn new( + clients: Vec>, + subscriptions: Vec, + command_sender: Sender, + initial_secs_delay: u64, + ) -> Self { + Self { clients, subscriptions, command_sender, initial_secs_delay } + } + + pub fn build( + clients: Vec>, + subscriptions: Vec, + command_sender: Sender, + initial_secs_delay: u64, + ) -> Arc { + Arc::new(Self::new(clients, subscriptions, command_sender, initial_secs_delay)) + } + + pub fn clients(&self) -> &[Arc] { + &self.clients + } +} + +#[async_trait] +impl Task for BasicSubscriberTask { + fn start(&self, stop_signal: SingleTrigger) -> Vec> { + let clients = self.clients.clone(); + let subscriptions = self.subscriptions.clone(); + let sender = self.command_sender.clone(); + let initial_secs_delay = self.initial_secs_delay; + let task = tokio::spawn(async move { + tokio::select! { + biased; + _ = stop_signal.listener.clone() => { + return; + } + _ = sleep(Duration::from_secs(initial_secs_delay)) => {} + } + warn!("Basic subscriber task starting..."); + 'outer: for scope in subscriptions { + let (tx, rx) = channel(); + sender.send(SubscribeCommand::RegisterJob((clients.len(), tx))).await.unwrap(); + let registration = rx.await.unwrap(); + for client in clients.iter().cloned() { + if stop_signal.listener.is_triggered() { + break 'outer; + } + sender.send(SubscribeCommand::Start((registration.id, client, scope.clone()))).await.unwrap(); + } + tokio::select! 
{ + biased; + _ = stop_signal.listener.clone() => { + break; + } + _ = registration.complete => {} + } + } + warn!("Basic subscriber task exited"); + }); + vec![task] + } +} diff --git a/testing/integration/src/tasks/subscription/group.rs b/testing/integration/src/tasks/subscription/group.rs new file mode 100644 index 000000000..b652fe3a2 --- /dev/null +++ b/testing/integration/src/tasks/subscription/group.rs @@ -0,0 +1,86 @@ +use crate::{ + common::daemon::ClientManager, + tasks::{ + notify::notification_drainer::NotificationDrainerTask, + subscription::{ + address_subscriber::AddressSubscriberTask, basic_subscriber::BasicSubscriberTask, submitter::SubscriptionSubmitterTask, + }, + Task, + }, +}; +use async_trait::async_trait; +use itertools::{chain, Itertools}; +use kaspa_addresses::Address; +use kaspa_notify::scope::Scope; +use kaspa_utils::triggers::SingleTrigger; +use std::sync::Arc; +use tokio::task::JoinHandle; + +pub struct SubscriberGroupTask { + submitter: Arc, + basic_subscriber: Arc, + address_subscriber: Arc, + notification_drainer: Arc, +} + +impl SubscriberGroupTask { + pub fn new( + submitter: Arc, + basic_subscriber: Arc, + address_subscriber: Arc, + notification_drainer: Arc, + ) -> Self { + Self { submitter, basic_subscriber, address_subscriber, notification_drainer } + } + + pub async fn build( + client_manager: Arc, + workers: usize, + bps: u64, + basic_subscriptions: Vec, + basic_initial_secs_delay: u64, + addresses: Vec>>, + address_initial_secs_delay: u64, + address_cycle_seconds: u64, + address_max_cycles: usize, + ) -> Arc { + // Clients + assert!(!addresses.is_empty()); + let clients = client_manager.new_clients(addresses.len()).await.into_iter().map(Arc::new).collect_vec(); + + // Subscription submitter + let submitter = SubscriptionSubmitterTask::build(workers, addresses.len(), bps); + + // Basic subscriber + let basic_subscriber = + BasicSubscriberTask::build(clients.clone(), basic_subscriptions, submitter.sender(), basic_initial_secs_delay); + + // Address subscriber + let address_subscriber = AddressSubscriberTask::build( + clients.clone(), + addresses, + submitter.sender(), + address_initial_secs_delay, + address_cycle_seconds, + address_max_cycles, + ); + + // Notification drainer + let notification_drainer = NotificationDrainerTask::build(clients); + + Arc::new(Self::new(submitter, basic_subscriber, address_subscriber, notification_drainer)) + } +} + +#[async_trait] +impl Task for SubscriberGroupTask { + fn start(&self, stop_signal: SingleTrigger) -> Vec> { + chain![ + self.submitter.start(stop_signal.clone()), + self.basic_subscriber.start(stop_signal.clone()), + self.address_subscriber.start(stop_signal.clone()), + self.notification_drainer.start(stop_signal.clone()), + ] + .collect() + } +} diff --git a/testing/integration/src/tasks/subscription/mod.rs b/testing/integration/src/tasks/subscription/mod.rs new file mode 100644 index 000000000..1519e7738 --- /dev/null +++ b/testing/integration/src/tasks/subscription/mod.rs @@ -0,0 +1,4 @@ +pub mod address_subscriber; +pub mod basic_subscriber; +pub mod group; +pub mod submitter; diff --git a/testing/integration/src/tasks/subscription/submitter.rs b/testing/integration/src/tasks/subscription/submitter.rs new file mode 100644 index 000000000..2d46ef16a --- /dev/null +++ b/testing/integration/src/tasks/subscription/submitter.rs @@ -0,0 +1,176 @@ +use crate::tasks::Task; +use async_channel::Sender; +use async_trait::async_trait; +use itertools::Itertools; +use kaspa_addresses::Address; +use kaspa_core::warn; +use
kaspa_grpc_client::GrpcClient; +use kaspa_notify::scope::{Scope, UtxosChangedScope}; +use kaspa_rpc_core::api::rpc::RpcApi; +use kaspa_utils::{channel::Channel, triggers::SingleTrigger}; +use parking_lot::Mutex; +use rand::thread_rng; +use rand_distr::{Distribution, Exp}; +use std::{cmp::max, collections::HashMap, sync::Arc, time::Duration}; +use tokio::{ + sync::oneshot::{channel as oneshot_channel, Receiver as OneshotReceiver, Sender as OneshotSender}, + task::JoinHandle, + time::sleep, +}; + +pub type JobId = u64; +pub type Count = usize; + +pub struct Registration { + pub id: JobId, + pub complete: OneshotReceiver<()>, +} + +impl Registration { + pub fn new(id: JobId, complete: OneshotReceiver<()>) -> Self { + Self { id, complete } + } +} + +pub enum SubscribeCommand { + RegisterJob((Count, OneshotSender)), + Start((JobId, Arc, Scope)), + Stop((JobId, Arc, Scope)), + StartUtxosChanged((JobId, Arc, Arc>)), + StopUtxosChanged((JobId, Arc)), +} + +struct Job { + count: Count, + feedback: OneshotSender<()>, +} + +impl Job { + fn new(count: Count, feedback: OneshotSender<()>) -> Self { + Self { count, feedback } + } +} + +#[derive(Default)] +struct JobRegister { + jobs: HashMap, + max_id: JobId, +} + +impl JobRegister { + fn register(&mut self, count: Count) -> Registration { + self.max_id += 1; + let id = self.max_id; + let (feedback, complete) = oneshot_channel(); + let job = Job::new(count, feedback); + self.jobs.insert(id, job); + Registration::new(id, complete) + } + + fn dec_count(&mut self, id: JobId) { + let job = self.jobs.get_mut(&id).unwrap(); + job.count -= 1; + if job.count > 0 { + return; + } + let (_, job) = self.jobs.remove_entry(&id).unwrap(); + let _ = job.feedback.send(()); + } +} + +pub struct SubscriptionSubmitterTask { + workers: usize, + distribution_channel: Channel, + bps: u64, + register: Arc>, +} + +impl SubscriptionSubmitterTask { + pub fn new(workers: usize, distribution_channel_capacity: usize, bps: u64) -> Self { + let distribution_channel = Channel::bounded(distribution_channel_capacity); + let register = Default::default(); + Self { workers, distribution_channel, bps, register } + } + + pub fn build(workers: usize, distribution_channel_capacity: usize, bps: u64) -> Arc { + Arc::new(Self::new(workers, distribution_channel_capacity, bps)) + } + + pub fn sender(&self) -> Sender { + self.distribution_channel.sender() + } + + pub fn close(&self) { + self.distribution_channel.close() + } +} + +#[async_trait] +impl Task for SubscriptionSubmitterTask { + fn start(&self, stop_signal: SingleTrigger) -> Vec> { + warn!("Subscription submitter task starting..."); + let distribution: Exp = Exp::new(self.bps as f64).unwrap(); + let mut tasks = (0..self.workers) + .map(|_| { + let rx = self.distribution_channel.receiver(); + let dist = distribution; + let register = self.register.clone(); + tokio::spawn(async move { + while let Ok(command) = rx.recv().await { + match command { + SubscribeCommand::RegisterJob((count, sender)) => { + assert!(count > 0); + let registration = register.lock().register(count); + let _ = sender.send(registration); + } + SubscribeCommand::Start((id, client, scope)) => { + client.start_notify(0, scope).await.unwrap(); + register.lock().dec_count(id); + } + SubscribeCommand::Stop((id, client, scope)) => { + client.stop_notify(0, scope).await.unwrap(); + register.lock().dec_count(id); + } + SubscribeCommand::StartUtxosChanged((id, client, addresses)) => loop { + match client.start_notify(0, UtxosChangedScope::new((*addresses).clone()).into()).await { 
+ Ok(_) => { + register.lock().dec_count(id); + break; + } + Err(err) => { + warn!("Failed to start a subscription with {} addresses: {}", addresses.len(), err); + let timeout = max((dist.sample(&mut thread_rng()) * 200.0) as u64, 1); + sleep(Duration::from_millis(timeout)).await; + } + } + }, + SubscribeCommand::StopUtxosChanged((id, client)) => loop { + match client.stop_notify(0, UtxosChangedScope::new(vec![]).into()).await { + Ok(_) => { + register.lock().dec_count(id); + break; + } + Err(err) => { + warn!("Failed to stop a subscription: {}", err); + let timeout = max((dist.sample(&mut thread_rng()) * 250.0) as u64, 1); + sleep(Duration::from_millis(timeout)).await; + } + } + }, + } + } + }) + }) + .collect_vec(); + + let sender = self.sender(); + let shutdown_task = tokio::spawn(async move { + stop_signal.listener.await; + let _ = sender.close(); + warn!("Subscription submitter task exited"); + }); + tasks.push(shutdown_task); + + tasks + } +} diff --git a/testing/integration/src/tasks/tick.rs b/testing/integration/src/tasks/tick.rs new file mode 100644 index 000000000..4b3927c93 --- /dev/null +++ b/testing/integration/src/tasks/tick.rs @@ -0,0 +1,38 @@ +use crate::tasks::Task; +use async_trait::async_trait; +use kaspa_core::{task::tick::TickService, warn}; +use kaspa_utils::triggers::SingleTrigger; +use std::sync::Arc; +use tokio::task::JoinHandle; + +pub struct TickTask { + tick_service: Arc, +} + +impl TickTask { + pub fn new(tick_service: Arc) -> Self { + Self { tick_service } + } + + pub fn build(tick_service: Arc) -> Arc { + Arc::new(Self::new(tick_service)) + } + + pub fn service(&self) -> Arc { + self.tick_service.clone() + } +} + +#[async_trait] +impl Task for TickTask { + fn start(&self, stop_signal: SingleTrigger) -> Vec> { + let tick_service = self.service(); + let task = tokio::spawn(async move { + warn!("Tick task starting..."); + stop_signal.listener.await; + tick_service.shutdown(); + warn!("Tick task exited"); + }); + vec![task] + } +} diff --git a/testing/integration/src/tasks/tx/group.rs b/testing/integration/src/tasks/tx/group.rs new file mode 100644 index 000000000..210c35ac7 --- /dev/null +++ b/testing/integration/src/tasks/tx/group.rs @@ -0,0 +1,50 @@ +use crate::{ + common::daemon::ClientManager, + tasks::{ + tx::{sender::TransactionSenderTask, submitter::TransactionSubmitterTask}, + Stopper, Task, + }, +}; +use async_trait::async_trait; +use itertools::chain; +use kaspa_consensus_core::tx::Transaction; +use kaspa_utils::triggers::SingleTrigger; +use std::sync::Arc; +use tokio::task::JoinHandle; + +pub struct TxSenderGroupTask { + submitter: Arc, + sender: Arc, +} + +impl TxSenderGroupTask { + pub fn new(submitter: Arc, sender: Arc) -> Self { + Self { submitter, sender } + } + + pub async fn build( + client_manager: Arc, + submitter_pool_size: usize, + allow_orphan: bool, + txs: Vec>, + tps_pressure: u64, + mempool_target: u64, + stopper: Stopper, + ) -> Arc { + // Tx submitter + let submitter = TransactionSubmitterTask::build(client_manager.clone(), submitter_pool_size, allow_orphan, stopper).await; + + // Tx sender + let client = Arc::new(client_manager.new_client().await); + let sender = TransactionSenderTask::build(client, txs, tps_pressure, mempool_target, submitter.sender(), stopper).await; + + Arc::new(Self::new(submitter, sender)) + } +} + +#[async_trait] +impl Task for TxSenderGroupTask { + fn start(&self, stop_signal: SingleTrigger) -> Vec> { + chain![self.submitter.start(stop_signal.clone()), self.sender.start(stop_signal.clone())].collect() + } +} 
diff --git a/testing/integration/src/tasks/tx/mod.rs b/testing/integration/src/tasks/tx/mod.rs new file mode 100644 index 000000000..5723e259d --- /dev/null +++ b/testing/integration/src/tasks/tx/mod.rs @@ -0,0 +1,3 @@ +pub mod group; +pub mod sender; +pub mod submitter; diff --git a/testing/integration/src/tasks/tx/sender.rs b/testing/integration/src/tasks/tx/sender.rs new file mode 100644 index 000000000..26a334a76 --- /dev/null +++ b/testing/integration/src/tasks/tx/sender.rs @@ -0,0 +1,129 @@ +use crate::tasks::{tx::submitter::IndexedTransaction, Stopper, Task}; +use async_channel::Sender; +use async_trait::async_trait; +use kaspa_consensus_core::tx::Transaction; +use kaspa_core::{info, warn}; +use kaspa_grpc_client::GrpcClient; +use kaspa_rpc_core::api::rpc::RpcApi; +use kaspa_utils::triggers::SingleTrigger; +use std::{sync::Arc, time::Duration}; +use tokio::{ + task::JoinHandle, + time::{sleep, Instant}, +}; + +pub struct TransactionSenderTask { + client: Arc, + txs: Vec>, + tps_pressure: u64, + mempool_target: u64, + sender: Sender, + stopper: Stopper, +} + +impl TransactionSenderTask { + const UNREGULATED_TPS: u64 = u64::MAX; + + pub fn new( + client: Arc, + txs: Vec>, + tps_pressure: u64, + mempool_target: u64, + sender: Sender, + stopper: Stopper, + ) -> Self { + Self { client, txs, tps_pressure, mempool_target, sender, stopper } + } + + pub async fn build( + client: Arc, + txs: Vec>, + tps_pressure: u64, + mempool_target: u64, + sender: Sender, + stopper: Stopper, + ) -> Arc { + Arc::new(Self::new(client, txs, tps_pressure, mempool_target, sender, stopper)) + } + + pub fn sender(&self) -> Sender { + self.sender.clone() + } +} + +#[async_trait] +impl Task for TransactionSenderTask { + fn start(&self, stop_signal: SingleTrigger) -> Vec> { + let client = self.client.clone(); + let txs = self.txs.clone(); + let regulated_tps_pressure = self.tps_pressure; + let mempool_target = self.mempool_target; + let mut tps_pressure = if mempool_target < u64::MAX { Self::UNREGULATED_TPS } else { regulated_tps_pressure }; + // let mut tps_pressure = regulated_tps_pressure; + let sender = self.sender(); + let stopper = self.stopper; + let mut last_log_time = Instant::now() - Duration::from_secs(5); + let mut log_index = 0; + let task = tokio::spawn(async move { + warn!("Tx sender task starting..."); + for (i, tx) in txs.into_iter().enumerate() { + if tps_pressure != Self::UNREGULATED_TPS { + sleep(Duration::from_secs_f64(1.0 / tps_pressure as f64)).await; + } + if last_log_time.elapsed() > Duration::from_millis(100) { + let mut mempool_size = client.get_info().await.unwrap().mempool_size; + if log_index % 10 == 0 { + info!("Mempool size: {:#?}, txs submitted: {}", mempool_size, i); + } + log_index += 1; + last_log_time = Instant::now(); + + if mempool_size > (mempool_target as f32 * 1.05) as u64 { + if tps_pressure != regulated_tps_pressure { + warn!("Applying TPS pressure"); + } + tps_pressure = regulated_tps_pressure; + while mempool_size > mempool_target { + sleep(Duration::from_millis(100)).await; + mempool_size = client.get_info().await.unwrap().mempool_size; + if log_index % 10 == 0 { + info!("Mempool size: {:#?} (targeting {:#?}), txs submitted: {}", mempool_size, mempool_target, i); + } + log_index += 1; + } + } + } + match sender.send((i, tx)).await { + Ok(_) => {} + Err(err) => { + kaspa_core::error!("Tx sender channel returned error {err}"); + break; + } + } + if stop_signal.listener.is_triggered() { + break; + } + } + + kaspa_core::warn!("Tx sender task, waiting for mempool to 
drain.."); + let mut prev_mempool_size = u64::MAX; + loop { + let mempool_size = client.get_info().await.unwrap().mempool_size; + info!("Mempool size: {:#?}", mempool_size); + if mempool_size == 0 || mempool_size == prev_mempool_size { + break; + } + prev_mempool_size = mempool_size; + sleep(Duration::from_secs(1)).await; + } + if stopper == Stopper::Signal { + warn!("Tx sender task signaling to stop"); + stop_signal.trigger.trigger(); + } + sender.close(); + client.disconnect().await.unwrap(); + warn!("Tx sender task exited"); + }); + vec![task] + } +} diff --git a/testing/integration/src/tasks/tx/submitter.rs b/testing/integration/src/tasks/tx/submitter.rs new file mode 100644 index 000000000..81b1e35e4 --- /dev/null +++ b/testing/integration/src/tasks/tx/submitter.rs @@ -0,0 +1,97 @@ +use crate::{ + common::daemon::ClientManager, + tasks::{Stopper, Task}, +}; +use async_channel::Sender; +use async_trait::async_trait; +use kaspa_consensus_core::tx::Transaction; +use kaspa_core::{error, warn}; +use kaspa_grpc_client::ClientPool; +use kaspa_rpc_core::{api::rpc::RpcApi, RpcError}; +use kaspa_utils::triggers::SingleTrigger; +use std::{sync::Arc, time::Duration}; +use tokio::{task::JoinHandle, time::sleep}; + +pub type IndexedTransaction = (usize, Arc); + +/// Transaction submitter +/// +/// Pay close attention to the submission dynamic and its effect on orphans. The safe configuration is +/// a single worker and disallowing orphans. If multiple workers are configured, `allow_orphan` should +/// be set to true unless some special test use case (like fully independent txs) is requiring to +/// disallow orphans so the test can fail. +pub struct TransactionSubmitterTask { + pool: ClientPool, + allow_orphan: bool, + stopper: Stopper, +} + +impl TransactionSubmitterTask { + const MAX_ATTEMPTS: usize = 5; + + pub fn new(pool: ClientPool, allow_orphan: bool, stopper: Stopper) -> Self { + Self { pool, allow_orphan, stopper } + } + + pub async fn build(client_manager: Arc, pool_size: usize, allow_orphan: bool, stopper: Stopper) -> Arc { + let pool = client_manager.new_client_pool(pool_size, 100).await; + Arc::new(Self::new(pool, allow_orphan, stopper)) + } + + pub fn sender(&self) -> Sender { + self.pool.sender() + } +} + +#[async_trait] +impl Task for TransactionSubmitterTask { + fn start(&self, stop_signal: SingleTrigger) -> Vec> { + warn!("Transaction submitter task starting..."); + let mut tasks = match self.allow_orphan { + false => self.pool.start(|c, (i, tx)| async move { + for attempt in 0..Self::MAX_ATTEMPTS { + match c.submit_transaction(tx.as_ref().into(), false).await { + Ok(_) => { + return false; + } + Err(RpcError::General(msg)) if msg.contains("orphan") => { + error!("Transaction {i}: submit attempt #{attempt} failed"); + error!("\n\n\n{msg}\n\n"); + sleep(Duration::from_millis(50)).await; + } + Err(e) => panic!("{e}"), + } + } + panic!("Failed to submit transaction {i} after {} attempts", Self::MAX_ATTEMPTS); + }), + true => self.pool.start(|c, (_, tx)| async move { + match c.submit_transaction(tx.as_ref().into(), true).await { + Ok(_) => {} + Err(e) => panic!("{e}"), + } + false + }), + }; + + let pool_shutdown_listener = self.pool.shutdown_listener(); + let sender = self.sender(); + let stopper = self.stopper; + let shutdown_task = tokio::spawn(async move { + tokio::select! 
{ + _ = stop_signal.listener.clone() => {} + _ = pool_shutdown_listener.clone() => { + if stopper == Stopper::Signal { + warn!("Transaction submitter task signaling to stop"); + stop_signal.trigger.trigger(); + } + } + } + let _ = sender.close(); + pool_shutdown_listener.await; + warn!("Transaction submitter task exited"); + }); + tasks.push(shutdown_task); + + tasks + } +} diff --git a/utils/tower/src/middleware.rs b/utils/tower/src/middleware.rs index 56a8a1030..727d8ca47 100644 --- a/utils/tower/src/middleware.rs +++ b/utils/tower/src/middleware.rs @@ -33,7 +33,7 @@ impl CountBytesBody { impl HttpBody for CountBytesBody where - B: HttpBody, + B: HttpBody + Default, { type Data = B::Data; type Error = B::Error; @@ -66,6 +66,15 @@ where } } +impl Default for CountBytesBody +where + B: HttpBody + Default, +{ + fn default() -> Self { + Self { inner: Default::default(), counter: Default::default() } + } +} + pub fn measure_request_body_size_layer( bytes_sent_counter: Arc, f: F, diff --git a/wallet/core/src/tests/rpc_core_mock.rs b/wallet/core/src/tests/rpc_core_mock.rs index 21971bf2e..6c335d59a 100644 --- a/wallet/core/src/tests/rpc_core_mock.rs +++ b/wallet/core/src/tests/rpc_core_mock.rs @@ -3,9 +3,11 @@ use crate::imports::*; use async_channel::{unbounded, Receiver}; use async_trait::async_trait; use kaspa_notify::events::EVENT_TYPE_ARRAY; -use kaspa_notify::listener::ListenerId; +use kaspa_notify::listener::{ListenerId, ListenerLifespan}; use kaspa_notify::notifier::{Notifier, Notify}; use kaspa_notify::scope::Scope; +use kaspa_notify::subscription::context::SubscriptionContext; +use kaspa_notify::subscription::{MutationPolicies, UtxosChangedMutationPolicy}; use kaspa_rpc_core::api::ctl::RpcCtl; use kaspa_rpc_core::{api::rpc::RpcApi, *}; use kaspa_rpc_core::{notify::connection::ChannelConnection, RpcResult}; @@ -27,7 +29,19 @@ pub struct RpcCoreMock { impl RpcCoreMock { pub fn new() -> Self { - Self::default() + let (sync_sender, sync_receiver) = unbounded(); + let policies = MutationPolicies::new(UtxosChangedMutationPolicy::AddressSet); + let core_notifier: Arc = Arc::new(Notifier::with_sync( + "rpc-core", + EVENT_TYPE_ARRAY[..].into(), + vec![], + vec![], + SubscriptionContext::new(), + 10, + policies, + Some(sync_sender), + )); + Self { core_notifier, _sync_receiver: sync_receiver, ctl: RpcCtl::new() } } pub fn core_notifier(&self) -> Arc { @@ -62,10 +76,7 @@ impl RpcCoreMock { impl Default for RpcCoreMock { fn default() -> Self { - let (sync_sender, sync_receiver) = unbounded(); - let core_notifier: Arc = - Arc::new(Notifier::with_sync("rpc-core", EVENT_TYPE_ARRAY[..].into(), vec![], vec![], 10, Some(sync_sender))); - Self { core_notifier, _sync_receiver: sync_receiver, ctl: RpcCtl::new() } + Self::new() } } @@ -238,7 +249,7 @@ impl RpcApi for RpcCoreMock { // Notification API fn register_new_listener(&self, connection: ChannelConnection) -> ListenerId { - self.core_notifier.register_new_listener(connection) + self.core_notifier.register_new_listener(connection, ListenerLifespan::Dynamic) } async fn unregister_listener(&self, id: ListenerId) -> RpcResult<()> { diff --git a/wallet/core/src/utxo/processor.rs b/wallet/core/src/utxo/processor.rs index 4966105f6..1f38d0a41 100644 --- a/wallet/core/src/utxo/processor.rs +++ b/wallet/core/src/utxo/processor.rs @@ -194,8 +194,8 @@ impl UtxoProcessor { if self.is_connected() { if !addresses.is_empty() { let addresses = addresses.into_iter().map(|address| (*address).clone()).collect::>(); - let utxos_changed_scope = UtxosChangedScope { 
addresses }; - self.rpc_api().start_notify(self.listener_id()?, Scope::UtxosChanged(utxos_changed_scope)).await?; + let utxos_changed_scope = UtxosChangedScope::new(addresses); + self.rpc_api().start_notify(self.listener_id()?, utxos_changed_scope.into()).await?; } else { log_error!("registering empty address list!"); } @@ -211,8 +211,8 @@ impl UtxoProcessor { if self.is_connected() { if !addresses.is_empty() { let addresses = addresses.into_iter().map(|address| (*address).clone()).collect::>(); - let utxos_changed_scope = UtxosChangedScope { addresses }; - self.rpc_api().stop_notify(self.listener_id()?, Scope::UtxosChanged(utxos_changed_scope)).await?; + let utxos_changed_scope = UtxosChangedScope::new(addresses); + self.rpc_api().stop_notify(self.listener_id()?, utxos_changed_scope.into()).await?; } else { log_error!("unregistering empty address list!"); } @@ -482,9 +482,11 @@ impl UtxoProcessor { } async fn register_notification_listener(&self) -> Result<()> { - let listener_id = self - .rpc_api() - .register_new_listener(ChannelConnection::new(self.inner.notification_channel.sender.clone(), ChannelType::Persistent)); + let listener_id = self.rpc_api().register_new_listener(ChannelConnection::new( + "utxo processor", + self.inner.notification_channel.sender.clone(), + ChannelType::Persistent, + )); *self.inner.listener_id.lock().unwrap() = Some(listener_id); self.rpc_api().start_notify(listener_id, Scope::VirtualDaaScoreChanged(VirtualDaaScoreChangedScope {})).await?; diff --git a/wallet/core/src/wallet/mod.rs b/wallet/core/src/wallet/mod.rs index 68c25d8b9..301ba1be5 100644 --- a/wallet/core/src/wallet/mod.rs +++ b/wallet/core/src/wallet/mod.rs @@ -114,8 +114,12 @@ impl Wallet { } pub fn try_with_wrpc(store: Arc, network_id: Option) -> Result { - let rpc_client = - Arc::new(KaspaRpcClient::new_with_args(WrpcEncoding::Borsh, NotificationMode::MultiListeners, "wrpc://127.0.0.1:17110")?); + let rpc_client = Arc::new(KaspaRpcClient::new_with_args( + WrpcEncoding::Borsh, + NotificationMode::MultiListeners, + "wrpc://127.0.0.1:17110", + None, + )?); let rpc_ctl = rpc_client.ctl().clone(); let rpc_api: Arc = rpc_client; let rpc = Rpc::new(rpc_api, rpc_ctl);
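Editor's note (illustrative, not part of the diff): the task framework added under testing/integration/src/tasks is meant to be composed through TasksRunner. Below is a minimal sketch of such a composition, written against the constructors introduced in this change. The generic parameters elided in the diff text (e.g. Arc<TickService>, Option<DynTask>), the TickService constructor, and the concrete thresholds, folder and names are assumptions made for the example only.

// Illustrative sketch only: composing the new task framework from within the
// kaspa-testing-integration crate. Generic parameters elided by the diff text
// and the TickService constructor are assumptions; thresholds and names are placeholders.
use crate::tasks::{
    daemon::DaemonTask, memory_monitor::MemoryMonitorTask, stat_recorder::StatRecorderTask, tick::TickTask, DynTask, TasksRunner,
};
use kaspa_core::task::tick::TickService;
use kaspad_lib::args::Args;
use std::{sync::Arc, time::Duration};

async fn run_benchmark_sketch(args: Args) {
    let tick_service = Arc::new(TickService::new());
    // The daemon is the "main" task: launch it first so the other tasks can rely on a running node.
    let main: DynTask = DaemonTask::with_args(args);
    let mut tasks = TasksRunner::new(Some(main))
        .launch()
        .await
        .task(TickTask::build(tick_service.clone()))
        .task(MemoryMonitorTask::build(tick_service, "bench", Duration::from_secs(1), 8_000_000_000))
        .optional_task(StatRecorderTask::optional(Duration::from_secs(5), "stats".to_owned(), Some("bench".to_owned()), true));
    // Start all registered sub-tasks (the main task is already running).
    tasks.run().await;
    // ... exercise the node here ...
    // Ask every sub-task to stop, then wait for them and finally for the main task.
    tasks.stop();
    tasks.join().await;
}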