From 1ad955b1fae07fb31c7a50950f7856a82fb758ad Mon Sep 17 00:00:00 2001
From: modship
Date: Tue, 13 Jun 2023 16:13:32 +0200
Subject: [PATCH 01/24] feat(channel): impl send_deadline on sender and
 recv_deadline / recv_timeout on receiver

---
 massa-channel/src/receiver.rs | 35 ++++++++++++++++++++++++++++++++++-
 massa-channel/src/sender.rs   | 15 ++++++++++++++-
 2 files changed, 48 insertions(+), 2 deletions(-)

diff --git a/massa-channel/src/receiver.rs b/massa-channel/src/receiver.rs
index 6ec638e7cc1..59f7a2f420c 100644
--- a/massa-channel/src/receiver.rs
+++ b/massa-channel/src/receiver.rs
@@ -1,9 +1,10 @@
 use std::{
     ops::{Deref, DerefMut},
     sync::Arc,
+    time::{Duration, Instant},
 };
 
-use crossbeam::channel::{Receiver, RecvError, TryRecvError};
+use crossbeam::channel::{Receiver, RecvError, RecvTimeoutError, TryRecvError};
 use prometheus::{Counter, Gauge};
 
 #[derive(Clone)]
@@ -63,6 +64,38 @@ impl<T> MassaReceiver<T> {
         }
     }
 
+    pub fn recv_deadline(&self, deadline: Instant) -> Result<T, RecvTimeoutError> {
+        match self.receiver.recv_deadline(deadline) {
+            Ok(msg) => {
+                self.inc_metrics();
+
+                Ok(msg)
+            }
+            Err(e) => {
+                let _ = prometheus::unregister(Box::new(self.actual_len.clone()));
+                let _ = prometheus::unregister(Box::new(self.received.clone()));
+
+                Err(e)
+            }
+        }
+    }
+
+    pub fn recv_timeout(&self, timeout: Duration) -> Result<T, RecvTimeoutError> {
+        match self.receiver.recv_timeout(timeout) {
+            Ok(msg) => {
+                self.inc_metrics();
+
+                Ok(msg)
+            }
+            Err(e) => {
+                let _ = prometheus::unregister(Box::new(self.actual_len.clone()));
+                let _ = prometheus::unregister(Box::new(self.received.clone()));
+
+                Err(e)
+            }
+        }
+    }
+
     pub fn recv(&self) -> Result<T, RecvError> {
         match self.receiver.recv() {
             Ok(msg) => {
diff --git a/massa-channel/src/sender.rs b/massa-channel/src/sender.rs
index 187b58aaa7c..6276ee9d1be 100644
--- a/massa-channel/src/sender.rs
+++ b/massa-channel/src/sender.rs
@@ -1,4 +1,7 @@
-use std::{ops::Deref, time::Duration};
+use std::{
+    ops::Deref,
+    time::{Duration, Instant},
+};
 
 use crossbeam::channel::{SendError, SendTimeoutError, Sender, TrySendError};
 use prometheus::Gauge;
@@ -34,6 +37,16 @@ impl<T> MassaSender<T> {
         }
     }
 
+    pub fn send_deadline(&self, msg: T, deadline: Instant) -> Result<(), SendTimeoutError<T>> {
+        match self.sender.send_deadline(msg, deadline) {
+            Ok(()) => {
+                self.actual_len.inc();
+                Ok(())
+            }
+            Err(e) => Err(e),
+        }
+    }
+
     pub fn try_send(&self, msg: T) -> Result<(), TrySendError<T>> {
         match self.sender.try_send(msg) {
             Ok(()) => {

From 214a16e861eb8c23987890157d7bd00ee9ed087d Mon Sep 17 00:00:00 2001
From: modship
Date: Tue, 13 Jun 2023 16:16:08 +0200
Subject: [PATCH 02/24] update block graph counter

---
 massa-consensus-worker/src/state/process.rs | 28 ++++++++--------
 massa-metrics/src/lib.rs                    | 37 ++++++++++++---------
 massa-storage/src/block_indexes.rs          |  7 ++--
 3 files changed, 39 insertions(+), 33 deletions(-)

diff --git a/massa-consensus-worker/src/state/process.rs b/massa-consensus-worker/src/state/process.rs
index 95d4b5e6bea..9fd28261547 100644
--- a/massa-consensus-worker/src/state/process.rs
+++ b/massa-consensus-worker/src/state/process.rs
@@ -330,19 +330,6 @@ impl ConsensusState {
             "block_id": add_block_id
         });
 
-        {
-            // set metrics
-            let add_slot_timestamp = timeslots::get_block_slot_timestamp(
-                self.config.thread_count,
-                self.config.t0,
-                self.config.genesis_timestamp,
-                add_block_slot,
-            )?;
-            let now = MassaTime::now()?;
-            let diff = now.saturating_sub(add_slot_timestamp);
-            self.massa_metrics.set_block_graph_diff_ms(diff.to_millis());
-        }
-
         // Ensure block parents are claimed by the block's storage.
         // Note that operations and endorsements should already be there (claimed in Protocol).
         storage.claim_block_refs(&parents_hash_period.iter().map(|(p_id, _)| *p_id).collect());
@@ -498,6 +485,21 @@ impl ConsensusState {
         self.mark_final_blocks(&add_block_id, final_blocks)?;
 
         massa_trace!("consensus.block_graph.add_block_to_graph.end", {});
+
+        {
+            // set metrics
+            let add_slot_timestamp = timeslots::get_block_slot_timestamp(
+                self.config.thread_count,
+                self.config.t0,
+                self.config.genesis_timestamp,
+                add_block_slot,
+            )?;
+            let now = MassaTime::now()?;
+            let diff = now.saturating_sub(add_slot_timestamp);
+            self.massa_metrics.inc_block_graph_counter();
+            self.massa_metrics.inc_block_graph_ms(diff.to_millis());
+        }
+
         Ok(())
     }
diff --git a/massa-metrics/src/lib.rs b/massa-metrics/src/lib.rs
index b9a1320d741..6269d0ece66 100644
--- a/massa-metrics/src/lib.rs
+++ b/massa-metrics/src/lib.rs
@@ -1,5 +1,5 @@
 use lazy_static::lazy_static;
-use prometheus::{register_int_gauge, Gauge, IntGauge};
+use prometheus::{register_int_gauge, Gauge, IntCounter, IntGauge};
 
 #[cfg(not(feature = "testing"))]
 mod server;
@@ -15,7 +15,7 @@ lazy_static! {
     static ref OPERATIONS_COUNTER: IntGauge = register_int_gauge!("operations_counter", "operations counter len").unwrap();
     static ref BLOCKS_COUNTER: IntGauge = register_int_gauge!("blocks_counter", "blocks counter len").unwrap();
     static ref ENDORSEMENTS_COUNTER: IntGauge = register_int_gauge!("endorsements_counter", "endorsements counter len").unwrap();
-    static ref DELTA_BLOCK_GRAPH_SLOT: IntGauge = register_int_gauge!("delta_block_graph_slot", "delta in ms between block inclusion in graph and block slot").unwrap();
+    // static ref BLOCK_GRAPH_SLOT_TIME: IntGauge = register_int_gauge!("block_graph_slot_time", "sum of delta in ms between block inclusion in graph and block slot").unwrap();
 
     // static ref A_INT_GAUGE: IntGauge = register_int_gauge!("A_int_gauge", "foobar").unwrap();
 
@@ -26,12 +26,8 @@ pub fn set_connections(in_connections: usize, out_connections: usize) {
     OUT_CONNECTIONS.set(out_connections as i64);
 }
 
-pub fn inc_blocks_counter() {
-    BLOCKS_COUNTER.inc();
-}
-
-pub fn dec_blocks_counter() {
-    BLOCKS_COUNTER.dec();
+pub fn set_blocks_counter(val: usize) {
+    BLOCKS_COUNTER.set(val as i64);
 }
 
 pub fn inc_endorsements_counter() {
@@ -54,7 +50,8 @@ pub fn dec_operations_counter() {
 
 pub struct MassaMetrics {
     consensus_vec: Vec<Gauge>,
 
-    block_graph_diff_ms: IntGauge,
+    block_graph_counter: IntCounter,
+    block_graph_ms: IntCounter,
 
     active_in_connections: IntGauge,
     active_out_connections: IntGauge,
@@ -227,9 +224,11 @@ impl MassaMetrics {
         )
         .unwrap();
 
-        let block_graph_diff_ms = IntGauge::new(
-            "block_slot_graph_diff_ms",
-            "time in ms between slot creation and his inclusion in graph",
+        let block_graph_counter =
+            IntCounter::new("block_slot_graph_counter", "total blocks in graph").unwrap();
+        let block_graph_ms = IntCounter::new(
+            "block_slot_graph_ms",
+            "sum of delta in ms between block inclusion in graph and block slot",
         )
         .unwrap();
@@ -265,13 +264,15 @@ impl MassaMetrics {
             let _ = prometheus::register(Box::new(endorsement_cache_checked_endorsements.clone()));
             let _ = prometheus::register(Box::new(endorsement_cache_known_by_peer.clone()));
 
-            let _ = prometheus::register(Box::new(block_graph_diff_ms.clone()));
+            let _ = prometheus::register(Box::new(block_graph_counter.clone()));
+            let _ = prometheus::register(Box::new(block_graph_ms.clone()));
             }
         }
 
         MassaMetrics {
             consensus_vec,
-            block_graph_diff_ms,
+            block_graph_counter,
+            block_graph_ms,
             active_in_connections,
             active_out_connections,
             retrieval_thread_stored_operations_sum,
@@ -397,8 +398,12 @@ impl MassaMetrics {
             .set(known_by_peer as i64);
     }
 
-    pub fn set_block_graph_diff_ms(&self, diff: u64) {
-        self.block_graph_diff_ms.set(diff as i64);
+    pub fn inc_block_graph_ms(&self, diff: u64) {
+        self.block_graph_ms.inc_by(diff);
+    }
+
+    pub fn inc_block_graph_counter(&self) {
+        self.block_graph_counter.inc();
     }
 }
 // mod test {
diff --git a/massa-storage/src/block_indexes.rs b/massa-storage/src/block_indexes.rs
index 96efc0904c7..9ebfa236050 100644
--- a/massa-storage/src/block_indexes.rs
+++ b/massa-storage/src/block_indexes.rs
@@ -33,8 +33,6 @@ impl BlockIndexes {
     pub(crate) fn insert(&mut self, block: SecureShareBlock) {
         if let Ok(b) = self.blocks.try_insert(block.id, block) {
-            massa_metrics::inc_blocks_counter();
-
             // update creator index
             self.index_by_creator
                 .entry(b.content_creator_address)
@@ -59,6 +57,8 @@ impl BlockIndexes {
                     .or_default()
                     .insert(b.id);
             }
+
+            massa_metrics::set_blocks_counter(self.blocks.len());
         }
     }
 
@@ -67,8 +67,6 @@ impl BlockIndexes {
     /// * `block_id`: the block id to remove
     pub(crate) fn remove(&mut self, block_id: &BlockId) -> Option<SecureShareBlock> {
         if let Some(b) = self.blocks.remove(block_id) {
-            massa_metrics::dec_blocks_counter();
-
             // update creator index
             if let hash_map::Entry::Occupied(mut occ) =
                 self.index_by_creator.entry(b.content_creator_address)
@@ -108,6 +106,7 @@ impl BlockIndexes {
                     }
                 }
             }
+            massa_metrics::set_blocks_counter(self.blocks.len());
             return Some(b);
         }
         None

From ae041f017bc696d65942fd586958c09c4a8a99bd Mon Sep 17 00:00:00 2001
From: AurelienFT
Date: Wed, 14 Jun 2023 11:32:51 +0200
Subject: [PATCH 03/24] Add debug logs.

---
 Cargo.lock                                    | 22 ++++++++++++++++---
 massa-node/Cargo.toml                         |  2 +-
 massa-protocol-worker/Cargo.toml              |  2 +-
 .../src/handlers/block_handler/retrieval.rs   | 16 +++++++-------
 massa-protocol-worker/src/messages.rs         |  2 ++
 5 files changed, 31 insertions(+), 13 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 3f56be93edb..201c4442564 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2388,7 +2388,7 @@ dependencies = [
 "massa_wallet",
 "parking_lot",
 "paw",
- "peernet",
+ "peernet 0.1.0 (git+https://github.com/massalabs/PeerNet?branch=debug_logs)",
 "rand 0.8.5",
 "serde",
 "structopt",
@@ -3024,7 +3024,7 @@ dependencies = [
 "massa_versioning",
 "mockall",
 "nom",
- "peernet",
+ "peernet 0.1.0 (git+https://github.com/massalabs/PeerNet?rev=bf8adf5)",
 "serde",
 "serde_json",
 "tempfile",
@@ -3052,7 +3052,7 @@ dependencies = [
 "nom",
 "num_enum",
 "parking_lot",
- "peernet",
+ "peernet 0.1.0 (git+https://github.com/massalabs/PeerNet?branch=debug_logs)",
 "rand 0.8.5",
 "rayon",
 "schnellru",
@@ -3642,6 +3642,22 @@ version = "0.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099"
 
+[[package]]
+name = "peernet"
+version = "0.1.0"
+source = "git+https://github.com/massalabs/PeerNet?branch=debug_logs#fe9b596161366553cccb0446a43218b288be1315"
+dependencies = [
+ "crossbeam",
+ "enum_delegate",
+ "mio",
+ "parking_lot",
+ "quiche",
+ "rand 0.8.5",
+ "serde",
+ "stream_limiter",
+ "thiserror",
+]
+
 [[package]]
 name = "peernet"
 version = "0.1.0"
diff --git a/massa-node/Cargo.toml b/massa-node/Cargo.toml
index 9bb9380566e..0a02de3a396 100644
--- a/massa-node/Cargo.toml
+++ b/massa-node/Cargo.toml
@@ -17,7 +17,7 @@ tracing = { version = "0.1", features = [
     "max_level_debug",
     "release_max_level_debug",
 ] }
-peernet = { git = "https://github.com/massalabs/PeerNet",
rev = "bf8adf5" } +peernet = { git = "https://github.com/massalabs/PeerNet", branch = "debug_logs" } tracing-subscriber = "0.3" paw = "1.0" rand = { version = "0.8.5", optional = true } diff --git a/massa-protocol-worker/Cargo.toml b/massa-protocol-worker/Cargo.toml index 88e350aa7db..990591aacbb 100644 --- a/massa-protocol-worker/Cargo.toml +++ b/massa-protocol-worker/Cargo.toml @@ -13,7 +13,7 @@ serde_json = "1.0" nom = "=7.1" num_enum = "0.5" # TODO tag peernet version -peernet = { git = "https://github.com/massalabs/PeerNet", rev = "bf8adf5" } +peernet = { git = "https://github.com/massalabs/PeerNet", branch = "debug_logs" } tempfile = { version = "3.3", optional = true } # use with testing feature rayon = "1.7.0" schnellru = "0.2.1" diff --git a/massa-protocol-worker/src/handlers/block_handler/retrieval.rs b/massa-protocol-worker/src/handlers/block_handler/retrieval.rs index 4e0dade87bf..7a476ef93fd 100644 --- a/massa-protocol-worker/src/handlers/block_handler/retrieval.rs +++ b/massa-protocol-worker/src/handlers/block_handler/retrieval.rs @@ -33,14 +33,14 @@ use massa_models::{ operation::{OperationId, SecureShareOperation}, prehash::{CapacityAllocator, PreHashMap, PreHashSet}, secure_share::{Id, SecureShare}, - timeslots::get_block_slot_timestamp, + timeslots::get_block_slot_timestamp, config::{T0, GENESIS_TIMESTAMP, THREAD_COUNT}, }; use massa_pool_exports::PoolController; use massa_protocol_exports::PeerId; use massa_protocol_exports::{ProtocolConfig, ProtocolError}; use massa_serialization::{DeserializeError, Deserializer, Serializer}; use massa_storage::Storage; -use massa_time::TimeError; +use massa_time::{TimeError, MassaTime}; use massa_versioning::versioning::MipStore; use schnellru::{ByLength, LruMap}; use tracing::{debug, info, warn}; @@ -144,13 +144,13 @@ impl RetrievalThread { } match message { BlockMessage::AskForBlocks(block_infos) => { - debug!("Received block message: AskForBlocks from {}", peer_id); + debug!("AURELIEN: Received block message: AskForBlocks {:?} from {}", block_infos, peer_id); if let Err(err) = self.on_asked_for_blocks_received(peer_id.clone(), block_infos) { warn!("Error in on_asked_for_blocks_received: {:?}", err); } } BlockMessage::ReplyForBlocks(block_infos) => { - debug!("Received block message: ReplyForBlocks from {}", peer_id); + debug!("AURELIEN: Received block message: ReplyForBlocks {:?} from {}", block_infos, peer_id); for (block_id, block_info) in block_infos.into_iter() { if let Err(err) = self.on_block_info_received(peer_id.clone(), block_id, block_info) { warn!("Error in on_block_info_received: {:?}", err); @@ -161,7 +161,7 @@ impl RetrievalThread { } } BlockMessage::BlockHeader(header) => { - debug!("Received block message: BlockHeader from {}", peer_id); + debug!("AURELIEN: Received block message: BlockHeader {:?} from {}", header, peer_id); massa_trace!(BLOCK_HEADER, { "peer_id": peer_id, "header": header}); if let Ok(Some((block_id, is_new))) = self.note_header_from_peer(&header, &peer_id) @@ -199,7 +199,7 @@ impl RetrievalThread { Ok(command) => { match command { BlockHandlerRetrievalCommand::WishlistDelta { new, remove } => { - debug!("Received block message: command WishlistDelta"); + debug!("AURELIEN: Received block message: command WishlistDelta {:?} {:?}", new, remove); massa_trace!("protocol.protocol_worker.process_command.wishlist_delta.begin", { "new": new, "remove": remove }); for (block_id, header) in new.into_iter() { self.block_wishlist.insert( @@ -236,8 +236,6 @@ impl RetrievalThread { } }, recv(tick_update_metrics) 
-> _ => {
-
-
                 // update metrics
                 {
                     let block_read = self.cache.read();
@@ -980,6 +978,8 @@ impl RetrievalThread {
                 header: header.clone(),
                 operations: block_operation_ids.clone(),
             };
+            let latency = MassaTime::now().unwrap().saturating_sub(get_block_slot_timestamp(THREAD_COUNT, T0, *GENESIS_TIMESTAMP, header.content.slot).unwrap());
+            println!("AURELIEN: Finish receive block id {} from peer {} with a latency of {}", block_id, from_peer_id);
             let mut content_serialized = Vec::new();
             BlockSerializer::new() // todo : keep the serializer in the struct to avoid recreating it
diff --git a/massa-protocol-worker/src/messages.rs b/massa-protocol-worker/src/messages.rs
index 5b6a40f9b0f..d6e59dd85aa 100644
--- a/massa-protocol-worker/src/messages.rs
+++ b/massa-protocol-worker/src/messages.rs
@@ -10,6 +10,7 @@ use peernet::{
         MessagesHandler as PeerNetMessagesHandler, MessagesSerializer as PeerNetMessagesSerializer,
     },
 };
+use tracing::debug;
 
 use crate::handlers::{
     block_handler::{BlockMessage, BlockMessageSerializer},
@@ -227,6 +228,7 @@ pub struct MessagesHandler {
 
 impl PeerNetMessagesHandler for MessagesHandler {
     fn handle(&self, data: &[u8], peer_id: &PeerId) -> PeerNetResult<()> {
+        debug!("AURELIEN: received message of len {:?}", data.len());
         let (data, raw_id) = self
             .id_deserializer
             .deserialize::<DeserializeError>(data)

From 05c83e395954c70aaea545965d09cfaa98917459 Mon Sep 17 00:00:00 2001
From: AurelienFT
Date: Wed, 14 Jun 2023 11:33:57 +0200
Subject: [PATCH 04/24] fix compilation

---
 Cargo.lock                                    | 22 +++----------------
 massa-protocol-exports/Cargo.toml             |  2 +-
 .../src/handlers/block_handler/retrieval.rs   |  2 +-
 3 files changed, 5 insertions(+), 21 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 201c4442564..de7f33b54fa 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2388,7 +2388,7 @@ dependencies = [
 "massa_wallet",
 "parking_lot",
 "paw",
- "peernet 0.1.0 (git+https://github.com/massalabs/PeerNet?branch=debug_logs)",
+ "peernet",
 "rand 0.8.5",
 "serde",
 "structopt",
@@ -3024,7 +3024,7 @@ dependencies = [
 "massa_versioning",
 "mockall",
 "nom",
- "peernet 0.1.0 (git+https://github.com/massalabs/PeerNet?rev=bf8adf5)",
+ "peernet",
 "serde",
 "serde_json",
 "tempfile",
@@ -3052,7 +3052,7 @@ dependencies = [
 "nom",
 "num_enum",
 "parking_lot",
- "peernet 0.1.0 (git+https://github.com/massalabs/PeerNet?branch=debug_logs)",
+ "peernet",
 "rand 0.8.5",
 "rayon",
 "schnellru",
@@ -3658,22 +3658,6 @@ dependencies = [
 "thiserror",
 ]
 
-[[package]]
-name = "peernet"
-version = "0.1.0"
-source = "git+https://github.com/massalabs/PeerNet?rev=bf8adf5#bf8adf50e2427a6692c65f0dbb53c69b504185e0"
-dependencies = [
- "crossbeam",
- "enum_delegate",
- "mio",
- "parking_lot",
- "quiche",
- "rand 0.8.5",
- "serde",
- "stream_limiter",
- "thiserror",
-]
-
 [[package]]
 name = "percent-encoding"
 version = "2.2.0"
diff --git a/massa-protocol-exports/Cargo.toml b/massa-protocol-exports/Cargo.toml
index 13cf19ee0d0..31c731c25ce 100644
--- a/massa-protocol-exports/Cargo.toml
+++ b/massa-protocol-exports/Cargo.toml
@@ -11,7 +11,7 @@ nom = "=7.1"
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 # TODO tag peernet version
-peernet = { git = "https://github.com/massalabs/PeerNet", rev = "bf8adf5" }
+peernet = { git = "https://github.com/massalabs/PeerNet", branch = "debug_logs" }
 tempfile = { version = "3.3", optional = true } # use with testing feature
 mockall = "0.11.4"
diff --git a/massa-protocol-worker/src/handlers/block_handler/retrieval.rs b/massa-protocol-worker/src/handlers/block_handler/retrieval.rs
index 7a476ef93fd..a07cdbb034c
100644 --- a/massa-protocol-worker/src/handlers/block_handler/retrieval.rs +++ b/massa-protocol-worker/src/handlers/block_handler/retrieval.rs @@ -979,7 +979,7 @@ impl RetrievalThread { operations: block_operation_ids.clone(), }; let latency = MassaTime::now().unwrap().saturating_sub(get_block_slot_timestamp(THREAD_COUNT, T0, *GENESIS_TIMESTAMP, header.content.slot).unwrap()); - println!("AURELIEN: Finish receive block id {} from peer {} with a latency of {}", block_id, from_peer_id); + println!("AURELIEN: Finish receive block id {} from peer {} with a latency of {}", block_id, from_peer_id, latency); let mut content_serialized = Vec::new(); BlockSerializer::new() // todo : keep the serializer in the struct to avoid recreating it From a1792de69a2b4565489c82a789229b28919b619f Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Wed, 14 Jun 2023 11:40:34 +0200 Subject: [PATCH 05/24] Add more logs --- .../src/handlers/block_handler/retrieval.rs | 20 +++++++++++++++---- massa-protocol-worker/src/wrap_network.rs | 1 + 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/massa-protocol-worker/src/handlers/block_handler/retrieval.rs b/massa-protocol-worker/src/handlers/block_handler/retrieval.rs index a07cdbb034c..465a527b8a1 100644 --- a/massa-protocol-worker/src/handlers/block_handler/retrieval.rs +++ b/massa-protocol-worker/src/handlers/block_handler/retrieval.rs @@ -29,18 +29,19 @@ use massa_models::{ block::{Block, BlockSerializer}, block_header::SecuredHeader, block_id::BlockId, + config::{GENESIS_TIMESTAMP, T0, THREAD_COUNT}, endorsement::SecureShareEndorsement, operation::{OperationId, SecureShareOperation}, prehash::{CapacityAllocator, PreHashMap, PreHashSet}, secure_share::{Id, SecureShare}, - timeslots::get_block_slot_timestamp, config::{T0, GENESIS_TIMESTAMP, THREAD_COUNT}, + timeslots::get_block_slot_timestamp, }; use massa_pool_exports::PoolController; use massa_protocol_exports::PeerId; use massa_protocol_exports::{ProtocolConfig, ProtocolError}; use massa_serialization::{DeserializeError, Deserializer, Serializer}; use massa_storage::Storage; -use massa_time::{TimeError, MassaTime}; +use massa_time::{MassaTime, TimeError}; use massa_versioning::versioning::MipStore; use schnellru::{ByLength, LruMap}; use tracing::{debug, info, warn}; @@ -978,8 +979,19 @@ impl RetrievalThread { header: header.clone(), operations: block_operation_ids.clone(), }; - let latency = MassaTime::now().unwrap().saturating_sub(get_block_slot_timestamp(THREAD_COUNT, T0, *GENESIS_TIMESTAMP, header.content.slot).unwrap()); - println!("AURELIEN: Finish receive block id {} from peer {} with a latency of {}", block_id, from_peer_id, latency); + let latency = MassaTime::now().unwrap().saturating_sub( + get_block_slot_timestamp( + THREAD_COUNT, + T0, + *GENESIS_TIMESTAMP, + header.content.slot, + ) + .unwrap(), + ); + println!( + "AURELIEN: Finish receive block id {} from peer {} with a latency of {}", + block_id, from_peer_id, latency + ); let mut content_serialized = Vec::new(); BlockSerializer::new() // todo : keep the serializer in the struct to avoid recreating it diff --git a/massa-protocol-worker/src/wrap_network.rs b/massa-protocol-worker/src/wrap_network.rs index 7a6cc2ba845..95df2e74146 100644 --- a/massa-protocol-worker/src/wrap_network.rs +++ b/massa-protocol-worker/src/wrap_network.rs @@ -49,6 +49,7 @@ impl ActiveConnectionsTrait for SharedActiveConnections { high_priority: bool, ) -> Result<(), ProtocolError> { if let Some(connection) = self.read().connections.get(peer_id) { + println!("AURELIEN: 
send to peer {}, message {:?}", peer_id, message);
             connection
                 .send_channels
                 .try_send(message_serializer, message, high_priority)

From f8c281dc002c19588a5e1572c72014b137e7a437 Mon Sep 17 00:00:00 2001
From: AurelienFT
Date: Wed, 14 Jun 2023 16:07:09 +0200
Subject: [PATCH 06/24] Try to fix caches

---
 massa-protocol-worker/src/connectivity.rs     |   3 +-
 .../src/handlers/block_handler/mod.rs         |   6 +-
 .../src/handlers/block_handler/retrieval.rs   | 137 ++++++++++++++++--
 .../endorsement_handler/propagation.rs        |   4 +-
 4 files changed, 135 insertions(+), 15 deletions(-)

diff --git a/massa-protocol-worker/src/connectivity.rs b/massa-protocol-worker/src/connectivity.rs
index 21677095364..1441bd55cfb 100644
--- a/massa-protocol-worker/src/connectivity.rs
+++ b/massa-protocol-worker/src/connectivity.rs
@@ -151,7 +151,7 @@ pub(crate) fn start_connectivity_thread(
                 channel_endorsements.1,
                 protocol_channels.endorsement_handler_retrieval.0,
                 protocol_channels.endorsement_handler_retrieval.1,
-                sender_endorsements_propagation_ext,
+                sender_endorsements_propagation_ext.clone(),
                 protocol_channels.endorsement_handler_propagation.1.clone(),
                 peer_management_handler.sender.command_sender.clone(),
                 massa_metrics.clone(),
@@ -166,6 +166,7 @@ pub(crate) fn start_connectivity_thread(
                 protocol_channels.block_handler_propagation.1.clone(),
                 sender_blocks_propagation_ext,
                 sender_operations_propagation_ext,
+                sender_endorsements_propagation_ext,
                 peer_management_handler.sender.command_sender.clone(),
                 config.clone(),
                 endorsement_cache,
diff --git a/massa-protocol-worker/src/handlers/block_handler/mod.rs b/massa-protocol-worker/src/handlers/block_handler/mod.rs
index f2b3fecb7d2..82c6fedd9ae 100644
--- a/massa-protocol-worker/src/handlers/block_handler/mod.rs
+++ b/massa-protocol-worker/src/handlers/block_handler/mod.rs
@@ -31,7 +31,9 @@ pub use messages::{
 };
 
 use super::{
-    endorsement_handler::cache::SharedEndorsementCache,
+    endorsement_handler::{
+        cache::SharedEndorsementCache, commands_propagation::EndorsementHandlerPropagationCommand,
+    },
     operation_handler::{
         cache::SharedOperationCache, commands_propagation::OperationHandlerPropagationCommand,
     },
@@ -56,6 +58,7 @@ impl BlockHandler {
         internal_receiver: MassaReceiver<BlockHandlerPropagationCommand>,
         internal_sender: MassaSender<BlockHandlerPropagationCommand>,
         sender_propagations_ops: MassaSender<OperationHandlerPropagationCommand>,
+        sender_propagations_endorsements: MassaSender<EndorsementHandlerPropagationCommand>,
         peer_cmd_sender: MassaSender<PeerManagementCmd>,
         config: ProtocolConfig,
         endorsement_cache: SharedEndorsementCache,
@@ -73,6 +76,7 @@ impl BlockHandler {
             receiver_ext,
             internal_sender.clone(),
             sender_propagations_ops,
+            sender_propagations_endorsements,
             peer_cmd_sender.clone(),
             config.clone(),
             endorsement_cache,
diff --git a/massa-protocol-worker/src/handlers/block_handler/retrieval.rs b/massa-protocol-worker/src/handlers/block_handler/retrieval.rs
index 465a527b8a1..b8f1711a065 100644
--- a/massa-protocol-worker/src/handlers/block_handler/retrieval.rs
+++ b/massa-protocol-worker/src/handlers/block_handler/retrieval.rs
@@ -6,7 +6,10 @@ use std::{
 
 use crate::{
     handlers::{
-        endorsement_handler::cache::SharedEndorsementCache,
+        endorsement_handler::{
+            cache::SharedEndorsementCache,
+            commands_propagation::EndorsementHandlerPropagationCommand,
+        },
         operation_handler::{
             cache::SharedOperationCache, commands_propagation::OperationHandlerPropagationCommand,
         },
@@ -34,6 +37,7 @@ use massa_models::{
     operation::{OperationId, SecureShareOperation},
     prehash::{CapacityAllocator, PreHashMap, PreHashSet},
     secure_share::{Id, SecureShare},
+    slot::Slot,
     timeslots::get_block_slot_timestamp,
 };
 use massa_pool_exports::PoolController;
 use massa_protocol_exports::PeerId;
 use massa_protocol_exports::{ProtocolConfig, ProtocolError};
 use massa_serialization::{DeserializeError, Deserializer, Serializer};
 use massa_storage::Storage;
 use massa_time::{MassaTime, TimeError};
 use massa_versioning::versioning::MipStore;
 use schnellru::{ByLength, LruMap};
 use tracing::{debug, info, warn};
@@ -96,6 +100,7 @@ pub struct RetrievalThread {
     asked_blocks: HashMap<PeerId, PreHashMap<BlockId, Instant>>,
     peer_cmd_sender: MassaSender<PeerManagementCmd>,
     sender_propagation_ops: MassaSender<OperationHandlerPropagationCommand>,
+    sender_propagation_endorsements: MassaSender<EndorsementHandlerPropagationCommand>,
     endorsement_cache: SharedEndorsementCache,
     operation_cache: SharedOperationCache,
     next_timer_ask_block: Instant,
@@ -516,7 +521,7 @@ impl RetrievalThread {
                 .get_or_insert(from_peer_id.clone(), || {
                     LruMap::new(ByLength::new(
                         self.config
-                            .max_node_known_blocks_size
+                            .max_node_known_endorsements_size
                             .try_into()
                             .expect("max_node_known_blocks_size in config must be > 0"),
                     ))
@@ -588,7 +593,7 @@ impl RetrievalThread {
                 .get_or_insert(from_peer_id.clone(), || {
                     LruMap::new(ByLength::new(
                         self.config
-                            .max_node_known_blocks_size
+                            .max_node_known_endorsements_size
                            .try_into()
                            .expect("max_node_known_blocks_size in config must be > 0"),
                    ))
@@ -638,7 +643,7 @@ impl RetrievalThread {
         endorsements: Vec<SecureShareEndorsement>,
         from_peer_id: &PeerId,
     ) -> Result<(), ProtocolError> {
-        massa_trace!("protocol.protocol_worker.note_endorsements_from_peer", { "peer": from_peer_id, "endorsements": endorsements});
+        massa_trace!("protocol.protocol_worker.note_endorsements_from_node", { "node": from_peer_id, "endorsements": endorsements});
         let length = endorsements.len();
         let mut new_endorsements = PreHashMap::with_capacity(length);
         let mut endorsement_ids = PreHashSet::with_capacity(length);
@@ -672,7 +677,6 @@ impl RetrievalThread {
                 })
                 .collect::<Vec<_>>(),
         )?;
-
         'write_cache: {
             let mut cache_write = self.endorsement_cache.write();
             // add to verified signature cache
@@ -702,6 +706,49 @@ impl RetrievalThread {
         if !new_endorsements.is_empty() {
             let mut endorsements = self.storage.clone_without_refs();
             endorsements.store_endorsements(new_endorsements.into_values().collect());
+
+            // Propagate endorsements
+            // Propagate endorsements when the slot of the block they endorse isn't `max_endorsements_propagation_time` old.
+            let mut endorsements_to_propagate = endorsements.clone();
+            let endorsements_to_not_propagate = {
+                let now = MassaTime::now()?;
+                let read_endorsements = endorsements_to_propagate.read_endorsements();
+                endorsements_to_propagate
+                    .get_endorsement_refs()
+                    .iter()
+                    .filter_map(|endorsement_id| {
+                        let slot_endorsed_block =
+                            read_endorsements.get(endorsement_id).unwrap().content.slot;
+                        let slot_timestamp = get_block_slot_timestamp(
+                            self.config.thread_count,
+                            self.config.t0,
+                            self.config.genesis_timestamp,
+                            slot_endorsed_block,
+                        );
+                        match slot_timestamp {
+                            Ok(slot_timestamp) => {
+                                if slot_timestamp
+                                    .saturating_add(self.config.max_endorsements_propagation_time)
+                                    < now
+                                {
+                                    Some(*endorsement_id)
+                                } else {
+                                    None
+                                }
+                            }
+                            Err(_) => Some(*endorsement_id),
+                        }
+                    })
+                    .collect()
+            };
+            endorsements_to_propagate.drop_endorsement_refs(&endorsements_to_not_propagate);
+            if let Err(err) = self.sender_propagation_endorsements.try_send(
+                EndorsementHandlerPropagationCommand::PropagateEndorsements(
+                    endorsements_to_propagate,
+                ),
+            ) {
+                warn!("Failed to send from block retrieval thread of endorsement handler to propagation: {:?}", err);
+            }
             // Add to pool
             self.pool_controller.add_endorsements(endorsements);
         }
@@ -1051,6 +1098,7 @@ impl RetrievalThread {
         massa_trace!("protocol.protocol_worker.note_operations_from_peer", { "peer": source_peer_id, "operations": operations });
         let length = operations.len();
         let mut new_operations = PreHashMap::with_capacity(length);
+        let mut received_ids = PreHashSet::with_capacity(length);
         for operation in operations {
             let operation_id = operation.id;
             if operation.serialized_size() > self.config.max_serialized_operations_size_per_block {
@@ -1061,6 +1109,7 @@ impl RetrievalThread {
                     self.config.max_serialized_operations_size_per_block
                 )));
             };
+            received_ids.insert(operation_id);
 
             // Check operation signature only if not already checked.
             if self
@@ -1074,6 +1123,7 @@ impl RetrievalThread {
                 new_operations.insert(operation_id, operation);
             };
         }
+
         // optimized signature verification
         verify_sigs_batch(
             &new_operations
                 .iter()
                 .map(|(op_id, op)| (*op_id.get_hash(), op.signature, op.content_creator_pub_key))
                 .collect::<Vec<_>>(),
         )?;
-        {
+
+        'write_cache: {
             // add to checked operations
             let mut cache_write = self.operation_cache.write();
             for op_id in new_operations.keys().copied() {
                 cache_write.insert_checked_operation(op_id);
             }
+
+            // add to known ops
+            let Ok(known_ops) = cache_write
+                .ops_known_by_peer
+                .get_or_insert(source_peer_id.clone(), || {
+                    LruMap::new(ByLength::new(
+                        self.config
+                            .max_node_known_ops_size
+                            .try_into()
+                            .expect("max_node_known_ops_size in config must be > 0"),
+                    ))
+                })
+                .ok_or(()) else {
+                    warn!("ops_known_by_peer limitation reached");
+                    break 'write_cache;
+                };
+            for id in received_ids {
+                known_ops.insert(id.prefix(), ());
+            }
         }
-        self.sender_propagation_ops
-            .try_send(OperationHandlerPropagationCommand::AnnounceOperations(
-                new_operations.keys().copied().collect(),
-            ))
-            .map_err(|err| ProtocolError::ChannelError(err.to_string()))?;
+
+        if !new_operations.is_empty() {
+            // Store operation, claim locally
+            let mut ops = self.storage.clone_without_refs();
+            ops.store_operations(new_operations.into_values().collect());
+
+            // Propagate operations when their expire period isn't `max_operations_propagation_time` old.
+            let mut ops_to_propagate = ops.clone();
+            let operations_to_not_propagate = {
+                let now = MassaTime::now()?;
+                let read_operations = ops_to_propagate.read_operations();
+                ops_to_propagate
+                    .get_op_refs()
+                    .iter()
+                    .filter(|op_id| {
+                        let expire_period =
+                            read_operations.get(op_id).unwrap().content.expire_period;
+                        let expire_period_timestamp = get_block_slot_timestamp(
+                            self.config.thread_count,
+                            self.config.t0,
+                            self.config.genesis_timestamp,
+                            Slot::new(expire_period, 0),
+                        );
+                        match expire_period_timestamp {
+                            Ok(slot_timestamp) => {
+                                slot_timestamp
+                                    .saturating_add(self.config.max_operations_propagation_time)
+                                    < now
+                            }
+                            Err(_) => true,
+                        }
+                    })
+                    .copied()
+                    .collect()
+            };
+            ops_to_propagate.drop_operation_refs(&operations_to_not_propagate);
+            let to_announce: PreHashSet<OperationId> =
+                ops_to_propagate.get_op_refs().iter().copied().collect();
+            self.storage.extend(ops_to_propagate);
+            self.sender_propagation_ops
+                .try_send(OperationHandlerPropagationCommand::AnnounceOperations(
+                    to_announce,
+                ))
+                .map_err(|err| ProtocolError::SendError(err.to_string()))?;
+            // Add to pool
+            self.pool_controller.add_operations(ops);
+        }
+
         Ok(())
     }
@@ -1345,6 +1458,7 @@ pub fn start_retrieval_thread(
     receiver: MassaReceiver<BlockHandlerRetrievalCommand>,
     _internal_sender: MassaSender<BlockHandlerPropagationCommand>,
     sender_propagation_ops: MassaSender<OperationHandlerPropagationCommand>,
+    sender_propagation_endorsements: MassaSender<EndorsementHandlerPropagationCommand>,
     peer_cmd_sender: MassaSender<PeerManagementCmd>,
     config: ProtocolConfig,
     endorsement_cache: SharedEndorsementCache,
@@ -1368,6 +1482,7 @@ pub fn start_retrieval_thread(
         asked_blocks: HashMap::default(),
         peer_cmd_sender,
         sender_propagation_ops,
+        sender_propagation_endorsements,
         receiver_network,
         block_message_serializer,
         receiver,
diff --git a/massa-protocol-worker/src/handlers/endorsement_handler/propagation.rs b/massa-protocol-worker/src/handlers/endorsement_handler/propagation.rs
index 09f7d335aa4..8b1fc96a5b7 100644
--- a/massa-protocol-worker/src/handlers/endorsement_handler/propagation.rs
+++ b/massa-protocol-worker/src/handlers/endorsement_handler/propagation.rs
@@ -48,14 +48,14 @@ impl PropagationThread {
                     }
                 }
             }
-            let endorsements_ids: PreHashSet<EndorsementId> = endorsements
+            let ids: PreHashSet<EndorsementId> = endorsements
                 .get_endorsement_refs()
                 .iter()
                 .copied()
                 .collect();
             {
                 let mut cache_write = self.cache.write();
-                for endorsement_id in endorsements_ids.iter().copied() {
+                for endorsement_id in ids.iter().copied() {
                     cache_write.checked_endorsements.insert(endorsement_id, ());
                 }
                 // Add peers that potentially don't exist in cache

From caf4e3073eba56dfd943f12b9b91e8732cf70652 Mon Sep 17 00:00:00 2001
From: AurelienFT
Date: Wed, 14 Jun 2023 17:37:32 +0200
Subject: [PATCH 07/24] Add print block prod

---
 massa-factory-worker/src/block_factory.rs | 38 ++++++++++++++++++++---
 1 file changed, 34 insertions(+), 4 deletions(-)

diff --git a/massa-factory-worker/src/block_factory.rs b/massa-factory-worker/src/block_factory.rs
index 41d52bf659f..e6961dbf323 100644
--- a/massa-factory-worker/src/block_factory.rs
+++ b/massa-factory-worker/src/block_factory.rs
@@ -134,8 +134,8 @@ impl BlockFactoryWorker {
         };
 
         debug!(
-            "block factory selected block producer address for slot {}: {}",
-            slot, block_producer_addr
+            "AURELIEN: Block prod {}: block factory selected block producer address for slot {}: {}",
+            slot, slot, block_producer_addr
         );
 
         // check if the block producer address is handled by the wallet
@@ -149,9 +149,17 @@ impl BlockFactoryWorker {
             // the selected block producer is not managed locally => quit
             return;
         };
+        debug!(
+            "AURELIEN: Block prod {}: kp fetched",
+            slot,
+        );
 
         // get best parents and their periods
         let parents: Vec<(BlockId, u64)> = self.channels.consensus.get_best_parents(); // Vec<(parent_id, parent_period)>
 
         // generate the local storage object
+        debug!(
+            "AURELIEN: Block prod {}: parents fetched",
+            slot,
+        );
         let mut block_storage = self.channels.storage.clone_without_refs();
 
         // claim block parents in local storage
@@ -172,12 +180,19 @@ impl BlockFactoryWorker {
         // will not panic because the thread is validated before the call
         let (same_thread_parent_id, _) = parents[slot.thread as usize];
 
+        debug!(
+            "AURELIEN: Block prod {}: before endo fetched",
+            slot,
+        );
         // gather endorsements
         let (endorsements_ids, endo_storage) = self
             .channels
             .pool
            .get_block_endorsements(&same_thread_parent_id, &slot);
-
+        debug!(
+            "AURELIEN: Block prod {}: after endo fetched",
+            slot,
+        );
         //TODO: Do we want ot populate only with endorsement id in the future ?
         let endorsements: Vec<SecureShareEndorsement> = {
             let endo_read = endo_storage.read_endorsements();
@@ -195,8 +210,15 @@ impl BlockFactoryWorker {
         block_storage.extend(endo_storage);
 
         // gather operations and compute global operations hash
+        debug!(
+            "AURELIEN: Block prod {}: before ops fetched",
+            slot,
+        );
         let (op_ids, op_storage) = self.channels.pool.get_block_operations(&slot);
-
+        debug!(
+            "AURELIEN: Block prod {}: after ops fetched",
+            slot,
+        );
         if op_ids.len() > self.cfg.max_operations_per_block as usize {
             warn!("Too many operations returned");
             return;
@@ -211,6 +233,10 @@ impl BlockFactoryWorker {
         );
 
         // create header
+        debug!(
+            "AURELIEN: Block prod {}: before get version",
+            slot,
+        );
         let current_version = self.mip_store.get_network_version_current();
         let announced_version = self.mip_store.get_network_version_to_announce();
         let header: SecuredHeader = BlockHeader::new_verifiable::<BlockHeaderSerializer, BlockId>(
@@ -254,6 +280,10 @@ impl BlockFactoryWorker {
         self.channels
             .consensus
             .register_block(block_id, slot, block_storage, true);
+        debug!(
+            "AURELIEN: Block prod {}: after register block",
+            slot,
+        );
     }
 
     /// main run loop of the block creator thread

From 243a9199f1394c768bb0e01dc79241db9201dbbc Mon Sep 17 00:00:00 2001
From: AurelienFT
Date: Wed, 14 Jun 2023 17:38:30 +0200
Subject: [PATCH 08/24] Fix logs

---
 massa-factory-worker/src/block_factory.rs | 49 ++++++++---------------
 1 file changed, 16 insertions(+), 33 deletions(-)

diff --git a/massa-factory-worker/src/block_factory.rs b/massa-factory-worker/src/block_factory.rs
index e6961dbf323..ad2a30cbf02 100644
--- a/massa-factory-worker/src/block_factory.rs
+++ b/massa-factory-worker/src/block_factory.rs
@@ -149,17 +149,11 @@ impl BlockFactoryWorker {
             // the selected block producer is not managed locally => quit
             return;
         };
-        debug!(
-            "AURELIEN: Block prod {}: kp fetched",
-            slot,
-        );
+        debug!("AURELIEN: Block prod {}: kp fetched", slot,);
 
         // get best parents and their periods
         let parents: Vec<(BlockId, u64)> = self.channels.consensus.get_best_parents(); // Vec<(parent_id, parent_period)>
 
         // generate the local storage object
-        debug!(
-            "AURELIEN: Block prod {}: parents fetched",
-            slot,
-        );
+        debug!("AURELIEN: Block prod {}: parents fetched", slot,);
         let mut block_storage = self.channels.storage.clone_without_refs();
 
         // claim block parents in local storage
@@ -174,19 +168,13 @@ impl BlockFactoryWorker {
         // will not panic because the thread is validated before the call
         let (same_thread_parent_id, _) = parents[slot.thread as usize];
 
-        debug!(
-            "AURELIEN: Block prod {}: before endo fetched",
-            slot,
-        );
+        debug!("AURELIEN: Block prod {}: before endo fetched", slot,);
         // gather endorsements
         let (endorsements_ids, endo_storage) = self
             .channels
             .pool
             .get_block_endorsements(&same_thread_parent_id, &slot);
-        debug!(
-            "AURELIEN: Block prod {}: after endo fetched",
-            slot,
-        );
+        debug!("AURELIEN: Block prod {}: after endo fetched", slot,);
         //TODO: Do we want ot populate only with endorsement id in the future ?
         let endorsements: Vec<SecureShareEndorsement> = {
             let endo_read = endo_storage.read_endorsements();
@@ -210,15 +198,9 @@ impl BlockFactoryWorker {
         block_storage.extend(endo_storage);
 
         // gather operations and compute global operations hash
-        debug!(
-            "AURELIEN: Block prod {}: before ops fetched",
-            slot,
-        );
+        debug!("AURELIEN: Block prod {}: before ops fetched", slot,);
         let (op_ids, op_storage) = self.channels.pool.get_block_operations(&slot);
-        debug!(
-            "AURELIEN: Block prod {}: after ops fetched",
-            slot,
-        );
+        debug!("AURELIEN: Block prod {}: after ops fetched", slot,);
         if op_ids.len() > self.cfg.max_operations_per_block as usize {
             warn!("Too many operations returned");
             return;
@@ -233,12 +215,13 @@ impl BlockFactoryWorker {
         );
 
         // create header
-        debug!(
-            "AURELIEN: Block prod {}: before get version",
-            slot,
-        );
+        debug!("AURELIEN: Block prod {}: before get version", slot,);
         let current_version = self.mip_store.get_network_version_current();
         let announced_version = self.mip_store.get_network_version_to_announce();
+        debug!(
+            "AURELIEN: Block prod {}: before create block and get denunciations",
+            slot,
+        );
         let header: SecuredHeader = BlockHeader::new_verifiable::<BlockHeaderSerializer, BlockId>(
             BlockHeader {
                 current_version,
@@ -253,7 +236,10 @@ impl BlockFactoryWorker {
             block_producer_keypair,
         )
         .expect("error while producing block header");
-
+        debug!(
+            "AURELIEN: Block prod {}: after create block and get denunciations",
+            slot,
+        );
         // create block
         let block_ = Block {
             header,
@@ -280,10 +266,7 @@ impl BlockFactoryWorker {
         self.channels
             .consensus
             .register_block(block_id, slot, block_storage, true);
-        debug!(
-            "AURELIEN: Block prod {}: after register block",
-            slot,
-        );
+        debug!("AURELIEN: Block prod {}: after register block", slot,);
     }
 
     /// main run loop of the block creator thread

From 0c1148210dc7b75d6f141cf3780cadc436afebd1 Mon Sep 17 00:00:00 2001
From: AurelienFT
Date: Wed, 14 Jun 2023 18:19:30 +0200
Subject: [PATCH 09/24] Add more debug on op pool

---
 massa-pool-worker/src/operation_pool.rs | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/massa-pool-worker/src/operation_pool.rs b/massa-pool-worker/src/operation_pool.rs
index a5d0af88a8c..663b7217f23 100644
--- a/massa-pool-worker/src/operation_pool.rs
+++ b/massa-pool-worker/src/operation_pool.rs
@@ -113,7 +113,7 @@ impl OperationPool {
             .iter()
             .copied()
             .collect::<Vec<_>>();
-
+        debug!("AURELIEN: Op pool: add_operations start with {} ops: {:?}", items.len(), &items);
         let mut added = PreHashSet::with_capacity(items.len());
         let mut removed = PreHashSet::with_capacity(items.len());
 
@@ -190,6 +190,7 @@ impl OperationPool {
 
         // Clean the removed operations from storage.
         self.storage.drop_operation_refs(&removed);
+        debug!("AURELIEN: Op pool: add_operations end");
     }
 
     /// get operations for block creation
@@ -198,6 +199,7 @@ impl OperationPool {
     /// - fit inside the block
     /// - is the most profitable for block producer
     pub fn get_block_operations(&self, slot: &Slot) -> (Vec<OperationId>, Storage) {
+        println!("AURELIEN: Block prod {}: Op pool: get_block_operations start", slot);
         // init list of selected operation IDs
         let mut op_ids = Vec::new();
 
@@ -285,6 +287,7 @@ impl OperationPool {
             // update balance cache
             *creator_balance = creator_balance.saturating_sub(op_info.max_spending);
         }
+        println!("AURELIEN: Block prod {}: Op pool: get_block_operations end iterating", slot);
 
         // generate storage
         let mut res_storage = self.storage.clone_without_refs();
@@ -293,6 +296,7 @@ impl OperationPool {
         if claimed_ops.len() != claim_ops.len() {
             panic!("could not claim all operations from storage");
         }
+        println!("AURELIEN: Block prod {}: Op pool: get_block_operations end function", slot);
 
         (op_ids, res_storage)
     }

From ac556dd05df89220430e41d74f2e789440e01d99 Mon Sep 17 00:00:00 2001
From: AurelienFT
Date: Wed, 14 Jun 2023 18:35:45 +0200
Subject: [PATCH 10/24] fix print

---
 massa-pool-worker/src/operation_pool.rs | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/massa-pool-worker/src/operation_pool.rs b/massa-pool-worker/src/operation_pool.rs
index 663b7217f23..e9f9b4d8286 100644
--- a/massa-pool-worker/src/operation_pool.rs
+++ b/massa-pool-worker/src/operation_pool.rs
@@ -113,7 +113,7 @@ impl OperationPool {
             .iter()
             .copied()
             .collect::<Vec<_>>();
-        debug!("AURELIEN: Op pool: add_operations start with {} ops: {:?}", items.len(), &items);
+        debug!("AURELIEN: Op pool: add_operations start with {} ops", items.len());
         let mut added = PreHashSet::with_capacity(items.len());
         let mut removed = PreHashSet::with_capacity(items.len());
 
@@ -199,7 +199,7 @@ impl OperationPool {
     /// - fit inside the block
     /// - is the most profitable for block producer
     pub fn get_block_operations(&self, slot: &Slot) -> (Vec<OperationId>, Storage) {
-        println!("AURELIEN: Block prod {}: Op pool: get_block_operations start", slot);
+        debug!("AURELIEN: Block prod {}: Op pool: get_block_operations start", slot);
         // init list of selected operation IDs
         let mut op_ids = Vec::new();
 
@@ -287,7 +287,7 @@ impl OperationPool {
             // update balance cache
             *creator_balance = creator_balance.saturating_sub(op_info.max_spending);
         }
-        println!("AURELIEN: Block prod {}: Op pool: get_block_operations end iterating", slot);
+        debug!("AURELIEN: Block prod {}: Op pool: get_block_operations end iterating", slot);
 
         // generate storage
         let mut res_storage = self.storage.clone_without_refs();
@@ -296,7 +296,7 @@ impl OperationPool {
         if claimed_ops.len() != claim_ops.len() {
             panic!("could not claim all operations from storage");
         }
-        println!("AURELIEN: Block prod {}: Op pool: get_block_operations end function", slot);
+        debug!("AURELIEN: Block prod {}: Op pool: get_block_operations end function", slot);
 
         (op_ids, res_storage)
     }

From c9fc187a15d1d680843841d23196db9952a758f9 Mon Sep 17 00:00:00 2001
From: AurelienFT
Date: Wed, 14 Jun 2023 18:58:33 +0200
Subject: [PATCH 11/24] Add logs on iterating.
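Note: the per-iteration LEVEL2 timings added below are meant to be read against the
block-latency figures printed by PATCH 03/05. A minimal, self-contained sketch of that
latency computation in plain Rust follows; it assumes the slot-timestamp formula
genesis + period * t0 + thread * (t0 / thread_count), as in
massa_models::timeslots::get_block_slot_timestamp, and the constants in main() are
placeholders, not real network parameters:

    use std::time::{SystemTime, UNIX_EPOCH};

    /// Millisecond timestamp of a slot:
    /// genesis + period * t0 + thread * (t0 / thread_count).
    fn slot_timestamp_ms(genesis_ms: u64, t0_ms: u64, thread_count: u8, period: u64, thread: u8) -> u64 {
        genesis_ms + period * t0_ms + (thread as u64) * (t0_ms / thread_count as u64)
    }

    /// Latency of a block: wall-clock "now" minus its slot timestamp,
    /// saturating at zero so an early block reports 0 ms.
    fn block_latency_ms(genesis_ms: u64, t0_ms: u64, thread_count: u8, period: u64, thread: u8) -> u64 {
        let now_ms = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .expect("system clock before UNIX epoch")
            .as_millis() as u64;
        now_ms.saturating_sub(slot_timestamp_ms(genesis_ms, t0_ms, thread_count, period, thread))
    }

    fn main() {
        // Hypothetical parameters: 32 threads, 16 s slot cycle (t0), made-up genesis.
        let (genesis_ms, t0_ms, thread_count) = (1_686_000_000_000u64, 16_000u64, 32u8);
        println!("latency: {} ms", block_latency_ms(genesis_ms, t0_ms, thread_count, 42, 7));
    }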
---
 massa-pool-worker/src/operation_pool.rs | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/massa-pool-worker/src/operation_pool.rs b/massa-pool-worker/src/operation_pool.rs
index e9f9b4d8286..a3bab1cadea 100644
--- a/massa-pool-worker/src/operation_pool.rs
+++ b/massa-pool-worker/src/operation_pool.rs
@@ -214,6 +214,7 @@ impl OperationPool {
 
         // iterate over pool operations in the right thread, from best to worst
         for cursor in self.sorted_ops_per_thread[slot.thread as usize].iter() {
+            debug!("AURELIEN: Block prod {}: LEVEL2: Op pool: get_block_operations start one iter on op id {}", slot, cursor.get_id());
             // if we have reached the maximum number of operations, stop
             if remaining_ops == 0 {
                 break;
@@ -222,7 +223,7 @@ impl OperationPool {
                 .operations
                 .get(&cursor.get_id())
                 .expect("the operation should be in self.operations at this point");
-
+            debug!("AURELIEN: Block prod {}: LEVEL2: Op pool: get_block_operations one iter on op id {} cursor acquired", slot, cursor.get_id());
             // exclude ops for which the block slot is outside of their validity range
             if !op_info.validity_period_range.contains(&slot.period) {
                 continue;
@@ -238,6 +239,7 @@ impl OperationPool {
                 continue;
             }
 
+            debug!("AURELIEN: Block prod {}: LEVEL2: Op pool: get_block_operations one iter on op id {} before unexecuted check", slot, cursor.get_id());
             // check if the op was already executed
             // TODO batch this
             if self
@@ -247,7 +249,7 @@ impl OperationPool {
             {
                 continue;
             }
-
+            debug!("AURELIEN: Block prod {}: LEVEL2: Op pool: get_block_operations one iter on op id {} after unexecuted check", slot, cursor.get_id());
             // check balance
             //TODO: It's a weird behaviour because if the address is created afterwards this operation will be executed
             // and also it spams the pool maybe we should just try to put the operation if there is no balance and 0 gas price
@@ -267,7 +269,7 @@ impl OperationPool {
             } else {
                 continue;
             };
-
+            debug!("AURELIEN: Block prod {}: LEVEL2: Op pool: get_block_operations one iter on op id {} after balance check", slot, cursor.get_id());
             if *creator_balance < op_info.fee {
                 continue;
             }
@@ -286,6 +288,7 @@ impl OperationPool {
 
             // update balance cache
             *creator_balance = creator_balance.saturating_sub(op_info.max_spending);
+            debug!("AURELIEN: Block prod {}: LEVEL2: Op pool: get_block_operations end one iter on op id {}", slot, cursor.get_id());
         }
         debug!("AURELIEN: Block prod {}: Op pool: get_block_operations end iterating", slot);

From 090dd480e7e8dacd9498b99afd6ecf6f16815efd Mon Sep 17 00:00:00 2001
From: AurelienFT
Date: Wed, 14 Jun 2023 23:51:40 +0200
Subject: [PATCH 12/24] adapt logs

---
 massa-execution-worker/src/execution.rs   | 8 ++++++++
 massa-factory-worker/src/block_factory.rs | 8 --------
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/massa-execution-worker/src/execution.rs b/massa-execution-worker/src/execution.rs
index 0cc9d813448..ca865732957 100644
--- a/massa-execution-worker/src/execution.rs
+++ b/massa-execution-worker/src/execution.rs
@@ -1626,15 +1626,19 @@ impl ExecutionState {
         ops: &PreHashSet<OperationId>,
         thread: u8,
     ) -> PreHashSet<OperationId> {
+        debug!("AURELIEN: Execution: {:?} start op check", ops);
         let mut ops = ops.clone();
 
         if ops.is_empty() {
+            debug!("AURELIEN: Execution: {:?} stop ops is empty", ops);
             return ops;
         }
 
        {
             // check active history
+            debug!("AURELIEN: Execution: {:?} before take active history read", ops);
             let history = self.active_history.read();
+            debug!("AURELIEN: Execution: {:?} after take active history read", ops);
             for hist_item in history.0.iter().rev() {
                 if
hist_item.slot.thread != thread { continue; @@ -1649,12 +1653,16 @@ impl ExecutionState { return ops; } } + debug!("AURELIEN: Execution: {:?} after iter active history", ops); } { // check final state + debug!("AURELIEN: Execution: {:?} before take final state read", ops); let final_state = self.final_state.read(); + debug!("AURELIEN: Execution: {:?} after take final state read", ops); ops.retain(|op_id| !final_state.executed_ops.contains(op_id)); + debug!("AURELIEN: Execution: {:?} after filter ops final state", ops); } ops diff --git a/massa-factory-worker/src/block_factory.rs b/massa-factory-worker/src/block_factory.rs index ad2a30cbf02..380b7b54c41 100644 --- a/massa-factory-worker/src/block_factory.rs +++ b/massa-factory-worker/src/block_factory.rs @@ -149,11 +149,9 @@ impl BlockFactoryWorker { // the selected block producer is not managed locally => quit return; }; - debug!("AURELIEN: Block prod {}: kp fetched", slot,); // get best parents and their periods let parents: Vec<(BlockId, u64)> = self.channels.consensus.get_best_parents(); // Vec<(parent_id, parent_period)> // generate the local storage object - debug!("AURELIEN: Block prod {}: parents fetched", slot,); let mut block_storage = self.channels.storage.clone_without_refs(); // claim block parents in local storage @@ -174,13 +172,11 @@ impl BlockFactoryWorker { // will not panic because the thread is validated before the call let (same_thread_parent_id, _) = parents[slot.thread as usize]; - debug!("AURELIEN: Block prod {}: before endo fetched", slot,); // gather endorsements let (endorsements_ids, endo_storage) = self .channels .pool .get_block_endorsements(&same_thread_parent_id, &slot); - debug!("AURELIEN: Block prod {}: after endo fetched", slot,); //TODO: Do we want ot populate only with endorsement id in the future ? 
let endorsements: Vec = { let endo_read = endo_storage.read_endorsements(); @@ -218,10 +214,6 @@ impl BlockFactoryWorker { debug!("AURELIEN: Block prod {}: before get version", slot,); let current_version = self.mip_store.get_network_version_current(); let announced_version = self.mip_store.get_network_version_to_announce(); - debug!( - "AURELIEN: Block prod {}: before create block and get denunciations", - slot, - ); let header: SecuredHeader = BlockHeader::new_verifiable::( BlockHeader { current_version, From 46795f9df833ad3df1758f8ceb7ef8066d0e6f2a Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Thu, 15 Jun 2023 00:28:25 +0200 Subject: [PATCH 13/24] Add more debug print around versioning update --- massa-execution-worker/src/execution.rs | 23 +++++++++++++---------- massa-execution-worker/src/worker.rs | 12 +++++++++--- massa-protocol-worker/src/wrap_network.rs | 1 - 3 files changed, 22 insertions(+), 14 deletions(-) diff --git a/massa-execution-worker/src/execution.rs b/massa-execution-worker/src/execution.rs index ca865732957..1cdfcc597ac 100644 --- a/massa-execution-worker/src/execution.rs +++ b/massa-execution-worker/src/execution.rs @@ -1240,7 +1240,6 @@ impl ExecutionState { .get_prev_slot(self.config.thread_count) .expect("overflow when iterating on slots"); } - let exec_out = self.execute_slot(slot, exec_target, selector); // apply execution output to active state @@ -1273,13 +1272,16 @@ impl ExecutionState { let first_exec_output = self.active_history.write().0.pop_front(); if let Some(exec_out) = first_exec_output { if &exec_out.slot == slot && exec_out.block_id == target_id { + debug!("AURELIEN: Execution: start cache for final slot {:?}", slot); // speculative execution front result matches what we want to compute // apply the cached output and return self.apply_final_execution_output(exec_out.clone()); + debug!("AURELIEN: Execution: end apply cache for final slot {:?}", slot); // update versioning stats self.update_versioning_stats(exec_target, slot); + debug!("AURELIEN: Execution: end versioning update for final slot {:?}", slot); debug!("execute_final_slot: found in cache, applied cache and updated versioning stats"); @@ -1298,7 +1300,7 @@ impl ExecutionState { ); } } - + debug!("AURELIEN: Execution: end cache for final slot {:?}", slot); return; } else { // speculative cache mismatch @@ -1315,6 +1317,7 @@ impl ExecutionState { ); } + debug!("AURELIEN: Execution: start recompute for final slot {:?}", slot); // truncate the whole execution queue self.active_history.write().0.clear(); self.active_cursor = self.final_cursor; @@ -1630,15 +1633,12 @@ impl ExecutionState { let mut ops = ops.clone(); if ops.is_empty() { - debug!("AURELIEN: Execution: {:?} stop ops is empty", ops); return ops; } { // check active history - debug!("AURELIEN: Execution: {:?} before take active history read", ops); let history = self.active_history.read(); - debug!("AURELIEN: Execution: {:?} after take active history read", ops); for hist_item in history.0.iter().rev() { if hist_item.slot.thread != thread { continue; @@ -1653,18 +1653,14 @@ impl ExecutionState { return ops; } } - debug!("AURELIEN: Execution: {:?} after iter active history", ops); } { // check final state - debug!("AURELIEN: Execution: {:?} before take final state read", ops); let final_state = self.final_state.read(); - debug!("AURELIEN: Execution: {:?} after take final state read", ops); ops.retain(|op_id| !final_state.executed_ops.contains(op_id)); - debug!("AURELIEN: Execution: {:?} after filter ops final state", ops); } - + 
debug!("AURELIEN: Execution: {:?} end op check", ops); ops } @@ -1717,6 +1713,7 @@ impl ExecutionState { exec_target: Option<&(BlockId, Storage)>, slot: &Slot, ) { + debug!("AURELIEN: Execution: Start versioning update for {:?}", slot); // update versioning statistics if let Some((block_id, storage)) = exec_target { if let Some(block) = storage.read_blocks().get(block_id) { @@ -1729,11 +1726,13 @@ impl ExecutionState { let current_version = block.content.header.content.current_version; let announced_version = block.content.header.content.announced_version; + debug!("AURELIEN: Execution: before update_network_version_stats update for {:?}", slot); if let Ok(slot_ts) = slot_ts_ { self.mip_store.update_network_version_stats( slot_ts, Some((current_version, announced_version)), ); + debug!("AURELIEN: Execution: after update_network_version_stats update for {:?}", slot); // Now write mip store changes to disk (if any) let mut db_batch = DBBatch::new(); @@ -1747,6 +1746,7 @@ impl ExecutionState { ) .unwrap(); + debug!("AURELIEN: Execution: before update_batches update for {:?}", slot); self.mip_store .update_batches( &mut db_batch, @@ -1759,12 +1759,15 @@ impl ExecutionState { slot_prev_ts, slot_ts, e ) }); + debug!("AURELIEN: Execution: after update_batches update for {:?}", slot); + debug!("AURELIEN: Execution: after update_batches update for {:?}", slot); self.final_state.write().db.write().write_batch( db_batch, db_versioning_batch, None, ); + debug!("AURELIEN: Execution: after write_batch update for {:?}", slot); } else { warn!("Unable to get slot timestamp for slot: {} in order to update mip_store stats", slot); } diff --git a/massa-execution-worker/src/worker.rs b/massa-execution-worker/src/worker.rs index 46c251e11c0..474331dea4a 100644 --- a/massa-execution-worker/src/worker.rs +++ b/massa-execution-worker/src/worker.rs @@ -96,7 +96,9 @@ impl ExecutionThread { let (req, resp_tx) = req_resp.into_request_sender_pair(); // Acquire write access to the execution state (for cache updates) and execute the read-only request - let outcome = self.execution_state.write().execute_readonly_request(req); + debug!("AURELIEN: Execution: start execute_one_readonly_request {:?}", &req); + let outcome = self.execution_state.write().execute_readonly_request(req.clone()); + debug!("AURELIEN: Execution: end execute_one_readonly_request {:?}", req); // Send the execution output through resp_tx. 
// Ignore errors because they just mean that the request emitter dropped the received @@ -195,17 +197,21 @@ impl ExecutionThread { let run_result = self.slot_sequencer.run_task_with( |is_final: bool, slot: &Slot, content: Option<&(BlockId, Storage)>| { if is_final { + debug!("AURELIEN: Execution: start execution final of slot {:?}", slot); self.execution_state.write().execute_final_slot( slot, content, self.selector.clone(), - ) + ); + debug!("AURELIEN: Execution: end execution final of slot {:?}", slot); } else { + debug!("AURELIEN: Execution: start execution candidate of slot {:?}", slot); self.execution_state.write().execute_candidate_slot( slot, content, self.selector.clone(), - ) + ); + debug!("AURELIEN: Execution: end execution candidate of slot {:?}", slot); } }, ); diff --git a/massa-protocol-worker/src/wrap_network.rs b/massa-protocol-worker/src/wrap_network.rs index 95df2e74146..7a6cc2ba845 100644 --- a/massa-protocol-worker/src/wrap_network.rs +++ b/massa-protocol-worker/src/wrap_network.rs @@ -49,7 +49,6 @@ impl ActiveConnectionsTrait for SharedActiveConnections { high_priority: bool, ) -> Result<(), ProtocolError> { if let Some(connection) = self.read().connections.get(peer_id) { - println!("AURELIEN: send to peer {}, message {:?}", peer_id, message); connection .send_channels .try_send(message_serializer, message, high_priority) From 34d423fa1d07faf67a5003449ec8ef911920088f Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Thu, 15 Jun 2023 00:50:08 +0200 Subject: [PATCH 14/24] Add more debug ops. --- massa-execution-worker/src/execution.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/massa-execution-worker/src/execution.rs b/massa-execution-worker/src/execution.rs index 1cdfcc597ac..db1b66b86dc 100644 --- a/massa-execution-worker/src/execution.rs +++ b/massa-execution-worker/src/execution.rs @@ -197,11 +197,13 @@ impl ExecutionState { /// # Arguments /// * `exec_out`: execution output to apply pub fn apply_final_execution_output(&mut self, mut exec_out: ExecutionOutput) { + debug!("AURELIEN: Execution: start function cache for final slot {:?}", exec_out.slot); if self.final_cursor >= exec_out.slot { panic!("attempting to apply a final execution output at or before the current final_cursor"); } // count stats + debug!("AURELIEN: Execution: before stat counter {:?}", exec_out.slot); if exec_out.block_id.is_some() { self.stats_counter.register_final_blocks(1); self.stats_counter.register_final_executed_operations( @@ -211,11 +213,14 @@ impl ExecutionState { exec_out.state_changes.executed_denunciations_changes.len(), ); } + debug!("AURELIEN: Execution: after stat counter {:?}", exec_out.slot); + debug!("AURELIEN: Execution: before stat counter {:?}", exec_out.slot); // apply state changes to the final ledger self.final_state .write() .finalize(exec_out.slot, exec_out.state_changes); + debug!("AURELIEN: Execution: end stat counter {:?}", exec_out.slot); // update the final ledger's slot self.final_cursor = exec_out.slot; @@ -227,15 +232,20 @@ impl ExecutionState { } // append generated events to the final event store + debug!("AURELIEN: Execution: before finalize events for final slot {:?}", exec_out.slot); exec_out.events.finalize(); self.final_events.extend(exec_out.events); self.final_events.prune(self.config.max_final_events); + debug!("AURELIEN: Execution: after finalize events for final slot {:?}", exec_out.slot); // update the prometheus metrics + debug!("AURELIEN: Execution: before metrics for final slot {:?}", exec_out.slot); self.massa_metrics 
.set_active_cursor(self.active_cursor.period, self.active_cursor.thread); self.massa_metrics .set_final_cursor(self.final_cursor.period, self.final_cursor.thread); + debug!("AURELIEN: Execution: after metrics for final slot {:?}", exec_out.slot); + debug!("AURELIEN: Execution: end function cache for final slot {:?}", exec_out.slot); } /// Applies an execution output to the active (non-final) state From c67d948cc9cb9b0b502d52d0c6ccf53af7a130e5 Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Thu, 15 Jun 2023 01:11:50 +0200 Subject: [PATCH 15/24] Add logs in finalize. --- massa-execution-worker/src/execution.rs | 4 ++-- massa-final-state/src/final_state.rs | 18 ++++++++++++++++++ 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/massa-execution-worker/src/execution.rs b/massa-execution-worker/src/execution.rs index db1b66b86dc..d0d0e1ee348 100644 --- a/massa-execution-worker/src/execution.rs +++ b/massa-execution-worker/src/execution.rs @@ -215,12 +215,12 @@ impl ExecutionState { } debug!("AURELIEN: Execution: after stat counter {:?}", exec_out.slot); - debug!("AURELIEN: Execution: before stat counter {:?}", exec_out.slot); + debug!("AURELIEN: Execution: before finalize {:?}", exec_out.slot); // apply state changes to the final ledger self.final_state .write() .finalize(exec_out.slot, exec_out.state_changes); - debug!("AURELIEN: Execution: end stat counter {:?}", exec_out.slot); + debug!("AURELIEN: Execution: after finalize {:?}", exec_out.slot); // update the final ledger's slot self.final_cursor = exec_out.slot; diff --git a/massa-final-state/src/final_state.rs b/massa-final-state/src/final_state.rs index 7b937c3f8a0..4db721b787f 100644 --- a/massa-final-state/src/final_state.rs +++ b/massa-final-state/src/final_state.rs @@ -530,6 +530,7 @@ impl FinalState { /// /// Panics if the new slot is not the one coming just after the current one. 
pub fn finalize(&mut self, slot: Slot, changes: StateChanges) { + debug!("AURELIEN: Execution: finalize slot {}: start function", slot); let cur_slot = self.db.read().get_change_id().expect(CHANGE_ID_DESER_ERROR); // check slot consistency let next_slot = cur_slot @@ -546,34 +547,46 @@ impl FinalState { // apply the state changes to the batch + debug!("AURELIEN: Execution: finalize slot {}: before apply async pool", slot); self.async_pool .apply_changes_to_batch(&changes.async_pool_changes, &mut db_batch); + debug!("AURELIEN: Execution: finalize slot {}: after apply async pool", slot); + debug!("AURELIEN: Execution: finalize slot {}: before apply pos state", slot); self.pos_state .apply_changes_to_batch(changes.pos_changes.clone(), slot, true, &mut db_batch) .expect("could not settle slot in final state proof-of-stake"); + debug!("AURELIEN: Execution: finalize slot {}: after apply pos state", slot); // TODO: // do not panic above, it might just mean that the lookback cycle is not available // bootstrap again instead + debug!("AURELIEN: Execution: finalize slot {}: before apply ledger changes", slot); self.ledger .apply_changes_to_batch(changes.ledger_changes.clone(), &mut db_batch); + debug!("AURELIEN: Execution: finalize slot {}: after apply ledger changes", slot); + debug!("AURELIEN: Execution: finalize slot {}: before apply executed ops changes", slot); self.executed_ops.apply_changes_to_batch( changes.executed_ops_changes.clone(), slot, &mut db_batch, ); + debug!("AURELIEN: Execution: finalize slot {}: after apply executed ops changes", slot); + debug!("AURELIEN: Execution: finalize slot {}: before apply executed denunciations changes", slot); self.executed_denunciations.apply_changes_to_batch( changes.executed_denunciations_changes.clone(), slot, &mut db_batch, ); + debug!("AURELIEN: Execution: finalize slot {}: after apply executed denunciations changes", slot); + debug!("AURELIEN: Execution: finalize slot {}: before db write batch", slot); self.db .write() .write_batch(db_batch, Default::default(), Some(slot)); + debug!("AURELIEN: Execution: finalize slot {}: after db write batch", slot); let final_state_hash = self.db.read().get_db_hash(); @@ -581,6 +594,7 @@ impl FinalState { info!("final_state hash at slot {}: {}", slot, final_state_hash); // Backup DB if needed + debug!("AURELIEN: Execution: finalize slot {}: before backup", slot); if slot.period % PERIODS_BETWEEN_BACKUPS == 0 && slot.period != 0 && slot.thread == 0 { let state_slot = self.db.read().get_change_id(); match state_slot { @@ -601,11 +615,15 @@ impl FinalState { self.db.read().backup_db(slot); } + debug!("AURELIEN: Execution: finalize slot {}: after backup", slot); // feed final_state_hash to the last cycle + debug!("AURELIEN: Execution: finalize slot {}: before feed_cycle_state_hash", slot); let cycle = slot.get_cycle(self.config.periods_per_cycle); self.pos_state .feed_cycle_state_hash(cycle, final_state_hash); + debug!("AURELIEN: Execution: finalize slot {}: after feed_cycle_state_hash", slot); + debug!("AURELIEN: Execution: finalize slot {}: end function", slot); } /// After bootstrap or load from disk, recompute all the caches. 
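
[Editor's note, between patches 15 and 16 — not part of the patch series.] Patches 14 and 15 locate a stall by bracketing every suspect step with paired before/after debug! lines. The same bracketing can be written once as a guard that logs on creation and logs the elapsed time when dropped; this is only a sketch of that pattern under the same tracing crate the patches use (the ScopeTimer type and finalize_slot_example function are the editor's names, not code from these patches):

    use std::time::Instant;
    use tracing::debug;

    /// Logs "<label>: start" on creation and "<label>: end" plus the
    /// elapsed time when dropped at the end of the enclosing scope.
    struct ScopeTimer {
        label: &'static str,
        start: Instant,
    }

    impl ScopeTimer {
        fn new(label: &'static str) -> Self {
            debug!("{}: start", label);
            Self { label, start: Instant::now() }
        }
    }

    impl Drop for ScopeTimer {
        fn drop(&mut self) {
            debug!("{}: end after {:?}", self.label, self.start.elapsed());
        }
    }

    // Usage: a before/after pair of logs collapses to one line.
    fn finalize_slot_example() {
        let _timer = ScopeTimer::new("finalize slot");
        // ... apply changes, write the batch, back up the DB ...
    } // `_timer` is dropped here; the end log carries the duration
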
From 29ffba49ab967ec85fb8e2de6c8e2f05a9d9cd9c Mon Sep 17 00:00:00 2001
From: AurelienFT
Date: Thu, 15 Jun 2023 01:36:34 +0200
Subject: [PATCH 16/24] Add logs in massa db

---
 Cargo.lock | 1 +
 massa-db/Cargo.toml | 1 +
 massa-db/src/massa_db.rs | 25 +++++++++++++++++++++++++
 3 files changed, 27 insertions(+)

diff --git a/Cargo.lock b/Cargo.lock
index de7f33b54fa..f84d8c530a9 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2629,6 +2629,7 @@ dependencies = [
 "parking_lot",
 "rocksdb",
 "thiserror",
+ "tracing",
 ]

[[package]]
diff --git a/massa-db/Cargo.toml b/massa-db/Cargo.toml
index b06c258172a..7c5b32a6c0b 100644
--- a/massa-db/Cargo.toml
+++ b/massa-db/Cargo.toml
@@ -11,6 +11,7 @@ rocksdb = "0.20"
 displaydoc = "0.2"
 thiserror = "1.0"
 lsmtree = "=0.1.1"
+tracing = "0.1"

 # Custom modules
 massa_hash = { path = "../massa-hash" }
diff --git a/massa-db/src/massa_db.rs b/massa-db/src/massa_db.rs
index 8a3b88e0da0..318faf15b1a 100644
--- a/massa-db/src/massa_db.rs
+++ b/massa-db/src/massa_db.rs
@@ -23,6 +23,7 @@ use std::{
 path::PathBuf,
 sync::Arc,
 };
+use tracing::debug;

 type Key = Vec<u8>;
 type Value = Vec<u8>;
@@ -430,6 +431,7 @@ where
 change_id: Option,
 reset_history: bool,
 ) -> Result<(), MassaDBError> {
+ debug!("AURELIEN: MassaDB: start write changes");
 if let Some(change_id) = change_id.clone() {
 if change_id < self.get_change_id().expect(CHANGE_ID_DESER_ERROR) {
 return Err(MassaDBError::InvalidChangeID(String::from(
@@ -441,34 +443,44 @@ where
 let handle_state = self.db.cf_handle(STATE_CF).expect(CF_ERROR);
 let handle_metadata = self.db.cf_handle(METADATA_CF).expect(CF_ERROR);
 let handle_versioning = self.db.cf_handle(VERSIONING_CF).expect(CF_ERROR);
+ debug!("AURELIEN: MassaDB: after get handles");

 *self.current_batch.lock() = WriteBatch::default();

 for (key, value) in changes.iter() {
 if let Some(value) = value {
+ debug!("AURELIEN: MassaDB: before insert value");
 self.current_batch.lock().put_cf(handle_state, key, value);
 let key_hash = Hash::compute_from(key);
 let value_hash = Hash::compute_from(value);
+ debug!("AURELIEN: MassaDB: after insert value");

+ debug!("AURELIEN: MassaDB: before tree update");
 self.lsmtree
 .update(
 key_hash.to_bytes(),
 Bytes::from(value_hash.to_bytes().to_vec()),
 )
 .expect(LSMTREE_ERROR);
+ debug!("AURELIEN: MassaDB: after tree update");
 } else {
+ debug!("AURELIEN: MassaDB: before delete");
 self.current_batch.lock().delete_cf(handle_state, key);
 let key_hash = Hash::compute_from(key);
+ debug!("AURELIEN: MassaDB: after delete");

+ debug!("AURELIEN: MassaDB: before tree remove");
 self.lsmtree
 .remove(key_hash.to_bytes())
 .expect(LSMTREE_ERROR);
+ debug!("AURELIEN: MassaDB: after tree remove");
 }
 }

 // in versioning_changes, we have the data that we do not want to include in hash
 // e.g. everything that is not in 'Active' state (so hashes remain compatible)
 for (key, value) in versioning_changes.iter() {
+ debug!("AURELIEN: MassaDB: before add versioning change");
 if let Some(value) = value {
 self.current_batch
 .lock()
 .put_cf(handle_versioning, key, value);
 } else {
 self.current_batch.lock().delete_cf(handle_versioning, key);
 }
+ debug!("AURELIEN: MassaDB: after add versioning change");
 }

+ debug!("AURELIEN: MassaDB: before change id");
 if let Some(change_id) = change_id {
 self.set_change_id_to_batch(change_id);
 }
+ debug!("AURELIEN: MassaDB: after change id");

+ debug!("AURELIEN: MassaDB: before state hash change");
 self.current_batch
 .lock()
 .put_cf(handle_metadata, STATE_HASH_KEY, self.lsmtree.root());
+ debug!("AURELIEN: MassaDB: after state hash change");

+ debug!("AURELIEN: 
MassaDB: before write rocks");
 {
 let mut current_batch_guard = self.current_batch.lock();
 let batch = WriteBatch::from_data(current_batch_guard.data());
@@ -495,9 +513,11 @@
 self.db.write(batch).map_err(|e| {
 MassaDBError::RocksDBError(format!("Can't write batch to disk: {}", e))
 })?;
 }
+ debug!("AURELIEN: MassaDB: after write rocks");

 self.current_hashmap.write().clear();

+ debug!("AURELIEN: MassaDB: before changes history update");
 self.change_history
 .entry(self.get_change_id().expect(CHANGE_ID_DESER_ERROR))
 .and_modify(|map| map.extend(changes.clone().into_iter()))
@@ -507,7 +527,10 @@
 .entry(self.get_change_id().expect(CHANGE_ID_DESER_ERROR))
 .and_modify(|map| map.extend(versioning_changes.clone().into_iter()))
 .or_insert(versioning_changes);
+ debug!("AURELIEN: MassaDB: after changes history update");
+
+ debug!("AURELIEN: MassaDB: before cleaning");
 if reset_history {
 self.change_history.clear();
 }
@@ -515,7 +538,9 @@
 while self.change_history.len() > self.config.max_history_length {
 self.change_history.pop_first();
 }
+ debug!("AURELIEN: MassaDB: after cleaning");

+ debug!("AURELIEN: MassaDB: end write changes");
 Ok(())
 }

From 2008760f446fa1a46a16a19299f97bc3b5d4bdc3 Mon Sep 17 00:00:00 2001
From: sydhds
Date: Thu, 15 Jun 2023 11:07:04 +0200
Subject: [PATCH 17/24] Add get_hash_kind_version in final state

---
 massa-final-state/src/final_state.rs | 86 ++++++++++++++++++++++------
 massa-node/src/main.rs | 43 ++++++++++++--
 massa-versioning/src/versioning.rs | 37 +++++++++++-
 3 files changed, 143 insertions(+), 23 deletions(-)

diff --git a/massa-final-state/src/final_state.rs b/massa-final-state/src/final_state.rs
index 4db721b787f..b7b2de7ef71 100644
--- a/massa-final-state/src/final_state.rs
+++ b/massa-final-state/src/final_state.rs
@@ -19,12 +19,13 @@
 use massa_ledger_exports::LedgerController;
 use massa_models::config::PERIODS_BETWEEN_BACKUPS;
 use massa_models::slot::Slot;
 use massa_pos_exports::{PoSFinalState, SelectorController};
-use massa_versioning::versioning::MipStore;
+use massa_versioning::versioning::{MipComponent, MipStore};

 use parking_lot::RwLock;
 use rocksdb::IteratorMode;

 use tracing::{debug, info, warn};

+use massa_time::MassaTime;
 use std::sync::Arc;

 /// Represents a final state `(ledger, async pool, executed_ops, executed_denunciations and the state of the PoS)`
@@ -530,7 +531,10 @@ impl FinalState {
 ///
 /// Panics if the new slot is not the one coming just after the current one.
pub fn finalize(&mut self, slot: Slot, changes: StateChanges) { - debug!("AURELIEN: Execution: finalize slot {}: start function", slot); + debug!( + "AURELIEN: Execution: finalize slot {}: start function", + slot + ); let cur_slot = self.db.read().get_change_id().expect(CHANGE_ID_DESER_ERROR); // check slot consistency let next_slot = cur_slot @@ -547,46 +551,82 @@ impl FinalState { // apply the state changes to the batch - debug!("AURELIEN: Execution: finalize slot {}: before apply async pool", slot); + debug!( + "AURELIEN: Execution: finalize slot {}: before apply async pool", + slot + ); self.async_pool .apply_changes_to_batch(&changes.async_pool_changes, &mut db_batch); - debug!("AURELIEN: Execution: finalize slot {}: after apply async pool", slot); + debug!( + "AURELIEN: Execution: finalize slot {}: after apply async pool", + slot + ); - debug!("AURELIEN: Execution: finalize slot {}: before apply pos state", slot); + debug!( + "AURELIEN: Execution: finalize slot {}: before apply pos state", + slot + ); self.pos_state .apply_changes_to_batch(changes.pos_changes.clone(), slot, true, &mut db_batch) .expect("could not settle slot in final state proof-of-stake"); - debug!("AURELIEN: Execution: finalize slot {}: after apply pos state", slot); + debug!( + "AURELIEN: Execution: finalize slot {}: after apply pos state", + slot + ); // TODO: // do not panic above, it might just mean that the lookback cycle is not available // bootstrap again instead - debug!("AURELIEN: Execution: finalize slot {}: before apply ledger changes", slot); + debug!( + "AURELIEN: Execution: finalize slot {}: before apply ledger changes", + slot + ); self.ledger .apply_changes_to_batch(changes.ledger_changes.clone(), &mut db_batch); - debug!("AURELIEN: Execution: finalize slot {}: after apply ledger changes", slot); + debug!( + "AURELIEN: Execution: finalize slot {}: after apply ledger changes", + slot + ); - debug!("AURELIEN: Execution: finalize slot {}: before apply executed ops changes", slot); + debug!( + "AURELIEN: Execution: finalize slot {}: before apply executed ops changes", + slot + ); self.executed_ops.apply_changes_to_batch( changes.executed_ops_changes.clone(), slot, &mut db_batch, ); - debug!("AURELIEN: Execution: finalize slot {}: after apply executed ops changes", slot); + debug!( + "AURELIEN: Execution: finalize slot {}: after apply executed ops changes", + slot + ); - debug!("AURELIEN: Execution: finalize slot {}: before apply executed denunciations changes", slot); + debug!( + "AURELIEN: Execution: finalize slot {}: before apply executed denunciations changes", + slot + ); self.executed_denunciations.apply_changes_to_batch( changes.executed_denunciations_changes.clone(), slot, &mut db_batch, ); - debug!("AURELIEN: Execution: finalize slot {}: after apply executed denunciations changes", slot); + debug!( + "AURELIEN: Execution: finalize slot {}: after apply executed denunciations changes", + slot + ); - debug!("AURELIEN: Execution: finalize slot {}: before db write batch", slot); + debug!( + "AURELIEN: Execution: finalize slot {}: before db write batch", + slot + ); self.db .write() .write_batch(db_batch, Default::default(), Some(slot)); - debug!("AURELIEN: Execution: finalize slot {}: after db write batch", slot); + debug!( + "AURELIEN: Execution: finalize slot {}: after db write batch", + slot + ); let final_state_hash = self.db.read().get_db_hash(); @@ -618,11 +658,17 @@ impl FinalState { debug!("AURELIEN: Execution: finalize slot {}: after backup", slot); // feed final_state_hash to the last cycle 
- debug!("AURELIEN: Execution: finalize slot {}: before feed_cycle_state_hash", slot); + debug!( + "AURELIEN: Execution: finalize slot {}: before feed_cycle_state_hash", + slot + ); let cycle = slot.get_cycle(self.config.periods_per_cycle); self.pos_state .feed_cycle_state_hash(cycle, final_state_hash); - debug!("AURELIEN: Execution: finalize slot {}: after feed_cycle_state_hash", slot); + debug!( + "AURELIEN: Execution: finalize slot {}: after feed_cycle_state_hash", + slot + ); debug!("AURELIEN: Execution: finalize slot {}: end function", slot); } @@ -724,4 +770,12 @@ impl FinalState { true } + + fn get_hash_kind_version(&self) -> u32 { + // Temp code + // Return version for hash kind of final state: 0 -> LSM, 1 -> Xor + let now = MassaTime::now().expect("Cannot get current time"); + self.mip_store + .get_latest_component_version_at(&MipComponent::FinalStateHashKind, now) + } } diff --git a/massa-node/src/main.rs b/massa-node/src/main.rs index 5d290929425..55ee05cf01d 100644 --- a/massa-node/src/main.rs +++ b/massa-node/src/main.rs @@ -84,14 +84,12 @@ use massa_protocol_exports::{ProtocolConfig, ProtocolManager}; use massa_protocol_worker::{create_protocol_controller, start_protocol_controller}; use massa_storage::Storage; use massa_time::MassaTime; -use massa_versioning::{ - mips::MIP_LIST, - versioning::{MipStatsConfig, MipStore}, -}; +use massa_versioning::versioning::{MipComponent, MipInfo, MipState}; +use massa_versioning::versioning::{MipStatsConfig, MipStore}; use massa_wallet::Wallet; use parking_lot::RwLock; use peernet::transports::TransportType; -use std::collections::HashMap; +use std::collections::{BTreeMap, HashMap}; use std::path::PathBuf; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::{Condvar, Mutex}; @@ -277,8 +275,41 @@ async fn launch( block_count_considered: MIP_STORE_STATS_BLOCK_CONSIDERED, counters_max: MIP_STORE_STATS_COUNTERS_MAX, }; + let mip_0001_start = MassaTime::from_utc_ymd_hms(2023, 6, 14, 15, 0, 0).unwrap(); + let mip_0001_timeout = MassaTime::from_utc_ymd_hms(2023, 6, 14, 16, 0, 0).unwrap(); + let mip_0001_defined_start = MassaTime::from_utc_ymd_hms(2023, 2, 14, 14, 30, 0).unwrap(); + let mip_0002_start = MassaTime::from_utc_ymd_hms(2023, 6, 16, 13, 0, 0).unwrap(); + let mip_0002_timeout = MassaTime::from_utc_ymd_hms(2023, 6, 16, 16, 0, 0).unwrap(); + let mip_0002_defined_start = MassaTime::from_utc_ymd_hms(2023, 2, 19, 12, 0, 0).unwrap(); + let mip_list_1: [(MipInfo, MipState); 2] = [ + ( + MipInfo { + name: "MIP-0001".to_string(), + version: 1, + components: BTreeMap::from([ + (MipComponent::Address, 1), + (MipComponent::KeyPair, 1), + ]), + start: mip_0001_start, + timeout: mip_0001_timeout, + activation_delay: MassaTime::from_millis(100), + }, + MipState::new(mip_0001_defined_start), + ), + ( + MipInfo { + name: "MIP-0002".to_string(), + version: 2, + components: BTreeMap::from([(MipComponent::FinalStateHashKind, 1)]), + start: mip_0002_start, + timeout: mip_0002_timeout, + activation_delay: T0.saturating_mul(PERIODS_PER_CYCLE.saturating_add(1)), + }, + MipState::new(mip_0002_defined_start), + ), + ]; let mip_store = - MipStore::try_from((MIP_LIST, mip_stats_config)).expect("mip store creation failed"); + MipStore::try_from((mip_list_1, mip_stats_config)).expect("mip store creation failed"); // Create final state, either from a snapshot, or from scratch let final_state = Arc::new(parking_lot::RwLock::new( diff --git a/massa-versioning/src/versioning.rs b/massa-versioning/src/versioning.rs index 04b2cfe5431..8b44692e2ad 100644 --- 
a/massa-versioning/src/versioning.rs +++ b/massa-versioning/src/versioning.rs @@ -35,6 +35,7 @@ pub enum MipComponent { KeyPair, Block, VM, + FinalStateHashKind, #[doc(hidden)] #[num_enum(default)] __Nonexhaustive, @@ -558,6 +559,13 @@ impl MipStore { lock.update_with(lock_other.deref()) } + // Query + + pub fn get_latest_component_version_at(&self, component: &MipComponent, ts: MassaTime) -> u32 { + let lock = self.0.read(); + lock.get_latest_component_version_at(component, ts) + } + // GRPC /// Retrieve a list of MIP info with their corresponding state (as id) - used for grpc API @@ -893,7 +901,7 @@ impl MipStoreRaw { let vote_ratio = Amount::from_mantissa_scale(vote_ratio_.round() as u64, 0); - debug!("(VERSIONING LOG) vote_ratio = {} (from version counter = {} and blocks considered = {})", vote_ratio, network_version_count, block_count_considered); + debug!("[VERSIONING STATS] vote_ratio = {} (from version counter = {} and blocks considered = {})", vote_ratio, network_version_count, block_count_considered); let advance_msg = Advance { start_timestamp: mi.start, @@ -908,6 +916,33 @@ impl MipStoreRaw { } } + // Query + + fn get_latest_component_version_at(&self, component: &MipComponent, ts: MassaTime) -> u32 { + // TODO: duplicated code with the same function in versioning_factory - factorize + + let version = self + .store + .iter() + .rev() + .filter(|(vi, vsh)| { + vi.components.get(component).is_some() + && matches!(vsh.state, ComponentState::Active(_)) + }) + .find_map(|(vi, vsh)| { + let res = vsh.state_at(ts, vi.start, vi.timeout); + match res { + Ok(ComponentStateTypeId::Active) => vi.components.get(&component).copied(), + _ => None, + } + }) + .unwrap_or(0); + + version + } + + // Network restart + /// Check if store is coherent with given last network shutdown /// On a network shutdown, the MIP infos will be edited but we still need to check if this is coherent fn is_coherent_with_shutdown_period( From b368bebda5b76cd8332f735ec7f365d51931c772 Mon Sep 17 00:00:00 2001 From: Leo-Besancon Date: Thu, 15 Jun 2023 11:24:08 +0200 Subject: [PATCH 18/24] Propagated the only_use_xor bool --- massa-async-pool/src/pool.rs | 4 +- massa-bootstrap/src/client.rs | 14 +- massa-bootstrap/src/tests/scenarios.rs | 8 +- massa-bootstrap/src/tests/tools.rs | 15 +- massa-db/src/constants.rs | 1 + massa-db/src/massa_db.rs | 61 ++++-- .../src/executed_denunciations.rs | 4 +- massa-executed-ops/src/executed_ops.rs | 16 +- massa-execution-worker/src/execution.rs | 94 ++++++-- massa-execution-worker/src/tests/mock.rs | 4 +- .../src/tests/scenarios_mandatories.rs | 2 +- massa-execution-worker/src/worker.rs | 35 ++- massa-final-state/src/final_state.rs | 204 ++++++++++++------ massa-final-state/src/tests/scenarios.rs | 2 +- massa-ledger-exports/src/controller.rs | 4 +- massa-ledger-worker/src/ledger.rs | 9 +- massa-ledger-worker/src/ledger_db.rs | 17 +- .../src/test_exports/bootstrap.rs | 2 +- massa-pool-worker/src/operation_pool.rs | 20 +- massa-pos-exports/src/pos_final_state.rs | 20 +- massa-versioning/src/versioning.rs | 10 +- 21 files changed, 389 insertions(+), 157 deletions(-) diff --git a/massa-async-pool/src/pool.rs b/massa-async-pool/src/pool.rs index 8f934984138..3687feca60e 100644 --- a/massa-async-pool/src/pool.rs +++ b/massa-async-pool/src/pool.rs @@ -269,10 +269,10 @@ impl AsyncPool { /// Resets the pool to its initial state /// /// USED ONLY FOR BOOTSTRAP - pub fn reset(&mut self) { + pub fn reset(&mut self, only_use_xor: bool) { self.db .write() - .delete_prefix(ASYNC_POOL_PREFIX, STATE_CF, 
None); + .delete_prefix(ASYNC_POOL_PREFIX, STATE_CF, None, only_use_xor); self.recompute_message_info_cache(); } diff --git a/massa-bootstrap/src/client.rs b/massa-bootstrap/src/client.rs index 45d85d0d5e1..d88320163e4 100644 --- a/massa-bootstrap/src/client.rs +++ b/massa-bootstrap/src/client.rs @@ -416,11 +416,13 @@ pub fn get_state( { let mut final_state_guard = final_state.write(); + let only_use_xor = final_state_guard.get_only_use_xor(); + if !bootstrap_config.keep_ledger { // load ledger from initial ledger file final_state_guard .ledger - .load_initial_ledger() + .load_initial_ledger(only_use_xor) .map_err(|err| { BootstrapError::GeneralError(format!( "could not load initial ledger: {}", @@ -439,10 +441,12 @@ pub fn get_state( ); // TODO: should receive ver batch here? - final_state_guard - .db - .write() - .write_batch(batch, Default::default(), Some(slot)); + final_state_guard.db.write().write_batch( + batch, + Default::default(), + Some(slot), + only_use_xor, + ); } return Ok(GlobalBootstrapState::new(final_state)); } diff --git a/massa-bootstrap/src/tests/scenarios.rs b/massa-bootstrap/src/tests/scenarios.rs index 929c2d42039..7b0dbb09a9e 100644 --- a/massa-bootstrap/src/tests/scenarios.rs +++ b/massa-bootstrap/src/tests/scenarios.rs @@ -322,13 +322,13 @@ fn test_bootstrap_server() { final_write .db .write() - .write_batch(batch, Default::default(), Some(next)); + .write_batch(batch, Default::default(), Some(next), false); let final_state_hash = final_write.db.read().get_db_hash(); let cycle = next.get_cycle(final_state_local_config.periods_per_cycle.clone()); final_write .pos_state - .feed_cycle_state_hash(cycle, final_state_hash); + .feed_cycle_state_hash(cycle, final_state_hash, false); current_slot = next; } @@ -462,13 +462,13 @@ fn test_bootstrap_server() { final_write .db .write() - .write_batch(batch, Default::default(), Some(next)); + .write_batch(batch, Default::default(), Some(next), false); let final_state_hash = final_write.db.read().get_db_hash(); let cycle = next.get_cycle(final_state_local_config.periods_per_cycle.clone()); final_write .pos_state - .feed_cycle_state_hash(cycle, final_state_hash); + .feed_cycle_state_hash(cycle, final_state_hash, false); let mut list_changes_write = list_changes_clone.write(); list_changes_write.push((next, changes)); diff --git a/massa-bootstrap/src/tests/tools.rs b/massa-bootstrap/src/tests/tools.rs index 086e298b0f6..0ea8b317e70 100644 --- a/massa-bootstrap/src/tests/tools.rs +++ b/massa-bootstrap/src/tests/tools.rs @@ -179,14 +179,18 @@ fn get_random_pos_state(r_limit: u64, mut pos: PoSFinalState) -> PoSFinalState { pos.create_initial_cycle(&mut batch); - pos.db.write().write_batch(batch, Default::default(), None); + pos.db + .write() + .write_batch(batch, Default::default(), None, false); let mut batch = DBBatch::new(); pos.apply_changes_to_batch(changes, Slot::new(0, 0), false, &mut batch) .expect("Critical: Error while applying changes to pos_state"); - pos.db.write().write_batch(batch, Default::default(), None); + pos.db + .write() + .write_batch(batch, Default::default(), None, false); pos } @@ -230,7 +234,8 @@ pub fn get_random_executed_ops( let mut executed_ops = ExecutedOps::new(config.clone(), db.clone()); let mut batch = DBBatch::new(); executed_ops.apply_changes_to_batch(get_random_executed_ops_changes(10), slot, &mut batch); - db.write().write_batch(batch, Default::default(), None); + db.write() + .write_batch(batch, Default::default(), None, false); executed_ops } @@ -264,7 +269,7 @@ pub fn get_random_executed_de( 
executed_de .db .write() - .write_batch(batch, Default::default(), None); + .write_batch(batch, Default::default(), None, false); executed_de } @@ -318,7 +323,7 @@ pub fn get_random_final_state_bootstrap( async_pool .db .write() - .write_batch(batch, versioning_batch, None); + .write_batch(batch, versioning_batch, None, false); let executed_ops = get_random_executed_ops( r_limit, diff --git a/massa-db/src/constants.rs b/massa-db/src/constants.rs index d74b41e186c..9bbe76441ca 100644 --- a/massa-db/src/constants.rs +++ b/massa-db/src/constants.rs @@ -8,6 +8,7 @@ pub const STATE_CF: &str = "state"; pub const VERSIONING_CF: &str = "versioning"; pub const STATE_HASH_KEY: &[u8; 1] = b"h"; +pub const STATE_HASH_XOR_KEY: &[u8; 1] = b"x"; pub const STATE_HASH_INITIAL_BYTES: &[u8; 32] = &[0; HASH_SIZE_BYTES]; pub const CHANGE_ID_KEY: &[u8; 1] = b"c"; diff --git a/massa-db/src/massa_db.rs b/massa-db/src/massa_db.rs index 318faf15b1a..aba57aed439 100644 --- a/massa-db/src/massa_db.rs +++ b/massa-db/src/massa_db.rs @@ -1,7 +1,7 @@ use crate::{ MassaDBError, CF_ERROR, CHANGE_ID_DESER_ERROR, CHANGE_ID_KEY, CHANGE_ID_SER_ERROR, CRUD_ERROR, LSMTREE_ERROR, LSMTREE_NODES_CF, LSMTREE_VALUES_CF, METADATA_CF, OPEN_ERROR, STATE_CF, - STATE_HASH_ERROR, STATE_HASH_INITIAL_BYTES, STATE_HASH_KEY, VERSIONING_CF, + STATE_HASH_ERROR, STATE_HASH_INITIAL_BYTES, STATE_HASH_KEY, STATE_HASH_XOR_KEY, VERSIONING_CF, }; use lsmtree::{bytes::Bytes, BadProof, KVStore, SparseMerkleTree}; use massa_hash::{Hash, SmtHasher}; @@ -430,6 +430,7 @@ where versioning_changes: BTreeMap>, change_id: Option, reset_history: bool, + only_use_xor: bool, ) -> Result<(), MassaDBError> { debug!("AURELIEN: MassaDB: start write changes"); if let Some(change_id) = change_id.clone() { @@ -445,6 +446,8 @@ where let handle_versioning = self.db.cf_handle(VERSIONING_CF).expect(CF_ERROR); debug!("AURELIEN: MassaDB: after get handles"); + let _current_xor_hash = self.get_db_hash_xor(); + *self.current_batch.lock() = WriteBatch::default(); for (key, value) in changes.iter() { @@ -453,16 +456,20 @@ where self.current_batch.lock().put_cf(handle_state, key, value); let key_hash = Hash::compute_from(key); let value_hash = Hash::compute_from(value); + let _key_value_hash = + Hash::compute_from(&[key.as_slice(), value.as_slice()].concat()); debug!("AURELIEN: MassaDB: after insert value"); - debug!("AURELIEN: MassaDB: before tree update"); - self.lsmtree - .update( - key_hash.to_bytes(), - Bytes::from(value_hash.to_bytes().to_vec()), - ) - .expect(LSMTREE_ERROR); - debug!("AURELIEN: MassaDB: after tree update"); + if !only_use_xor { + debug!("AURELIEN: MassaDB: before tree update"); + self.lsmtree + .update( + key_hash.to_bytes(), + Bytes::from(value_hash.to_bytes().to_vec()), + ) + .expect(LSMTREE_ERROR); + debug!("AURELIEN: MassaDB: after tree update"); + } } else { debug!("AURELIEN: MassaDB: before delete"); self.current_batch.lock().delete_cf(handle_state, key); @@ -529,7 +536,6 @@ where .or_insert(versioning_changes); debug!("AURELIEN: MassaDB: after changes history update"); - debug!("AURELIEN: MassaDB: before cleaning"); if reset_history { self.change_history.clear(); @@ -633,6 +639,7 @@ where versioning_changes, Some(stream_changes.change_id), true, + false, )?; Ok((new_cursor, new_cursor_versioning)) @@ -656,6 +663,25 @@ where Hash::from_bytes(state_hash_bytes.try_into().expect(STATE_HASH_ERROR)) }) } + + /// Get the current state hash xor of the database + pub fn get_db_hash_xor(&self) -> Hash { + self.get_db_hash_opt_xor() + 
.unwrap_or(Hash::from_bytes(STATE_HASH_INITIAL_BYTES))
+ }
+
+ /// Get the current state hash xor of the database
+ fn get_db_hash_opt_xor(&self) -> Option<Hash> {
+ let db = &self.db;
+ let handle = db.cf_handle(METADATA_CF).expect(CF_ERROR);
+
+ db.get_cf(handle, STATE_HASH_XOR_KEY)
+ .expect(CRUD_ERROR)
+ .as_deref()
+ .map(|state_hash_bytes| {
+ Hash::from_bytes(state_hash_bytes.try_into().expect(STATE_HASH_ERROR))
+ })
+ }
 }

 impl RawMassaDB {
@@ -749,8 +775,9 @@ impl RawMassaDB {
 batch: DBBatch,
 versioning_batch: DBBatch,
 change_id: Option,
+ only_use_xor: bool,
 ) {
- self.write_changes(batch, versioning_batch, change_id, false)
+ self.write_changes(batch, versioning_batch, change_id, false, only_use_xor)
 .expect(CRUD_ERROR);
 }

@@ -765,7 +792,13 @@ impl RawMassaDB {
 }

 /// Utility function to delete all keys in a prefix
- pub fn delete_prefix(&mut self, prefix: &str, handle_str: &str, change_id: Option) {
+ pub fn delete_prefix(
+ &mut self,
+ prefix: &str,
+ handle_str: &str,
+ change_id: Option,
+ only_use_xor: bool,
+ ) {
 let db = &self.db;
 let handle = db.cf_handle(handle_str).expect(CF_ERROR);

@@ -780,10 +813,10 @@ impl RawMassaDB {
 match handle_str {
 STATE_CF => {
- self.write_batch(batch, DBBatch::new(), change_id);
+ self.write_batch(batch, DBBatch::new(), change_id, only_use_xor);
 }
 VERSIONING_CF => {
- self.write_batch(DBBatch::new(), batch, change_id);
+ self.write_batch(DBBatch::new(), batch, change_id, only_use_xor);
 }
 _ => {}
 }
diff --git a/massa-executed-ops/src/executed_denunciations.rs b/massa-executed-ops/src/executed_denunciations.rs
index 70e9db07f0f..b5e19dc964f 100644
--- a/massa-executed-ops/src/executed_denunciations.rs
+++ b/massa-executed-ops/src/executed_denunciations.rs
@@ -105,10 +105,10 @@ impl ExecutedDenunciations {
 /// Reset the executed denunciations
 ///
 /// USED FOR BOOTSTRAP ONLY
- pub fn reset(&mut self) {
+ pub fn reset(&mut self, only_use_xor: bool) {
 {
 let mut db = self.db.write();
- db.delete_prefix(EXECUTED_DENUNCIATIONS_PREFIX, STATE_CF, None);
+ db.delete_prefix(EXECUTED_DENUNCIATIONS_PREFIX, STATE_CF, None, only_use_xor);
 }

 self.recompute_sorted_denunciations();
diff --git a/massa-executed-ops/src/executed_ops.rs b/massa-executed-ops/src/executed_ops.rs
index cb5384d8fa8..343acfcb7ae 100644
--- a/massa-executed-ops/src/executed_ops.rs
+++ b/massa-executed-ops/src/executed_ops.rs
@@ -126,10 +126,10 @@ impl ExecutedOps {
 /// Reset the executed operations
 ///
 /// USED FOR BOOTSTRAP ONLY
- pub fn reset(&mut self) {
+ pub fn reset(&mut self, only_use_xor: bool) {
 self.db
 .write()
- .delete_prefix(EXECUTED_OPS_PREFIX, STATE_CF, None);
+ .delete_prefix(EXECUTED_OPS_PREFIX, STATE_CF, None, only_use_xor);

 self.recompute_sorted_ops_and_op_exec_status();
 }
@@ -322,15 +322,18 @@ fn test_executed_ops_hash_computing() {
 let mut batch_a = DBBatch::new();
 a.apply_changes_to_batch(change_a, apply_slot, &mut batch_a);
- db_a.write().write_batch(batch_a, Default::default(), None);
+ db_a.write()
+ .write_batch(batch_a, Default::default(), None, false);

 let mut batch_b = DBBatch::new();
 a.apply_changes_to_batch(change_b, apply_slot, &mut batch_b);
- db_a.write().write_batch(batch_b, Default::default(), None);
+ db_a.write()
+ .write_batch(batch_b, Default::default(), None, false);

 let mut batch_c = DBBatch::new();
 c.apply_changes_to_batch(change_c, apply_slot, &mut batch_c);
- db_c.write().write_batch(batch_c, Default::default(), None);
+ db_c.write()
+ .write_batch(batch_c, Default::default(), None, false);

 // check that a.hash ^ $(change_b) = c.hash
 assert_ne!(
@@
-350,7 +353,8 @@ fn test_executed_ops_hash_computing() { }; let mut batch_a = DBBatch::new(); a.prune_to_batch(prune_slot, &mut batch_a); - db_a.write().write_batch(batch_a, Default::default(), None); + db_a.write() + .write_batch(batch_a, Default::default(), None, false); // at this point the hash should have been reset to its original value assert_eq!( diff --git a/massa-execution-worker/src/execution.rs b/massa-execution-worker/src/execution.rs index d0d0e1ee348..35e986da6f2 100644 --- a/massa-execution-worker/src/execution.rs +++ b/massa-execution-worker/src/execution.rs @@ -197,13 +197,19 @@ impl ExecutionState { /// # Arguments /// * `exec_out`: execution output to apply pub fn apply_final_execution_output(&mut self, mut exec_out: ExecutionOutput) { - debug!("AURELIEN: Execution: start function cache for final slot {:?}", exec_out.slot); + debug!( + "AURELIEN: Execution: start function cache for final slot {:?}", + exec_out.slot + ); if self.final_cursor >= exec_out.slot { panic!("attempting to apply a final execution output at or before the current final_cursor"); } // count stats - debug!("AURELIEN: Execution: before stat counter {:?}", exec_out.slot); + debug!( + "AURELIEN: Execution: before stat counter {:?}", + exec_out.slot + ); if exec_out.block_id.is_some() { self.stats_counter.register_final_blocks(1); self.stats_counter.register_final_executed_operations( @@ -213,7 +219,10 @@ impl ExecutionState { exec_out.state_changes.executed_denunciations_changes.len(), ); } - debug!("AURELIEN: Execution: after stat counter {:?}", exec_out.slot); + debug!( + "AURELIEN: Execution: after stat counter {:?}", + exec_out.slot + ); debug!("AURELIEN: Execution: before finalize {:?}", exec_out.slot); // apply state changes to the final ledger @@ -232,20 +241,35 @@ impl ExecutionState { } // append generated events to the final event store - debug!("AURELIEN: Execution: before finalize events for final slot {:?}", exec_out.slot); + debug!( + "AURELIEN: Execution: before finalize events for final slot {:?}", + exec_out.slot + ); exec_out.events.finalize(); self.final_events.extend(exec_out.events); self.final_events.prune(self.config.max_final_events); - debug!("AURELIEN: Execution: after finalize events for final slot {:?}", exec_out.slot); + debug!( + "AURELIEN: Execution: after finalize events for final slot {:?}", + exec_out.slot + ); // update the prometheus metrics - debug!("AURELIEN: Execution: before metrics for final slot {:?}", exec_out.slot); + debug!( + "AURELIEN: Execution: before metrics for final slot {:?}", + exec_out.slot + ); self.massa_metrics .set_active_cursor(self.active_cursor.period, self.active_cursor.thread); self.massa_metrics .set_final_cursor(self.final_cursor.period, self.final_cursor.thread); - debug!("AURELIEN: Execution: after metrics for final slot {:?}", exec_out.slot); - debug!("AURELIEN: Execution: end function cache for final slot {:?}", exec_out.slot); + debug!( + "AURELIEN: Execution: after metrics for final slot {:?}", + exec_out.slot + ); + debug!( + "AURELIEN: Execution: end function cache for final slot {:?}", + exec_out.slot + ); } /// Applies an execution output to the active (non-final) state @@ -1288,10 +1312,16 @@ impl ExecutionState { // apply the cached output and return self.apply_final_execution_output(exec_out.clone()); - debug!("AURELIEN: Execution: end apply cache for final slot {:?}", slot); + debug!( + "AURELIEN: Execution: end apply cache for final slot {:?}", + slot + ); // update versioning stats 
self.update_versioning_stats(exec_target, slot); - debug!("AURELIEN: Execution: end versioning update for final slot {:?}", slot); + debug!( + "AURELIEN: Execution: end versioning update for final slot {:?}", + slot + ); debug!("execute_final_slot: found in cache, applied cache and updated versioning stats"); @@ -1327,7 +1357,10 @@ impl ExecutionState { ); } - debug!("AURELIEN: Execution: start recompute for final slot {:?}", slot); + debug!( + "AURELIEN: Execution: start recompute for final slot {:?}", + slot + ); // truncate the whole execution queue self.active_history.write().0.clear(); self.active_cursor = self.final_cursor; @@ -1723,7 +1756,10 @@ impl ExecutionState { exec_target: Option<&(BlockId, Storage)>, slot: &Slot, ) { - debug!("AURELIEN: Execution: Start versioning update for {:?}", slot); + debug!( + "AURELIEN: Execution: Start versioning update for {:?}", + slot + ); // update versioning statistics if let Some((block_id, storage)) = exec_target { if let Some(block) = storage.read_blocks().get(block_id) { @@ -1736,13 +1772,19 @@ impl ExecutionState { let current_version = block.content.header.content.current_version; let announced_version = block.content.header.content.announced_version; - debug!("AURELIEN: Execution: before update_network_version_stats update for {:?}", slot); + debug!( + "AURELIEN: Execution: before update_network_version_stats update for {:?}", + slot + ); if let Ok(slot_ts) = slot_ts_ { self.mip_store.update_network_version_stats( slot_ts, Some((current_version, announced_version)), ); - debug!("AURELIEN: Execution: after update_network_version_stats update for {:?}", slot); + debug!( + "AURELIEN: Execution: after update_network_version_stats update for {:?}", + slot + ); // Now write mip store changes to disk (if any) let mut db_batch = DBBatch::new(); @@ -1756,7 +1798,10 @@ impl ExecutionState { ) .unwrap(); - debug!("AURELIEN: Execution: before update_batches update for {:?}", slot); + debug!( + "AURELIEN: Execution: before update_batches update for {:?}", + slot + ); self.mip_store .update_batches( &mut db_batch, @@ -1769,15 +1814,28 @@ impl ExecutionState { slot_prev_ts, slot_ts, e ) }); - debug!("AURELIEN: Execution: after update_batches update for {:?}", slot); + debug!( + "AURELIEN: Execution: after update_batches update for {:?}", + slot + ); + + debug!( + "AURELIEN: Execution: after update_batches update for {:?}", + slot + ); + + let use_only_xor = self.final_state.read().get_only_use_xor(); - debug!("AURELIEN: Execution: after update_batches update for {:?}", slot); self.final_state.write().db.write().write_batch( db_batch, db_versioning_batch, None, + use_only_xor, + ); + debug!( + "AURELIEN: Execution: after write_batch update for {:?}", + slot ); - debug!("AURELIEN: Execution: after write_batch update for {:?}", slot); } else { warn!("Unable to get slot timestamp for slot: {} in order to update mip_store stats", slot); } diff --git a/massa-execution-worker/src/tests/mock.rs b/massa-execution-worker/src/tests/mock.rs index eea9b1e81b2..882dbb7c8b6 100644 --- a/massa-execution-worker/src/tests/mock.rs +++ b/massa-execution-worker/src/tests/mock.rs @@ -89,7 +89,7 @@ pub fn get_sample_state( let db = Arc::new(RwLock::new(MassaDB::new(db_config))); let mut ledger = FinalLedger::new(ledger_config.clone(), db.clone()); - ledger.load_initial_ledger().unwrap(); + ledger.load_initial_ledger(false).unwrap(); let default_config = FinalStateConfig::default(); let cfg = FinalStateConfig { ledger_config, @@ -145,7 +145,7 @@ pub fn get_sample_state( 
final_state .db .write() - .write_batch(batch, Default::default(), None); + .write_batch(batch, Default::default(), None, false); final_state.compute_initial_draws().unwrap(); Ok((Arc::new(RwLock::new(final_state)), tempfile, tempdir)) } diff --git a/massa-execution-worker/src/tests/scenarios_mandatories.rs b/massa-execution-worker/src/tests/scenarios_mandatories.rs index 88336b76c5f..888176bad36 100644 --- a/massa-execution-worker/src/tests/scenarios_mandatories.rs +++ b/massa-execution-worker/src/tests/scenarios_mandatories.rs @@ -1631,7 +1631,7 @@ mod tests { .write() .db .write() - .write_batch(batch, Default::default(), None); + .write_batch(batch, Default::default(), None, false); // create operation 1 let operation1 = Operation::new_verifiable( diff --git a/massa-execution-worker/src/worker.rs b/massa-execution-worker/src/worker.rs index 474331dea4a..a55b0ab7357 100644 --- a/massa-execution-worker/src/worker.rs +++ b/massa-execution-worker/src/worker.rs @@ -96,9 +96,18 @@ impl ExecutionThread { let (req, resp_tx) = req_resp.into_request_sender_pair(); // Acquire write access to the execution state (for cache updates) and execute the read-only request - debug!("AURELIEN: Execution: start execute_one_readonly_request {:?}", &req); - let outcome = self.execution_state.write().execute_readonly_request(req.clone()); - debug!("AURELIEN: Execution: end execute_one_readonly_request {:?}", req); + debug!( + "AURELIEN: Execution: start execute_one_readonly_request {:?}", + &req + ); + let outcome = self + .execution_state + .write() + .execute_readonly_request(req.clone()); + debug!( + "AURELIEN: Execution: end execute_one_readonly_request {:?}", + req + ); // Send the execution output through resp_tx. // Ignore errors because they just mean that the request emitter dropped the received @@ -197,21 +206,33 @@ impl ExecutionThread { let run_result = self.slot_sequencer.run_task_with( |is_final: bool, slot: &Slot, content: Option<&(BlockId, Storage)>| { if is_final { - debug!("AURELIEN: Execution: start execution final of slot {:?}", slot); + debug!( + "AURELIEN: Execution: start execution final of slot {:?}", + slot + ); self.execution_state.write().execute_final_slot( slot, content, self.selector.clone(), ); - debug!("AURELIEN: Execution: end execution final of slot {:?}", slot); + debug!( + "AURELIEN: Execution: end execution final of slot {:?}", + slot + ); } else { - debug!("AURELIEN: Execution: start execution candidate of slot {:?}", slot); + debug!( + "AURELIEN: Execution: start execution candidate of slot {:?}", + slot + ); self.execution_state.write().execute_candidate_slot( slot, content, self.selector.clone(), ); - debug!("AURELIEN: Execution: end execution candidate of slot {:?}", slot); + debug!( + "AURELIEN: Execution: end execution candidate of slot {:?}", + slot + ); } }, ); diff --git a/massa-final-state/src/final_state.rs b/massa-final-state/src/final_state.rs index 4db721b787f..1163ba7ded5 100644 --- a/massa-final-state/src/final_state.rs +++ b/massa-final-state/src/final_state.rs @@ -58,6 +58,15 @@ pub struct FinalState { } impl FinalState { + /// Temporary getter to know if we should compute the lsm tree during db writes + pub fn get_only_use_xor(&self) -> bool { + self.get_hash_kind_version() == 1 + } + + fn get_hash_kind_version(&self) -> u32 { + 0 + } + /// Initializes a new `FinalState` /// /// # Arguments @@ -124,10 +133,13 @@ impl FinalState { }; if reset_final_state { - final_state.async_pool.reset(); - final_state.pos_state.reset(); - 
final_state.executed_ops.reset();
- final_state.executed_denunciations.reset();
+ // TO UPDATE
+ let only_use_xor = final_state.get_only_use_xor();
+
+ final_state.async_pool.reset(only_use_xor);
+ final_state.pos_state.reset(only_use_xor);
+ final_state.executed_ops.reset(only_use_xor);
+ final_state.executed_denunciations.reset(only_use_xor);

 final_state.db.read().set_initial_change_id(slot);
 }
@@ -171,10 +183,12 @@ impl FinalState {
 if cfg!(feature = "testing") {
 let mut batch = DBBatch::new();
 final_state.pos_state.create_initial_cycle(&mut batch);
- final_state
- .db
- .write()
- .write_batch(batch, Default::default(), Some(recovered_slot));
+ final_state.db.write().write_batch(
+ batch,
+ Default::default(),
+ Some(recovered_slot),
+ false,
+ );
 }

 final_state.last_slot_before_downtime = Some(recovered_slot);
@@ -237,14 +251,17 @@ impl FinalState {
 // We compute the draws here because we need to feed_cycles when interpolating
 final_state.compute_initial_draws()?;

- final_state.interpolate_downtime()?;
+ // TO UPDATE
+ let only_use_xor = final_state.get_only_use_xor();
+
+ final_state.interpolate_downtime(only_use_xor)?;

 Ok(final_state)
 }

 /// Once we created a FinalState from a snapshot, we need to edit it to attach at the end_slot and handle the downtime.
 /// This basically recreates the history of the final_state, without executing the slots.
- fn interpolate_downtime(&mut self) -> Result<(), FinalStateError> {
+ fn interpolate_downtime(&mut self, only_use_xor: bool) -> Result<(), FinalStateError> {
 let current_slot = self.db.read().get_change_id().map_err(|_| {
 FinalStateError::InvalidSlot(String::from("Could not get slot in db"))
 })?;
@@ -259,7 +276,7 @@ impl FinalState {

 if current_slot_cycle == end_slot_cycle {
 // In that case, we just complete the gap in the same cycle
- self.interpolate_single_cycle(current_slot, end_slot)?;
+ self.interpolate_single_cycle(current_slot, end_slot, only_use_xor)?;
 } else {
 // Here, we also complete the cycle_infos in between
 self.interpolate_multiple_cycles(
@@ -267,6 +284,7 @@ impl FinalState {
 end_slot,
 current_slot_cycle,
 end_slot_cycle,
+ only_use_xor,
 )?;
 }

@@ -280,8 +298,9 @@ impl FinalState {
 // feed final_state_hash to the last cycle
 let cycle = end_slot.get_cycle(self.config.periods_per_cycle);
+
 self.pos_state
- .feed_cycle_state_hash(cycle, final_state_hash);
+ .feed_cycle_state_hash(cycle, final_state_hash, only_use_xor);

 Ok(())
 }
@@ -291,6 +310,7 @@ impl FinalState {
 &mut self,
 current_slot: Slot,
 end_slot: Slot,
+ only_use_xor: bool,
 ) -> Result<(), FinalStateError> {
 let latest_snapshot_cycle =
 self.pos_state
@@ -307,10 +327,12 @@ impl FinalState {

 self.pos_state
 .delete_cycle_info(latest_snapshot_cycle.0, &mut batch);

- self.pos_state
- .db
- .write()
- .write_batch(batch, Default::default(), Some(end_slot));
+ self.pos_state.db.write().write_batch(
+ batch,
+ Default::default(),
+ Some(end_slot),
+ only_use_xor,
+ );

 let mut batch = DBBatch::new();

@@ -325,10 +347,12 @@ impl FinalState {
 )
 .map_err(|err| FinalStateError::PosError(format!("{}", err)))?;

- self.pos_state
- .db
- .write()
- .write_batch(batch, Default::default(), Some(end_slot));
+ self.pos_state.db.write().write_batch(
+ batch,
+ Default::default(),
+ Some(end_slot),
+ only_use_xor,
+ );

 Ok(())
 }
@@ -340,6 +364,7 @@ impl FinalState {
 end_slot: Slot,
 current_slot_cycle: u64,
 end_slot_cycle: u64,
+ only_use_xor: bool,
 ) -> Result<(), FinalStateError> {
 let latest_snapshot_cycle =
 self.pos_state
@@ -356,10 +381,12 @@ impl FinalState {

 self.pos_state
.delete_cycle_info(latest_snapshot_cycle.0, &mut batch);

- self.pos_state
- .db
- .write()
- .write_batch(batch, Default::default(), Some(end_slot));
+ self.pos_state.db.write().write_batch(
+ batch,
+ Default::default(),
+ Some(end_slot),
+ only_use_xor,
+ );

 // Firstly, complete the first cycle
 let last_slot = Slot::new_last_of_cycle(
@@ -387,13 +414,15 @@ impl FinalState {
 )
 .map_err(|err| FinalStateError::PosError(format!("{}", err)))?;

- self.pos_state
- .db
- .write()
- .write_batch(batch, Default::default(), Some(end_slot));
+ self.pos_state.db.write().write_batch(
+ batch,
+ Default::default(),
+ Some(end_slot),
+ only_use_xor,
+ );

 // Feed final_state_hash to the completed cycle
- self.feed_cycle_hash_and_selector_for_interpolation(current_slot_cycle)?;
+ self.feed_cycle_hash_and_selector_for_interpolation(current_slot_cycle, only_use_xor)?;

 // TODO: Bring back the following optimisation (it fails because of selector)
 // Then, build all the completed cycles in between. If we have to build more cycles than the cycle_history_length, we only build the last ones.
@@ -433,13 +462,15 @@ impl FinalState {
 )
 .map_err(|err| FinalStateError::PosError(format!("{}", err)))?;

- self.pos_state
- .db
- .write()
- .write_batch(batch, Default::default(), Some(end_slot));
+ self.pos_state.db.write().write_batch(
+ batch,
+ Default::default(),
+ Some(end_slot),
+ only_use_xor,
+ );

 // Feed final_state_hash to the completed cycle
- self.feed_cycle_hash_and_selector_for_interpolation(cycle)?;
+ self.feed_cycle_hash_and_selector_for_interpolation(cycle, only_use_xor)?;
 }

 // Then, build the last cycle
@@ -465,7 +496,7 @@ impl FinalState {
 // If the end_slot_cycle is completed
 if end_slot.is_last_of_cycle(self.config.periods_per_cycle, self.config.thread_count) {
 // Feed final_state_hash to the completed cycle
- self.feed_cycle_hash_and_selector_for_interpolation(end_slot_cycle)?;
+ self.feed_cycle_hash_and_selector_for_interpolation(end_slot_cycle, only_use_xor)?;
 }

 // We reduce the cycle_history len as needed
@@ -478,7 +509,7 @@ impl FinalState {

 self.db
 .write()
- .write_batch(batch, Default::default(), Some(end_slot));
+ .write_batch(batch, Default::default(), Some(end_slot), only_use_xor);

 Ok(())
 }
@@ -487,11 +518,12 @@ impl FinalState {
 fn feed_cycle_hash_and_selector_for_interpolation(
 &mut self,
 cycle: u64,
+ only_use_xor: bool,
 ) -> Result<(), FinalStateError> {
 let final_state_hash = self.db.read().get_db_hash();

 self.pos_state
- .feed_cycle_state_hash(cycle, final_state_hash);
+ .feed_cycle_state_hash(cycle, final_state_hash, only_use_xor);

 self.pos_state
 .feed_selector(cycle.checked_add(2).ok_or_else(|| {
@@ -507,15 +539,18 @@ impl FinalState {
 ///
 /// USED ONLY FOR BOOTSTRAP
 pub fn reset(&mut self) {
+ // TO UPDATE
+ let only_use_xor = self.get_only_use_xor();
+
 self.db
 .write()
 .reset(Slot::new(0, self.config.thread_count.saturating_sub(1)));
- self.ledger.reset();
- self.async_pool.reset();
- self.pos_state.reset();
- self.executed_ops.reset();
- self.executed_denunciations.reset();
- self.mip_store.reset_db(self.db.clone());
+ self.ledger.reset(only_use_xor);
+ self.async_pool.reset(only_use_xor);
+ self.pos_state.reset(only_use_xor);
+ self.executed_ops.reset(only_use_xor);
+ self.executed_denunciations.reset(only_use_xor);
+ self.mip_store.reset_db(self.db.clone(), only_use_xor);
 }

 /// Performs the initial draws.
@@ -530,7 +565,10 @@ impl FinalState {
 ///
 /// Panics if the new slot is not the one coming just after the current one.
pub fn finalize(&mut self, slot: Slot, changes: StateChanges) { - debug!("AURELIEN: Execution: finalize slot {}: start function", slot); + debug!( + "AURELIEN: Execution: finalize slot {}: start function", + slot + ); let cur_slot = self.db.read().get_change_id().expect(CHANGE_ID_DESER_ERROR); // check slot consistency let next_slot = cur_slot @@ -547,46 +585,86 @@ impl FinalState { // apply the state changes to the batch - debug!("AURELIEN: Execution: finalize slot {}: before apply async pool", slot); + debug!( + "AURELIEN: Execution: finalize slot {}: before apply async pool", + slot + ); self.async_pool .apply_changes_to_batch(&changes.async_pool_changes, &mut db_batch); - debug!("AURELIEN: Execution: finalize slot {}: after apply async pool", slot); + debug!( + "AURELIEN: Execution: finalize slot {}: after apply async pool", + slot + ); - debug!("AURELIEN: Execution: finalize slot {}: before apply pos state", slot); + debug!( + "AURELIEN: Execution: finalize slot {}: before apply pos state", + slot + ); self.pos_state .apply_changes_to_batch(changes.pos_changes.clone(), slot, true, &mut db_batch) .expect("could not settle slot in final state proof-of-stake"); - debug!("AURELIEN: Execution: finalize slot {}: after apply pos state", slot); + debug!( + "AURELIEN: Execution: finalize slot {}: after apply pos state", + slot + ); // TODO: // do not panic above, it might just mean that the lookback cycle is not available // bootstrap again instead - debug!("AURELIEN: Execution: finalize slot {}: before apply ledger changes", slot); + debug!( + "AURELIEN: Execution: finalize slot {}: before apply ledger changes", + slot + ); self.ledger .apply_changes_to_batch(changes.ledger_changes.clone(), &mut db_batch); - debug!("AURELIEN: Execution: finalize slot {}: after apply ledger changes", slot); + debug!( + "AURELIEN: Execution: finalize slot {}: after apply ledger changes", + slot + ); - debug!("AURELIEN: Execution: finalize slot {}: before apply executed ops changes", slot); + debug!( + "AURELIEN: Execution: finalize slot {}: before apply executed ops changes", + slot + ); self.executed_ops.apply_changes_to_batch( changes.executed_ops_changes.clone(), slot, &mut db_batch, ); - debug!("AURELIEN: Execution: finalize slot {}: after apply executed ops changes", slot); + debug!( + "AURELIEN: Execution: finalize slot {}: after apply executed ops changes", + slot + ); - debug!("AURELIEN: Execution: finalize slot {}: before apply executed denunciations changes", slot); + debug!( + "AURELIEN: Execution: finalize slot {}: before apply executed denunciations changes", + slot + ); self.executed_denunciations.apply_changes_to_batch( changes.executed_denunciations_changes.clone(), slot, &mut db_batch, ); - debug!("AURELIEN: Execution: finalize slot {}: after apply executed denunciations changes", slot); + debug!( + "AURELIEN: Execution: finalize slot {}: after apply executed denunciations changes", + slot + ); + + debug!( + "AURELIEN: Execution: finalize slot {}: before db write batch", + slot + ); + + // TO UPDATE + let only_use_xor = self.get_only_use_xor(); - debug!("AURELIEN: Execution: finalize slot {}: before db write batch", slot); self.db .write() - .write_batch(db_batch, Default::default(), Some(slot)); - debug!("AURELIEN: Execution: finalize slot {}: after db write batch", slot); + .write_batch(db_batch, Default::default(), Some(slot), only_use_xor); + debug!( + "AURELIEN: Execution: finalize slot {}: after db write batch", + slot + ); let final_state_hash = self.db.read().get_db_hash(); @@ -618,11 
+696,17 @@ impl FinalState {
 debug!("AURELIEN: Execution: finalize slot {}: after backup", slot);

 // feed final_state_hash to the last cycle
- debug!("AURELIEN: Execution: finalize slot {}: before feed_cycle_state_hash", slot);
+ debug!(
+ "AURELIEN: Execution: finalize slot {}: before feed_cycle_state_hash",
+ slot
+ );
 let cycle = slot.get_cycle(self.config.periods_per_cycle);
 self.pos_state
- .feed_cycle_state_hash(cycle, final_state_hash);
- debug!("AURELIEN: Execution: finalize slot {}: after feed_cycle_state_hash", slot);
+ .feed_cycle_state_hash(cycle, final_state_hash, only_use_xor);
+ debug!(
+ "AURELIEN: Execution: finalize slot {}: after feed_cycle_state_hash",
+ slot
+ );
 debug!("AURELIEN: Execution: finalize slot {}: end function", slot);
 }
diff --git a/massa-final-state/src/tests/scenarios.rs b/massa-final-state/src/tests/scenarios.rs
index d3d55ead1ad..dde44390bfb 100644
--- a/massa-final-state/src/tests/scenarios.rs
+++ b/massa-final-state/src/tests/scenarios.rs
@@ -161,7 +161,7 @@ fn test_final_state() {
 fs.write()
 .db
 .write()
- .write_batch(batch, versioning_batch, Some(slot));
+ .write_batch(batch, versioning_batch, Some(slot), false);

 let slot = Slot::new(1, 0);
 let mut state_changes = StateChanges::default();
diff --git a/massa-ledger-exports/src/controller.rs b/massa-ledger-exports/src/controller.rs
index 78fe8516631..7493c44475a 100644
--- a/massa-ledger-exports/src/controller.rs
+++ b/massa-ledger-exports/src/controller.rs
@@ -7,7 +7,7 @@ use ::massa_db::DBBatch;

 pub trait LedgerController: Send + Sync + Debug {
 /// Loads ledger from file
- fn load_initial_ledger(&mut self) -> Result<(), LedgerError>;
+ fn load_initial_ledger(&mut self, only_use_xor: bool) -> Result<(), LedgerError>;

 /// Gets the balance of a ledger entry
 ///
@@ -46,7 +46,7 @@ pub trait LedgerController: Send + Sync + Debug {
 /// Reset the ledger
 ///
 /// USED FOR BOOTSTRAP ONLY
- fn reset(&mut self);
+ fn reset(&mut self, only_use_xor: bool);

 fn apply_changes_to_batch(&mut self, changes: LedgerChanges, ledger_batch: &mut DBBatch);

diff --git a/massa-ledger-worker/src/ledger.rs b/massa-ledger-worker/src/ledger.rs
index 736f92b1f93..4e81998d658 100644
--- a/massa-ledger-worker/src/ledger.rs
+++ b/massa-ledger-worker/src/ledger.rs
@@ -53,7 +53,7 @@ impl FinalLedger {

 impl LedgerController for FinalLedger {
 /// Loads ledger from file
- fn load_initial_ledger(&mut self) -> Result<(), LedgerError> {
+ fn load_initial_ledger(&mut self, only_use_xor: bool) -> Result<(), LedgerError> {
 // load the ledger tree from file
 let initial_ledger: HashMap<Address, LedgerEntry> = serde_json::from_str(
 &std::fs::read_to_string(&self.config.initial_ledger_path).map_err(|err| {
@@ -77,7 +77,8 @@ impl LedgerController for FinalLedger {
 err
 ))
 })?;
- self.sorted_ledger.load_initial_ledger(initial_ledger);
+ self.sorted_ledger
+ .load_initial_ledger(initial_ledger, only_use_xor);

 Ok(())
 }
@@ -149,8 +150,8 @@ impl LedgerController for FinalLedger {
 /// Reset the disk ledger.
///
 /// USED FOR BOOTSTRAP ONLY
- fn reset(&mut self) {
- self.sorted_ledger.reset();
+ fn reset(&mut self, only_use_xor: bool) {
+ self.sorted_ledger.reset(only_use_xor);
 }

 /// Allows applying `LedgerChanges` to the final ledger
diff --git a/massa-ledger-worker/src/ledger_db.rs b/massa-ledger-worker/src/ledger_db.rs
index a75e20eb04d..4d72733fca3 100644
--- a/massa-ledger-worker/src/ledger_db.rs
+++ b/massa-ledger-worker/src/ledger_db.rs
@@ -90,7 +90,11 @@ impl LedgerDB {
 /// Loads the initial disk ledger
 ///
 /// # Arguments
- pub fn load_initial_ledger(&mut self, initial_ledger: HashMap<Address, LedgerEntry>) {
+ pub fn load_initial_ledger(
+ &mut self,
+ initial_ledger: HashMap<Address, LedgerEntry>,
+ only_use_xor: bool,
+ ) {
 let mut batch = DBBatch::new();

 for (address, entry) in initial_ledger {
@@ -101,6 +105,7 @@ impl LedgerDB {
 batch,
 Default::default(),
 Some(Slot::new(0, self.thread_count.saturating_sub(1))),
+ only_use_xor,
 );
 }

@@ -190,8 +195,10 @@ impl LedgerDB {
 Some(iter.collect())
 }

- pub fn reset(&self) {
- self.db.write().delete_prefix(LEDGER_PREFIX, STATE_CF, None);
+ pub fn reset(&self, only_use_xor: bool) {
+ self.db
+ .write()
+ .delete_prefix(LEDGER_PREFIX, STATE_CF, None, only_use_xor);
 }

 /// Deserializes the key and value, useful after bootstrap
@@ -539,7 +546,7 @@ mod tests {
 ledger_db
 .db
 .write()
- .write_batch(batch, Default::default(), None);
+ .write_batch(batch, Default::default(), None, false);

 // return db and initial data
 (ledger_db, data)
@@ -582,7 +589,7 @@ mod tests {
 ledger_db
 .db
 .write()
- .write_batch(batch, Default::default(), None);
+ .write_batch(batch, Default::default(), None, false);

 // check deleted address and ledger hash
 assert_eq!(
diff --git a/massa-ledger-worker/src/test_exports/bootstrap.rs b/massa-ledger-worker/src/test_exports/bootstrap.rs
index 2c4454d886e..675c30ae43f 100644
--- a/massa-ledger-worker/src/test_exports/bootstrap.rs
+++ b/massa-ledger-worker/src/test_exports/bootstrap.rs
@@ -22,7 +22,7 @@ pub fn create_final_ledger(
 config.max_key_length,
 config.max_datastore_value_length,
 );
- ledger_db.load_initial_ledger(initial_ledger);
+ ledger_db.load_initial_ledger(initial_ledger, false);
 FinalLedger {
 config,
 sorted_ledger: ledger_db,
diff --git a/massa-pool-worker/src/operation_pool.rs b/massa-pool-worker/src/operation_pool.rs
index a3bab1cadea..279f8150f15 100644
--- a/massa-pool-worker/src/operation_pool.rs
+++ b/massa-pool-worker/src/operation_pool.rs
@@ -113,7 +113,10 @@ impl OperationPool {
 .iter()
 .copied()
 .collect::>();
- debug!("AURELIEN: Op pool: add_operations start with {} ops", items.len());
+ debug!(
+ "AURELIEN: Op pool: add_operations start with {} ops",
+ items.len()
+ );
 let mut added = PreHashSet::with_capacity(items.len());
 let mut removed = PreHashSet::with_capacity(items.len());
@@ -199,7 +202,10 @@ impl OperationPool {
 /// - fit inside the block
 /// - is the most profitable for block producer
 pub fn get_block_operations(&self, slot: &Slot) -> (Vec<OperationId>, Storage) {
- debug!("AURELIEN: Block prod {}: Op pool: get_block_operations start", slot);
+ debug!(
+ "AURELIEN: Block prod {}: Op pool: get_block_operations start",
+ slot
+ );
 // init list of selected operation IDs
 let mut op_ids = Vec::new();
@@ -290,7 +296,10 @@ impl OperationPool {
 *creator_balance = creator_balance.saturating_sub(op_info.max_spending);
 debug!("AURELIEN: Block prod {}: LEVEL2: Op pool: get_block_operations end one iter on op id {}", slot, cursor.get_id());
 }
- debug!("AURELIEN: Block prod {}: Op pool: get_block_operations end iterating", slot);
+ debug!(
+ "AURELIEN: Block prod 
{}: Op pool: get_block_operations end iterating", + slot + ); // generate storage let mut res_storage = self.storage.clone_without_refs(); @@ -299,7 +308,10 @@ impl OperationPool { if claimed_ops.len() != claim_ops.len() { panic!("could not claim all operations from storage"); } - debug!("AURELIEN: Block prod {}: Op pool: get_block_operations end function", slot); + debug!( + "AURELIEN: Block prod {}: Op pool: get_block_operations end function", + slot + ); (op_ids, res_storage) } diff --git a/massa-pos-exports/src/pos_final_state.rs b/massa-pos-exports/src/pos_final_state.rs index 695a9088e53..eed46be9f5b 100644 --- a/massa-pos-exports/src/pos_final_state.rs +++ b/massa-pos-exports/src/pos_final_state.rs @@ -211,10 +211,10 @@ impl PoSFinalState { /// Reset the state of the PoS final state /// /// USED ONLY FOR BOOTSTRAP - pub fn reset(&mut self) { + pub fn reset(&mut self, only_use_xor: bool) { let mut db = self.db.write(); - db.delete_prefix(CYCLE_HISTORY_PREFIX, STATE_CF, None); - db.delete_prefix(DEFERRED_CREDITS_PREFIX, STATE_CF, None); + db.delete_prefix(CYCLE_HISTORY_PREFIX, STATE_CF, None, only_use_xor); + db.delete_prefix(DEFERRED_CREDITS_PREFIX, STATE_CF, None, only_use_xor); self.cycle_history_cache = Default::default(); self.rng_seed_cache = None; } @@ -548,7 +548,7 @@ impl PoSFinalState { } /// Feeds the selector targeting a given draw cycle - pub fn feed_cycle_state_hash(&self, cycle: u64, final_state_hash: Hash) { + pub fn feed_cycle_state_hash(&self, cycle: u64, final_state_hash: Hash, only_use_xor: bool) { if self.get_cycle_index(cycle).is_some() { let mut batch = DBBatch::new(); self.put_cycle_history_final_state_hash_snapshot( @@ -557,7 +557,9 @@ impl PoSFinalState { &mut batch, ); - self.db.write().write_batch(batch, Default::default(), None); + self.db + .write() + .write_batch(batch, Default::default(), None, only_use_xor); } else { panic!("cycle {} should be contained here", cycle); } @@ -1527,7 +1529,7 @@ fn test_pos_final_state_hash_computation() { let mut batch = DBBatch::new(); pos_state.create_initial_cycle(&mut batch); db.write() - .write_batch(batch, Default::default(), Some(Slot::new(0, 0))); + .write_batch(batch, Default::default(), Some(Slot::new(0, 0)), false); let addr = Address::from_public_key(&KeyPair::generate(0).unwrap().get_public_key()); @@ -1554,7 +1556,7 @@ fn test_pos_final_state_hash_computation() { .apply_changes_to_batch(changes, Slot::new(0, 0), false, &mut batch) .unwrap(); db.write() - .write_batch(batch, Default::default(), Some(Slot::new(0, 0))); + .write_batch(batch, Default::default(), Some(Slot::new(0, 0)), false); // update changes once roll_changes.clear(); @@ -1579,7 +1581,7 @@ fn test_pos_final_state_hash_computation() { .apply_changes_to_batch(changes, Slot::new(0, 1), false, &mut batch) .unwrap(); db.write() - .write_batch(batch, Default::default(), Some(Slot::new(0, 1))); + .write_batch(batch, Default::default(), Some(Slot::new(0, 1)), false); // update changes twice roll_changes.clear(); @@ -1605,7 +1607,7 @@ fn test_pos_final_state_hash_computation() { .apply_changes_to_batch(changes, Slot::new(1, 0), false, &mut batch) .unwrap(); db.write() - .write_batch(batch, Default::default(), Some(Slot::new(1, 0))); + .write_batch(batch, Default::default(), Some(Slot::new(1, 0)), false); let cycles = pos_state.get_cycle_history_cycles(); assert_eq!(cycles.len(), 1, "wrong number of cycles"); diff --git a/massa-versioning/src/versioning.rs b/massa-versioning/src/versioning.rs index 04b2cfe5431..046d1fa09f6 100644 --- 
a/massa-versioning/src/versioning.rs
+++ b/massa-versioning/src/versioning.rs
@@ -613,12 +613,12 @@ impl MipStore {
         guard.extend_from_db(db)
     }

-    pub fn reset_db(&self, db: Arc<RwLock<MassaDB>>) {
+    pub fn reset_db(&self, db: Arc<RwLock<MassaDB>>, only_use_xor: bool) {
         {
             let mut guard = db.write();
-            guard.delete_prefix(MIP_STORE_PREFIX, STATE_CF, None);
-            guard.delete_prefix(MIP_STORE_PREFIX, VERSIONING_CF, None);
-            guard.delete_prefix(MIP_STORE_STATS_PREFIX, VERSIONING_CF, None);
+            guard.delete_prefix(MIP_STORE_PREFIX, STATE_CF, None, only_use_xor);
+            guard.delete_prefix(MIP_STORE_PREFIX, VERSIONING_CF, None, only_use_xor);
+            guard.delete_prefix(MIP_STORE_STATS_PREFIX, VERSIONING_CF, None, only_use_xor);
         }
     }
 }
@@ -2129,7 +2129,7 @@ mod test {
         let mut guard_db = db.write();
         // FIXME / TODO: no slot hardcoding?
-        guard_db.write_batch(db_batch, db_versioning_batch, Some(Slot::new(3, 0)));
+        guard_db.write_batch(db_batch, db_versioning_batch, Some(Slot::new(3, 0)), false);
         drop(guard_db);

         // Step 4

From 965a61ca8ac2e3b9f546e9a1c3df7892ecea4ed7 Mon Sep 17 00:00:00 2001
From: Leo-Besancon
Date: Thu, 15 Jun 2023 11:47:05 +0200
Subject: [PATCH 19/24] Compute xor

---
 massa-db/src/massa_db.rs             | 67 ++++++++++++++++++++++------
 massa-final-state/src/final_state.rs |  1 -
 massa-versioning/src/versioning.rs   |  2 +-
 3 files changed, 54 insertions(+), 16 deletions(-)

diff --git a/massa-db/src/massa_db.rs b/massa-db/src/massa_db.rs
index aba57aed439..a00ca222928 100644
--- a/massa-db/src/massa_db.rs
+++ b/massa-db/src/massa_db.rs
@@ -446,7 +446,7 @@ where
         let handle_versioning = self.db.cf_handle(VERSIONING_CF).expect(CF_ERROR);
         debug!("AURELIEN: MassaDB: after get handles");

-        let _current_xor_hash = self.get_db_hash_xor();
+        let mut current_xor_hash = self.get_db_hash_xor();

         *self.current_batch.lock() = WriteBatch::default();

@@ -454,14 +454,14 @@ where
             if let Some(value) = value {
                 debug!("AURELIEN: MassaDB: before insert value");
                 self.current_batch.lock().put_cf(handle_state, key, value);
-                let key_hash = Hash::compute_from(key);
-                let value_hash = Hash::compute_from(value);
-                let _key_value_hash =
-                    Hash::compute_from(&[key.as_slice(), value.as_slice()].concat());
                 debug!("AURELIEN: MassaDB: after insert value");

+                // Compute LSM TREE if we need to
                 if !only_use_xor {
                     debug!("AURELIEN: MassaDB: before tree update");
+                    let key_hash = Hash::compute_from(key);
+                    let value_hash = Hash::compute_from(value);
+
                     self.lsmtree
                         .update(
                             key_hash.to_bytes(),
@@ -470,17 +470,37 @@ where
                         .expect(LSMTREE_ERROR);
                     debug!("AURELIEN: MassaDB: after tree update");
                 }
+
+                // Compute the XOR in all cases
+                if let Ok(Some(prev_value)) = self.db.get_cf(handle_state, key) {
+                    let prev_hash =
+                        Hash::compute_from(&[key.as_slice(), prev_value.as_slice()].concat());
+                    current_xor_hash ^= prev_hash;
+                };
+
+                let new_hash = Hash::compute_from(&[key.as_slice(), value.as_slice()].concat());
+                current_xor_hash ^= new_hash;
             } else {
                 debug!("AURELIEN: MassaDB: before delete");
                 self.current_batch.lock().delete_cf(handle_state, key);
-                let key_hash = Hash::compute_from(key);
                 debug!("AURELIEN: MassaDB: after delete");

-                debug!("AURELIEN: MassaDB: before tree remove");
-                self.lsmtree
-                    .remove(key_hash.to_bytes())
-                    .expect(LSMTREE_ERROR);
-                debug!("AURELIEN: MassaDB: after tree remove");
+                // Compute LSM TREE if we need to
+                if !only_use_xor {
+                    debug!("AURELIEN: MassaDB: before tree remove");
+                    let key_hash = Hash::compute_from(key);
+                    self.lsmtree
+                        .remove(key_hash.to_bytes())
+                        .expect(LSMTREE_ERROR);
+                    debug!("AURELIEN: MassaDB: after tree remove");
+                }
+
+                // Compute 
the XOR in all cases + if let Ok(Some(prev_value)) = self.db.get_cf(handle_state, key) { + let prev_hash = + Hash::compute_from(&[key.as_slice(), prev_value.as_slice()].concat()); + current_xor_hash ^= prev_hash; + }; } } @@ -505,9 +525,28 @@ where debug!("AURELIEN: MassaDB: after change id"); debug!("AURELIEN: MassaDB: before state hash change"); - self.current_batch - .lock() - .put_cf(handle_metadata, STATE_HASH_KEY, self.lsmtree.root()); + + // Update the hash entries: + // - always update the STATE_HASH_XOR_KEY + // - if only_use_xor, we update the STATE_HASH_KEY with the xor hash + // - if not only_use_xor, we update the STATE_HASH_KEY with the lsm tree root + self.current_batch.lock().put_cf( + handle_metadata, + STATE_HASH_XOR_KEY, + current_xor_hash.to_bytes(), + ); + if only_use_xor { + self.current_batch.lock().put_cf( + handle_metadata, + STATE_HASH_KEY, + current_xor_hash.to_bytes(), + ); + } else { + self.current_batch + .lock() + .put_cf(handle_metadata, STATE_HASH_KEY, self.lsmtree.root()); + } + debug!("AURELIEN: MassaDB: after state hash change"); debug!("AURELIEN: MassaDB: before write rocks"); diff --git a/massa-final-state/src/final_state.rs b/massa-final-state/src/final_state.rs index bf1b8b8103b..7b66920d7ce 100644 --- a/massa-final-state/src/final_state.rs +++ b/massa-final-state/src/final_state.rs @@ -59,7 +59,6 @@ pub struct FinalState { } impl FinalState { - /// Initializes a new `FinalState` /// /// # Arguments diff --git a/massa-versioning/src/versioning.rs b/massa-versioning/src/versioning.rs index 0c42e7de7af..caf5dea9743 100644 --- a/massa-versioning/src/versioning.rs +++ b/massa-versioning/src/versioning.rs @@ -932,7 +932,7 @@ impl MipStoreRaw { .find_map(|(vi, vsh)| { let res = vsh.state_at(ts, vi.start, vi.timeout); match res { - Ok(ComponentStateTypeId::Active) => vi.components.get(&component).copied(), + Ok(ComponentStateTypeId::Active) => vi.components.get(component).copied(), _ => None, } }) From eba864c84cf8aebee34fb0f01fbe34ce9bd9d858 Mon Sep 17 00:00:00 2001 From: Leo-Besancon Date: Thu, 15 Jun 2023 12:21:12 +0200 Subject: [PATCH 20/24] Fix lsm_tree init --- massa-db/src/constants.rs | 1 + massa-db/src/massa_db.rs | 23 +++++++++++++++++++---- massa-final-state/src/final_state.rs | 4 ---- 3 files changed, 20 insertions(+), 8 deletions(-) diff --git a/massa-db/src/constants.rs b/massa-db/src/constants.rs index 9bbe76441ca..d52a616dc04 100644 --- a/massa-db/src/constants.rs +++ b/massa-db/src/constants.rs @@ -9,6 +9,7 @@ pub const VERSIONING_CF: &str = "versioning"; pub const STATE_HASH_KEY: &[u8; 1] = b"h"; pub const STATE_HASH_XOR_KEY: &[u8; 1] = b"x"; +pub const STATE_HASH_KEY_IS_XOR_KEY: &[u8; 6] = b"is_xor"; pub const STATE_HASH_INITIAL_BYTES: &[u8; 32] = &[0; HASH_SIZE_BYTES]; pub const CHANGE_ID_KEY: &[u8; 1] = b"c"; diff --git a/massa-db/src/massa_db.rs b/massa-db/src/massa_db.rs index a00ca222928..12361892b2c 100644 --- a/massa-db/src/massa_db.rs +++ b/massa-db/src/massa_db.rs @@ -1,7 +1,8 @@ use crate::{ MassaDBError, CF_ERROR, CHANGE_ID_DESER_ERROR, CHANGE_ID_KEY, CHANGE_ID_SER_ERROR, CRUD_ERROR, LSMTREE_ERROR, LSMTREE_NODES_CF, LSMTREE_VALUES_CF, METADATA_CF, OPEN_ERROR, STATE_CF, - STATE_HASH_ERROR, STATE_HASH_INITIAL_BYTES, STATE_HASH_KEY, STATE_HASH_XOR_KEY, VERSIONING_CF, + STATE_HASH_ERROR, STATE_HASH_INITIAL_BYTES, STATE_HASH_KEY, STATE_HASH_KEY_IS_XOR_KEY, + STATE_HASH_XOR_KEY, VERSIONING_CF, }; use lsmtree::{bytes::Bytes, BadProof, KVStore, SparseMerkleTree}; use massa_hash::{Hash, SmtHasher}; @@ -541,6 +542,9 @@ where 
STATE_HASH_KEY,
                    current_xor_hash.to_bytes(),
                );
+            self.current_batch
+                .lock()
+                .put_cf(handle_metadata, STATE_HASH_KEY_IS_XOR_KEY, [1]);
         } else {
             self.current_batch
                 .lock()
@@ -766,12 +770,23 @@ impl RawMassaDB {
         );
         let handle_metadata = db.cf_handle(METADATA_CF).expect(CF_ERROR);
+
         let lsmtree = match db
-            .get_cf(handle_metadata, STATE_HASH_KEY)
+            .get_cf(handle_metadata, STATE_HASH_KEY_IS_XOR_KEY)
             .expect(CRUD_ERROR)
         {
-            Some(hash_bytes) => SparseMerkleTree::import(nodes_store, values_store, hash_bytes),
-            _ => SparseMerkleTree::new_with_stores(nodes_store, values_store),
+            Some(_value) => SparseMerkleTree::new_with_stores(nodes_store, values_store),
+            _ => {
+                match db
+                    .get_cf(handle_metadata, STATE_HASH_KEY)
+                    .expect(CRUD_ERROR)
+                {
+                    Some(hash_bytes) => {
+                        SparseMerkleTree::import(nodes_store, values_store, hash_bytes)
+                    }
+                    _ => SparseMerkleTree::new_with_stores(nodes_store, values_store),
+                }
+            }
         };

         let massa_db = Self {
diff --git a/massa-final-state/src/final_state.rs b/massa-final-state/src/final_state.rs
index 7b66920d7ce..88f1dcc92c4 100644
--- a/massa-final-state/src/final_state.rs
+++ b/massa-final-state/src/final_state.rs
@@ -125,7 +125,6 @@ impl FinalState {
         };

         if reset_final_state {
-            // TO UPDATE
             let only_use_xor = final_state.get_only_use_xor();

             final_state.async_pool.reset(only_use_xor);
@@ -243,7 +242,6 @@ impl FinalState {
         // We compute the draws here because we need to feed_cycles when interpolating
         final_state.compute_initial_draws()?;

-        // TO UPDATE
         let only_use_xor = final_state.get_only_use_xor();

         final_state.interpolate_downtime(only_use_xor)?;
@@ -531,7 +529,6 @@ impl FinalState {
     ///
     /// USED ONLY FOR BOOTSTRAP
     pub fn reset(&mut self) {
-        // TO UPDATE
         let only_use_xor = self.get_only_use_xor();

         self.db
@@ -647,7 +644,6 @@ impl FinalState {
             slot
         );

-        // TO UPDATE
         let only_use_xor = self.get_only_use_xor();

         self.db

From 6f61d6be6826eee249cd8afc42ed78d73baa143d Mon Sep 17 00:00:00 2001
From: AurelienFT
Date: Thu, 15 Jun 2023 18:22:10 +0200
Subject: [PATCH 21/24] Remove debug print

---
 Cargo.lock                                    |  1 -
 massa-db/Cargo.toml                           |  1 -
 massa-db/src/massa_db.rs                      | 27 -------
 massa-execution-worker/src/execution.rs       | 81 -------------------
 massa-execution-worker/src/worker.rs          | 29 +------
 massa-factory-worker/src/block_factory.rs     | 15 +---
 massa-final-state/src/final_state.rs          | 68 +---------------
 massa-pool-worker/src/operation_pool.rs       | 23 ------
 .../src/handlers/block_handler/retrieval.rs   | 18 -----
 massa-protocol-worker/src/messages.rs         |  2 -
 10 files changed, 3 insertions(+), 262 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index f84d8c530a9..de7f33b54fa 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2629,7 +2629,6 @@ dependencies = [
 "parking_lot",
 "rocksdb",
 "thiserror",
- "tracing",
]

[[package]]
diff --git a/massa-db/Cargo.toml b/massa-db/Cargo.toml
index 7c5b32a6c0b..b06c258172a 100644
--- a/massa-db/Cargo.toml
+++ b/massa-db/Cargo.toml
@@ -11,7 +11,6 @@ rocksdb = "0.20"
displaydoc = "0.2"
thiserror = "1.0"
lsmtree = "=0.1.1"
-tracing = "0.1"

# Custom modules
massa_hash = { path = "../massa-hash" }
diff --git a/massa-db/src/massa_db.rs b/massa-db/src/massa_db.rs
index 12361892b2c..a1bf710bf38 100644
--- a/massa-db/src/massa_db.rs
+++ b/massa-db/src/massa_db.rs
@@ -24,7 +24,6 @@ use std::{
     path::PathBuf,
     sync::Arc,
 };
-use tracing::debug;

 type Key = Vec<u8>;
 type Value = Vec<u8>;
@@ -433,7 +432,6 @@ where
         reset_history: bool,
         only_use_xor: bool,
     ) -> Result<(), MassaDBError> {
-        debug!("AURELIEN: MassaDB: start write changes");
        if let Some(change_id) = 
change_id.clone() { if change_id < self.get_change_id().expect(CHANGE_ID_DESER_ERROR) { return Err(MassaDBError::InvalidChangeID(String::from( @@ -445,7 +443,6 @@ where let handle_state = self.db.cf_handle(STATE_CF).expect(CF_ERROR); let handle_metadata = self.db.cf_handle(METADATA_CF).expect(CF_ERROR); let handle_versioning = self.db.cf_handle(VERSIONING_CF).expect(CF_ERROR); - debug!("AURELIEN: MassaDB: after get handles"); let mut current_xor_hash = self.get_db_hash_xor(); @@ -453,13 +450,10 @@ where for (key, value) in changes.iter() { if let Some(value) = value { - debug!("AURELIEN: MassaDB: before insert value"); self.current_batch.lock().put_cf(handle_state, key, value); - debug!("AURELIEN: MassaDB: after insert value"); // Compute LSM TREE if we need to if !only_use_xor { - debug!("AURELIEN: MassaDB: before tree update"); let key_hash = Hash::compute_from(key); let value_hash = Hash::compute_from(value); @@ -469,7 +463,6 @@ where Bytes::from(value_hash.to_bytes().to_vec()), ) .expect(LSMTREE_ERROR); - debug!("AURELIEN: MassaDB: after tree update"); } // Compute the XOR in all cases @@ -482,18 +475,14 @@ where let new_hash = Hash::compute_from(&[key.as_slice(), value.as_slice()].concat()); current_xor_hash ^= new_hash; } else { - debug!("AURELIEN: MassaDB: before delete"); self.current_batch.lock().delete_cf(handle_state, key); - debug!("AURELIEN: MassaDB: after delete"); // Compute LSM TREE if we need to if !only_use_xor { - debug!("AURELIEN: MassaDB: before tree remove"); let key_hash = Hash::compute_from(key); self.lsmtree .remove(key_hash.to_bytes()) .expect(LSMTREE_ERROR); - debug!("AURELIEN: MassaDB: after tree remove"); } // Compute the XOR in all cases @@ -508,7 +497,6 @@ where // in versioning_changes, we have the data that we do not want to include in hash // e.g everything that is not in 'Active' state (so hashes remain compatibles) for (key, value) in versioning_changes.iter() { - debug!("AURELIEN: MassaDB: before add versioning change"); if let Some(value) = value { self.current_batch .lock() @@ -516,16 +504,11 @@ where } else { self.current_batch.lock().delete_cf(handle_versioning, key); } - debug!("AURELIEN: MassaDB: after add versioning change"); } - debug!("AURELIEN: MassaDB: before change id"); if let Some(change_id) = change_id { self.set_change_id_to_batch(change_id); } - debug!("AURELIEN: MassaDB: after change id"); - - debug!("AURELIEN: MassaDB: before state hash change"); // Update the hash entries: // - always update the STATE_HASH_XOR_KEY @@ -551,9 +534,6 @@ where .put_cf(handle_metadata, STATE_HASH_KEY, self.lsmtree.root()); } - debug!("AURELIEN: MassaDB: after state hash change"); - - debug!("AURELIEN: MassaDB: before write rocks"); { let mut current_batch_guard = self.current_batch.lock(); let batch = WriteBatch::from_data(current_batch_guard.data()); @@ -563,11 +543,9 @@ where MassaDBError::RocksDBError(format!("Can't write batch to disk: {}", e)) })?; } - debug!("AURELIEN: MassaDB: after write rocks"); self.current_hashmap.write().clear(); - debug!("AURELIEN: MassaDB: before changes history update"); self.change_history .entry(self.get_change_id().expect(CHANGE_ID_DESER_ERROR)) .and_modify(|map| map.extend(changes.clone().into_iter())) @@ -577,9 +555,7 @@ where .entry(self.get_change_id().expect(CHANGE_ID_DESER_ERROR)) .and_modify(|map| map.extend(versioning_changes.clone().into_iter())) .or_insert(versioning_changes); - debug!("AURELIEN: MassaDB: after changes history update"); - debug!("AURELIEN: MassaDB: before cleaning"); if reset_history { 
self.change_history.clear(); } @@ -587,9 +563,6 @@ where while self.change_history.len() > self.config.max_history_length { self.change_history.pop_first(); } - debug!("AURELIEN: MassaDB: after cleaning"); - - debug!("AURELIEN: MassaDB: end write changes"); Ok(()) } diff --git a/massa-execution-worker/src/execution.rs b/massa-execution-worker/src/execution.rs index 35e986da6f2..3576806174d 100644 --- a/massa-execution-worker/src/execution.rs +++ b/massa-execution-worker/src/execution.rs @@ -197,19 +197,11 @@ impl ExecutionState { /// # Arguments /// * `exec_out`: execution output to apply pub fn apply_final_execution_output(&mut self, mut exec_out: ExecutionOutput) { - debug!( - "AURELIEN: Execution: start function cache for final slot {:?}", - exec_out.slot - ); if self.final_cursor >= exec_out.slot { panic!("attempting to apply a final execution output at or before the current final_cursor"); } // count stats - debug!( - "AURELIEN: Execution: before stat counter {:?}", - exec_out.slot - ); if exec_out.block_id.is_some() { self.stats_counter.register_final_blocks(1); self.stats_counter.register_final_executed_operations( @@ -219,17 +211,11 @@ impl ExecutionState { exec_out.state_changes.executed_denunciations_changes.len(), ); } - debug!( - "AURELIEN: Execution: after stat counter {:?}", - exec_out.slot - ); - debug!("AURELIEN: Execution: before finalize {:?}", exec_out.slot); // apply state changes to the final ledger self.final_state .write() .finalize(exec_out.slot, exec_out.state_changes); - debug!("AURELIEN: Execution: after finalize {:?}", exec_out.slot); // update the final ledger's slot self.final_cursor = exec_out.slot; @@ -241,35 +227,15 @@ impl ExecutionState { } // append generated events to the final event store - debug!( - "AURELIEN: Execution: before finalize events for final slot {:?}", - exec_out.slot - ); exec_out.events.finalize(); self.final_events.extend(exec_out.events); self.final_events.prune(self.config.max_final_events); - debug!( - "AURELIEN: Execution: after finalize events for final slot {:?}", - exec_out.slot - ); // update the prometheus metrics - debug!( - "AURELIEN: Execution: before metrics for final slot {:?}", - exec_out.slot - ); self.massa_metrics .set_active_cursor(self.active_cursor.period, self.active_cursor.thread); self.massa_metrics .set_final_cursor(self.final_cursor.period, self.final_cursor.thread); - debug!( - "AURELIEN: Execution: after metrics for final slot {:?}", - exec_out.slot - ); - debug!( - "AURELIEN: Execution: end function cache for final slot {:?}", - exec_out.slot - ); } /// Applies an execution output to the active (non-final) state @@ -1306,24 +1272,13 @@ impl ExecutionState { let first_exec_output = self.active_history.write().0.pop_front(); if let Some(exec_out) = first_exec_output { if &exec_out.slot == slot && exec_out.block_id == target_id { - debug!("AURELIEN: Execution: start cache for final slot {:?}", slot); // speculative execution front result matches what we want to compute // apply the cached output and return self.apply_final_execution_output(exec_out.clone()); - debug!( - "AURELIEN: Execution: end apply cache for final slot {:?}", - slot - ); // update versioning stats self.update_versioning_stats(exec_target, slot); - debug!( - "AURELIEN: Execution: end versioning update for final slot {:?}", - slot - ); - - debug!("execute_final_slot: found in cache, applied cache and updated versioning stats"); // Broadcast a final slot execution output to active channel subscribers. 
if self.config.broadcast_enabled {
@@ -1340,7 +1295,6 @@
                        );
                    }
                }
-                debug!("AURELIEN: Execution: end cache for final slot {:?}", slot);
                return;
            } else {
                // speculative cache mismatch
@@ -1357,10 +1311,6 @@
            );
        }

-        debug!(
-            "AURELIEN: Execution: start recompute for final slot {:?}",
-            slot
-        );
        // truncate the whole execution queue
        self.active_history.write().0.clear();
        self.active_cursor = self.final_cursor;
@@ -1672,7 +1622,6 @@
        ops: &PreHashSet<OperationId>,
        thread: u8,
    ) -> PreHashSet<OperationId> {
-        debug!("AURELIEN: Execution: {:?} start op check", ops);
        let mut ops = ops.clone();

        if ops.is_empty() {
@@ -1703,7 +1652,6 @@
            let final_state = self.final_state.read();
            ops.retain(|op_id| !final_state.executed_ops.contains(op_id));
        }
-        debug!("AURELIEN: Execution: {:?} end op check", ops);

        ops
    }
@@ -1756,10 +1704,6 @@
        exec_target: Option<&(BlockId, Storage)>,
        slot: &Slot,
    ) {
-        debug!(
-            "AURELIEN: Execution: Start versioning update for {:?}",
-            slot
-        );
        // update versioning statistics
        if let Some((block_id, storage)) = exec_target {
            if let Some(block) = storage.read_blocks().get(block_id) {
@@ -1772,19 +1716,11 @@
                let current_version = block.content.header.content.current_version;
                let announced_version = block.content.header.content.announced_version;
-                debug!(
-                    "AURELIEN: Execution: before update_network_version_stats update for {:?}",
-                    slot
-                );
                if let Ok(slot_ts) = slot_ts_ {
                    self.mip_store.update_network_version_stats(
                        slot_ts,
                        Some((current_version, announced_version)),
                    );
-                    debug!(
-                        "AURELIEN: Execution: after update_network_version_stats update for {:?}",
-                        slot
-                    );

                    // Now write mip store changes to disk (if any)
                    let mut db_batch = DBBatch::new();
@@ -1798,10 +1734,6 @@
                    )
                    .unwrap();

-                    debug!(
-                        "AURELIEN: Execution: before update_batches update for {:?}",
-                        slot
-                    );
                    self.mip_store
                        .update_batches(
                            &mut db_batch,
@@ -1814,15 +1746,6 @@
                                slot_prev_ts, slot_ts, e
                            )
                        });
-                    debug!(
-                        "AURELIEN: Execution: after update_batches update for {:?}",
-                        slot
-                    );
-
-                    debug!(
-                        "AURELIEN: Execution: after update_batches update for {:?}",
-                        slot
-                    );

                    let use_only_xor = self.final_state.read().get_only_use_xor();

@@ -1832,10 +1755,6 @@
                        None,
                        use_only_xor,
                    );
-                    debug!(
-                        "AURELIEN: Execution: after write_batch update for {:?}",
-                        slot
-                    );
                } else {
                    warn!("Unable to get slot timestamp for slot: {} in order to update mip_store stats", slot);
                }
diff --git a/massa-execution-worker/src/worker.rs b/massa-execution-worker/src/worker.rs
index a55b0ab7357..73e99e23665 100644
--- a/massa-execution-worker/src/worker.rs
+++ b/massa-execution-worker/src/worker.rs
@@ -96,18 +96,7 @@ impl ExecutionThread {
        let (req, resp_tx) = req_resp.into_request_sender_pair();

        // Acquire write access to the execution state (for cache updates) and execute the read-only request
-        debug!(
-            "AURELIEN: Execution: start execute_one_readonly_request {:?}",
-            &req
-        );
-        let outcome = self
-            .execution_state
-            .write()
-            .execute_readonly_request(req.clone());
-        debug!(
-            "AURELIEN: Execution: end execute_one_readonly_request {:?}",
-            req
-        );
+        let outcome = self.execution_state.write().execute_readonly_request(req);

        // Send the execution output through resp_tx.
// Ignore errors because they just mean that the request emitter dropped the received
@@ -206,33 +195,17 @@
        let run_result = self.slot_sequencer.run_task_with(
            |is_final: bool, slot: &Slot, content: Option<&(BlockId, Storage)>| {
                if is_final {
-                    debug!(
-                        "AURELIEN: Execution: start execution final of slot {:?}",
-                        slot
-                    );
                    self.execution_state.write().execute_final_slot(
                        slot,
                        content,
                        self.selector.clone(),
                    );
-                    debug!(
-                        "AURELIEN: Execution: end execution final of slot {:?}",
-                        slot
-                    );
                } else {
-                    debug!(
-                        "AURELIEN: Execution: start execution candidate of slot {:?}",
-                        slot
-                    );
                    self.execution_state.write().execute_candidate_slot(
                        slot,
                        content,
                        self.selector.clone(),
                    );
-                    debug!(
-                        "AURELIEN: Execution: end execution candidate of slot {:?}",
-                        slot
-                    );
                }
            },
        );
diff --git a/massa-factory-worker/src/block_factory.rs b/massa-factory-worker/src/block_factory.rs
index 380b7b54c41..913afbde5ef 100644
--- a/massa-factory-worker/src/block_factory.rs
+++ b/massa-factory-worker/src/block_factory.rs
@@ -18,7 +18,7 @@ use massa_versioning::versioning::MipStore;
 use massa_wallet::Wallet;
 use parking_lot::RwLock;
 use std::{sync::Arc, thread, time::Instant};
-use tracing::{debug, info, warn};
+use tracing::{info, warn};

 /// Structure gathering all elements needed by the factory thread
 pub(crate) struct BlockFactoryWorker {
@@ -133,11 +133,6 @@ impl BlockFactoryWorker {
            }
        };

-        debug!(
-            "AURELIEN: Block prod {}: block factory selected block producer address for slot {}: {}",
-            slot, slot, block_producer_addr
-        );
        // check if the block producer address is handled by the wallet
        let block_producer_keypair_ref = self.wallet.read();
        let block_producer_keypair = if let Some(kp) =
@@ -194,9 +189,7 @@ impl BlockFactoryWorker {
        block_storage.extend(endo_storage);

        // gather operations and compute global operations hash
-        debug!("AURELIEN: Block prod {}: before ops fetched", slot,);
        let (op_ids, op_storage) = self.channels.pool.get_block_operations(&slot);
-        debug!("AURELIEN: Block prod {}: after ops fetched", slot,);
        if op_ids.len() > self.cfg.max_operations_per_block as usize {
            warn!("Too many operations returned");
            return;
        }
@@ -211,7 +204,6 @@ impl BlockFactoryWorker {
        );

        // create header
-        debug!("AURELIEN: Block prod {}: before get version", slot,);
        let current_version = self.mip_store.get_network_version_current();
        let announced_version = self.mip_store.get_network_version_to_announce();
        let header: SecuredHeader = BlockHeader::new_verifiable::<BlockHeaderSerializer, BlockId>(
@@ -228,10 +220,6 @@
            block_producer_keypair,
        )
        .expect("error while producing block header");
-        debug!(
-            "AURELIEN: Block prod {}: after create block and get denunciations",
-            slot,
-        );

        // create block
        let block_ = Block {
            header,
@@ -258,7 +246,6 @@
        self.channels
            .consensus
            .register_block(block_id, slot, block_storage, true);
-        debug!("AURELIEN: Block prod {}: after register block", slot,);
    }

    /// main run loop of the block creator thread
diff --git a/massa-final-state/src/final_state.rs b/massa-final-state/src/final_state.rs
index 88f1dcc92c4..6ab8a67f680 100644
--- a/massa-final-state/src/final_state.rs
+++ b/massa-final-state/src/final_state.rs
@@ -554,10 +554,6 @@ impl FinalState {
    ///
    /// Panics if the new slot is not the one coming just after the current one.
pub fn finalize(&mut self, slot: Slot, changes: StateChanges) { - debug!( - "AURELIEN: Execution: finalize slot {}: start function", - slot - ); let cur_slot = self.db.read().get_change_id().expect(CHANGE_ID_DESER_ERROR); // check slot consistency let next_slot = cur_slot @@ -574,85 +570,34 @@ impl FinalState { // apply the state changes to the batch - debug!( - "AURELIEN: Execution: finalize slot {}: before apply async pool", - slot - ); self.async_pool .apply_changes_to_batch(&changes.async_pool_changes, &mut db_batch); - debug!( - "AURELIEN: Execution: finalize slot {}: after apply async pool", - slot - ); - - debug!( - "AURELIEN: Execution: finalize slot {}: before apply pos state", - slot - ); self.pos_state .apply_changes_to_batch(changes.pos_changes.clone(), slot, true, &mut db_batch) .expect("could not settle slot in final state proof-of-stake"); - debug!( - "AURELIEN: Execution: finalize slot {}: after apply pos state", - slot - ); + // TODO: // do not panic above, it might just mean that the lookback cycle is not available // bootstrap again instead - - debug!( - "AURELIEN: Execution: finalize slot {}: before apply ledger changes", - slot - ); self.ledger .apply_changes_to_batch(changes.ledger_changes.clone(), &mut db_batch); - debug!( - "AURELIEN: Execution: finalize slot {}: after apply ledger changes", - slot - ); - - debug!( - "AURELIEN: Execution: finalize slot {}: before apply executed ops changes", - slot - ); self.executed_ops.apply_changes_to_batch( changes.executed_ops_changes.clone(), slot, &mut db_batch, ); - debug!( - "AURELIEN: Execution: finalize slot {}: after apply executed ops changes", - slot - ); - debug!( - "AURELIEN: Execution: finalize slot {}: before apply executed denunciations changes", - slot - ); self.executed_denunciations.apply_changes_to_batch( changes.executed_denunciations_changes.clone(), slot, &mut db_batch, ); - debug!( - "AURELIEN: Execution: finalize slot {}: after apply executed denunciations changes", - slot - ); - - debug!( - "AURELIEN: Execution: finalize slot {}: before db write batch", - slot - ); let only_use_xor = self.get_only_use_xor(); self.db .write() .write_batch(db_batch, Default::default(), Some(slot), only_use_xor); - debug!( - "AURELIEN: Execution: finalize slot {}: after db write batch", - slot - ); let final_state_hash = self.db.read().get_db_hash(); @@ -660,7 +605,6 @@ impl FinalState { info!("final_state hash at slot {}: {}", slot, final_state_hash); // Backup DB if needed - debug!("AURELIEN: Execution: finalize slot {}: before backup", slot); if slot.period % PERIODS_BETWEEN_BACKUPS == 0 && slot.period != 0 && slot.thread == 0 { let state_slot = self.db.read().get_change_id(); match state_slot { @@ -681,21 +625,11 @@ impl FinalState { self.db.read().backup_db(slot); } - debug!("AURELIEN: Execution: finalize slot {}: after backup", slot); // feed final_state_hash to the last cycle - debug!( - "AURELIEN: Execution: finalize slot {}: before feed_cycle_state_hash", - slot - ); let cycle = slot.get_cycle(self.config.periods_per_cycle); self.pos_state .feed_cycle_state_hash(cycle, final_state_hash, only_use_xor); - debug!( - "AURELIEN: Execution: finalize slot {}: after feed_cycle_state_hash", - slot - ); - debug!("AURELIEN: Execution: finalize slot {}: end function", slot); } /// After bootstrap or load from disk, recompute all the caches. 
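Before the pool and protocol cleanups below, it is worth pinning down the XOR scheme that `only_use_xor` selects, since PATCH 19 above only shows it inline in `write_changes`. The following is a minimal standalone sketch of the same invariant, std-only and illustrative: `XorState` and `kv_digest` are not the massa-db API, and `DefaultHasher` stands in for `massa_hash::Hash`.

use std::collections::hash_map::DefaultHasher;
use std::collections::HashMap;
use std::hash::{Hash, Hasher};

/// Digest of a single key/value pair (stand-in for hashing `[key, value].concat()`).
fn kv_digest(key: &[u8], value: &[u8]) -> u64 {
    let mut h = DefaultHasher::new();
    key.hash(&mut h);
    value.hash(&mut h);
    h.finish()
}

#[derive(Default)]
struct XorState {
    entries: HashMap<Vec<u8>, Vec<u8>>,
    /// XOR of kv_digest over all live entries; zero for the empty state,
    /// like STATE_HASH_INITIAL_BYTES.
    xor_hash: u64,
}

impl XorState {
    fn put(&mut self, key: Vec<u8>, value: Vec<u8>) {
        // XOR out the digest of the previous value first, exactly like the
        // `prev_value` lookup in write_changes; skipping this would leave
        // stale digests folded into the hash forever.
        if let Some(prev) = self.entries.get(&key) {
            self.xor_hash ^= kv_digest(&key, prev);
        }
        self.xor_hash ^= kv_digest(&key, &value);
        self.entries.insert(key, value);
    }

    fn delete(&mut self, key: &[u8]) {
        // A delete only removes the previous digest; nothing is XORed in.
        if let Some(prev) = self.entries.remove(key) {
            self.xor_hash ^= kv_digest(key, &prev);
        }
    }
}

fn main() {
    let mut a = XorState::default();
    a.put(b"k1".to_vec(), b"v1".to_vec());
    a.put(b"k2".to_vec(), b"v2".to_vec());

    let mut b = XorState::default();
    b.put(b"k2".to_vec(), b"v2".to_vec());
    b.put(b"k1".to_vec(), b"v1".to_vec());

    // Same live entries, same hash, regardless of write order.
    assert_eq!(a.xor_hash, b.xor_hash);
}

Because XOR is commutative and associative, the resulting hash is independent of write order and costs O(1) per change, which is what makes it cheaper than the sparse Merkle tree; the trade-off is that, unlike the LSM tree root, it cannot serve inclusion proofs.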
diff --git a/massa-pool-worker/src/operation_pool.rs b/massa-pool-worker/src/operation_pool.rs
index 279f8150f15..e7f57c1d725 100644
--- a/massa-pool-worker/src/operation_pool.rs
+++ b/massa-pool-worker/src/operation_pool.rs
@@ -113,10 +113,6 @@ impl OperationPool {
            .iter()
            .copied()
            .collect::<Vec<_>>();
-        debug!(
-            "AURELIEN: Op pool: add_operations start with {} ops",
-            items.len()
-        );

        let mut added = PreHashSet::with_capacity(items.len());
        let mut removed = PreHashSet::with_capacity(items.len());
@@ -193,7 +189,6 @@ impl OperationPool {

        // Clean the removed operations from storage.
        self.storage.drop_operation_refs(&removed);
-        debug!("AURELIEN: Op pool: add_operations end");
    }

    /// get operations for block creation
@@ -202,10 +197,6 @@ impl OperationPool {
    /// - fit inside the block
    /// - is the most profitable for block producer
    pub fn get_block_operations(&self, slot: &Slot) -> (Vec<OperationId>, Storage) {
-        debug!(
-            "AURELIEN: Block prod {}: Op pool: get_block_operations start",
-            slot
-        );
        // init list of selected operation IDs
        let mut op_ids = Vec::new();

@@ -220,7 +211,6 @@
        // iterate over pool operations in the right thread, from best to worst
        for cursor in self.sorted_ops_per_thread[slot.thread as usize].iter() {
-            debug!("AURELIEN: Block prod {}: LEVEL2: Op pool: get_block_operations start one iter on op id {}", slot, cursor.get_id());
            // if we have reached the maximum number of operations, stop
            if remaining_ops == 0 {
                break;
            }
@@ -229,7 +219,6 @@
                .operations
                .get(&cursor.get_id())
                .expect("the operation should be in self.operations at this point");
-            debug!("AURELIEN: Block prod {}: LEVEL2: Op pool: get_block_operations one iter on op id {} cursor acquired", slot, cursor.get_id());
            // exclude ops for which the block slot is outside of their validity range
            if !op_info.validity_period_range.contains(&slot.period) {
                continue;
            }
@@ -245,7 +234,6 @@
                continue;
            }

-            debug!("AURELIEN: Block prod {}: LEVEL2: Op pool: get_block_operations one iter on op id {} before unexecuted check", slot, cursor.get_id());
            // check if the op was already executed
            // TODO batch this
            if self
@@ -255,7 +243,6 @@
            {
                continue;
            }
-            debug!("AURELIEN: Block prod {}: LEVEL2: Op pool: get_block_operations one iter on op id {} after unexecuted check", slot, cursor.get_id());
            // check balance
            //TODO: It's a weird behaviour because if the address is created afterwards this operation will be executed
            // and also it spams the pool maybe we should just try to put the operation if there is no balance and 0 gas price
@@ -275,7 +262,6 @@
            } else {
                continue;
            };
-            debug!("AURELIEN: Block prod {}: LEVEL2: Op pool: get_block_operations one iter on op id {} after balance check", slot, cursor.get_id());
            if *creator_balance < op_info.fee {
                continue;
            }
@@ -294,12 +280,7 @@
            // update balance cache
            *creator_balance = creator_balance.saturating_sub(op_info.max_spending);
-            debug!("AURELIEN: Block prod {}: LEVEL2: Op pool: get_block_operations end one iter on op id {}", slot, cursor.get_id());
        }
-        debug!(
-            "AURELIEN: Block prod {}: Op pool: get_block_operations end iterating",
-            slot
-        );

        // generate storage
        let mut res_storage = self.storage.clone_without_refs();
@@ -308,10 +289,6 @@
        if claimed_ops.len() != claim_ops.len() {
            panic!("could not claim all operations from storage");
        }
-        debug!(
-            "AURELIEN: Block prod {}: Op pool: get_block_operations end function",
-            slot
-        );

        (op_ids, res_storage)
    }
diff --git 
a/massa-protocol-worker/src/handlers/block_handler/retrieval.rs b/massa-protocol-worker/src/handlers/block_handler/retrieval.rs index b8f1711a065..8d1b2eddf95 100644 --- a/massa-protocol-worker/src/handlers/block_handler/retrieval.rs +++ b/massa-protocol-worker/src/handlers/block_handler/retrieval.rs @@ -32,7 +32,6 @@ use massa_models::{ block::{Block, BlockSerializer}, block_header::SecuredHeader, block_id::BlockId, - config::{GENESIS_TIMESTAMP, T0, THREAD_COUNT}, endorsement::SecureShareEndorsement, operation::{OperationId, SecureShareOperation}, prehash::{CapacityAllocator, PreHashMap, PreHashSet}, @@ -150,13 +149,11 @@ impl RetrievalThread { } match message { BlockMessage::AskForBlocks(block_infos) => { - debug!("AURELIEN: Received block message: AskForBlocks {:?} from {}", block_infos, peer_id); if let Err(err) = self.on_asked_for_blocks_received(peer_id.clone(), block_infos) { warn!("Error in on_asked_for_blocks_received: {:?}", err); } } BlockMessage::ReplyForBlocks(block_infos) => { - debug!("AURELIEN: Received block message: ReplyForBlocks {:?} from {}", block_infos, peer_id); for (block_id, block_info) in block_infos.into_iter() { if let Err(err) = self.on_block_info_received(peer_id.clone(), block_id, block_info) { warn!("Error in on_block_info_received: {:?}", err); @@ -167,7 +164,6 @@ impl RetrievalThread { } } BlockMessage::BlockHeader(header) => { - debug!("AURELIEN: Received block message: BlockHeader {:?} from {}", header, peer_id); massa_trace!(BLOCK_HEADER, { "peer_id": peer_id, "header": header}); if let Ok(Some((block_id, is_new))) = self.note_header_from_peer(&header, &peer_id) @@ -205,7 +201,6 @@ impl RetrievalThread { Ok(command) => { match command { BlockHandlerRetrievalCommand::WishlistDelta { new, remove } => { - debug!("AURELIEN: Received block message: command WishlistDelta {:?} {:?}", new, remove); massa_trace!("protocol.protocol_worker.process_command.wishlist_delta.begin", { "new": new, "remove": remove }); for (block_id, header) in new.into_iter() { self.block_wishlist.insert( @@ -1026,19 +1021,6 @@ impl RetrievalThread { header: header.clone(), operations: block_operation_ids.clone(), }; - let latency = MassaTime::now().unwrap().saturating_sub( - get_block_slot_timestamp( - THREAD_COUNT, - T0, - *GENESIS_TIMESTAMP, - header.content.slot, - ) - .unwrap(), - ); - println!( - "AURELIEN: Finish receive block id {} from peer {} with a latency of {}", - block_id, from_peer_id, latency - ); let mut content_serialized = Vec::new(); BlockSerializer::new() // todo : keep the serializer in the struct to avoid recreating it diff --git a/massa-protocol-worker/src/messages.rs b/massa-protocol-worker/src/messages.rs index d6e59dd85aa..5b6a40f9b0f 100644 --- a/massa-protocol-worker/src/messages.rs +++ b/massa-protocol-worker/src/messages.rs @@ -10,7 +10,6 @@ use peernet::{ MessagesHandler as PeerNetMessagesHandler, MessagesSerializer as PeerNetMessagesSerializer, }, }; -use tracing::debug; use crate::handlers::{ block_handler::{BlockMessage, BlockMessageSerializer}, @@ -228,7 +227,6 @@ pub struct MessagesHandler { impl PeerNetMessagesHandler for MessagesHandler { fn handle(&self, data: &[u8], peer_id: &PeerId) -> PeerNetResult<()> { - debug!("AURELIEN: received message of len {:?}", data.len()); let (data, raw_id) = self .id_deserializer .deserialize::(data) From d92081e041648e8dd247b2ea68789da9f5004cb5 Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Thu, 15 Jun 2023 18:27:18 +0200 Subject: [PATCH 22/24] update peernet --- Cargo.lock | 2 +- massa-node/Cargo.toml | 2 +- 
massa-protocol-exports/Cargo.toml | 2 +- massa-protocol-worker/Cargo.toml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b9905257d29..34a4055e87c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3647,7 +3647,7 @@ checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" [[package]] name = "peernet" version = "0.1.0" -source = "git+https://github.com/massalabs/PeerNet?branch=debug_logs#fe9b596161366553cccb0446a43218b288be1315" +source = "git+https://github.com/massalabs/PeerNet?rev=bf8adf5#bf8adf50e2427a6692c65f0dbb53c69b504185e0" dependencies = [ "crossbeam", "enum_delegate", diff --git a/massa-node/Cargo.toml b/massa-node/Cargo.toml index 0a02de3a396..9bb9380566e 100644 --- a/massa-node/Cargo.toml +++ b/massa-node/Cargo.toml @@ -17,7 +17,7 @@ tracing = { version = "0.1", features = [ "max_level_debug", "release_max_level_debug", ] } -peernet = { git = "https://github.com/massalabs/PeerNet", branch = "debug_logs" } +peernet = { git = "https://github.com/massalabs/PeerNet", rev = "bf8adf5" } tracing-subscriber = "0.3" paw = "1.0" rand = { version = "0.8.5", optional = true } diff --git a/massa-protocol-exports/Cargo.toml b/massa-protocol-exports/Cargo.toml index 31c731c25ce..13cf19ee0d0 100644 --- a/massa-protocol-exports/Cargo.toml +++ b/massa-protocol-exports/Cargo.toml @@ -11,7 +11,7 @@ nom = "=7.1" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" # TODO tag peernet version -peernet = { git = "https://github.com/massalabs/PeerNet", branch = "debug_logs" } +peernet = { git = "https://github.com/massalabs/PeerNet", rev = "bf8adf5" } tempfile = { version = "3.3", optional = true } # use with testing feature mockall = "0.11.4" diff --git a/massa-protocol-worker/Cargo.toml b/massa-protocol-worker/Cargo.toml index 990591aacbb..88e350aa7db 100644 --- a/massa-protocol-worker/Cargo.toml +++ b/massa-protocol-worker/Cargo.toml @@ -13,7 +13,7 @@ serde_json = "1.0" nom = "=7.1" num_enum = "0.5" # TODO tag peernet version -peernet = { git = "https://github.com/massalabs/PeerNet", branch = "debug_logs" } +peernet = { git = "https://github.com/massalabs/PeerNet", rev = "bf8adf5" } tempfile = { version = "3.3", optional = true } # use with testing feature rayon = "1.7.0" schnellru = "0.2.1" From d55a0d9c0000cce1ffe961a87748fb8826a14db1 Mon Sep 17 00:00:00 2001 From: sydhds Date: Thu, 15 Jun 2023 18:46:38 +0200 Subject: [PATCH 23/24] Use slot ts for Versioning check --- massa-bootstrap/src/client.rs | 2 +- massa-execution-worker/src/execution.rs | 2 +- massa-final-state/src/final_state.rs | 31 +++++++++++++++---------- 3 files changed, 21 insertions(+), 14 deletions(-) diff --git a/massa-bootstrap/src/client.rs b/massa-bootstrap/src/client.rs index d88320163e4..d3c29009977 100644 --- a/massa-bootstrap/src/client.rs +++ b/massa-bootstrap/src/client.rs @@ -416,7 +416,7 @@ pub fn get_state( { let mut final_state_guard = final_state.write(); - let only_use_xor = final_state_guard.get_only_use_xor(); + let only_use_xor = final_state_guard.get_only_use_xor(&Slot::new(0, 31)); if !bootstrap_config.keep_ledger { // load ledger from initial ledger file diff --git a/massa-execution-worker/src/execution.rs b/massa-execution-worker/src/execution.rs index 3576806174d..a7bc717122b 100644 --- a/massa-execution-worker/src/execution.rs +++ b/massa-execution-worker/src/execution.rs @@ -1747,7 +1747,7 @@ impl ExecutionState { ) }); - let use_only_xor = self.final_state.read().get_only_use_xor(); + let use_only_xor = 
self.final_state.read().get_only_use_xor(&slot); self.final_state.write().db.write().write_batch( db_batch, diff --git a/massa-final-state/src/final_state.rs b/massa-final-state/src/final_state.rs index 6ab8a67f680..deb74b98df0 100644 --- a/massa-final-state/src/final_state.rs +++ b/massa-final-state/src/final_state.rs @@ -25,6 +25,7 @@ use parking_lot::RwLock; use rocksdb::IteratorMode; use tracing::{debug, info, warn}; +use massa_models::timeslots::get_block_slot_timestamp; use massa_time::MassaTime; use std::sync::Arc; @@ -125,7 +126,7 @@ impl FinalState { }; if reset_final_state { - let only_use_xor = final_state.get_only_use_xor(); + let only_use_xor = final_state.get_only_use_xor(&slot); final_state.async_pool.reset(only_use_xor); final_state.pos_state.reset(only_use_xor); @@ -242,7 +243,7 @@ impl FinalState { // We compute the draws here because we need to feed_cycles when interpolating final_state.compute_initial_draws()?; - let only_use_xor = final_state.get_only_use_xor(); + let only_use_xor = final_state.get_only_use_xor(&recovered_slot); final_state.interpolate_downtime(only_use_xor)?; @@ -529,11 +530,10 @@ impl FinalState { /// /// USED ONLY FOR BOOTSTRAP pub fn reset(&mut self) { - let only_use_xor = self.get_only_use_xor(); + let slot = Slot::new(0, self.config.thread_count.saturating_sub(1)); + let only_use_xor = self.get_only_use_xor(&slot); - self.db - .write() - .reset(Slot::new(0, self.config.thread_count.saturating_sub(1))); + self.db.write().reset(slot); self.ledger.reset(only_use_xor); self.async_pool.reset(only_use_xor); self.pos_state.reset(only_use_xor); @@ -593,7 +593,7 @@ impl FinalState { &mut db_batch, ); - let only_use_xor = self.get_only_use_xor(); + let only_use_xor = self.get_only_use_xor(&slot); self.db .write() @@ -732,15 +732,22 @@ impl FinalState { } /// Temporary getter to know if we should compute the lsm tree during db writes - pub fn get_only_use_xor(&self) -> bool { - self.get_hash_kind_version() == 1 + pub fn get_only_use_xor(&self, slot: &Slot) -> bool { + let ts = get_block_slot_timestamp( + self.config.thread_count, + self.config.t0, + self.config.genesis_timestamp, + slot.clone(), + ) + .unwrap(); + self.get_hash_kind_version(ts) == 1 } - fn get_hash_kind_version(&self) -> u32 { + fn get_hash_kind_version(&self, ts: MassaTime) -> u32 { // Temp code // Return version for hash kind of final state: 0 -> LSM, 1 -> Xor - let now = MassaTime::now().expect("Cannot get current time"); + // let now = MassaTime::now().expect("Cannot get current time"); self.mip_store - .get_latest_component_version_at(&MipComponent::FinalStateHashKind, now) + .get_latest_component_version_at(&MipComponent::FinalStateHashKind, ts) } } From 8975bfdd5a2f70f9f40b0336f6b974ed05754a1d Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Thu, 15 Jun 2023 18:51:16 +0200 Subject: [PATCH 24/24] Fix clippy --- massa-execution-worker/src/execution.rs | 2 +- massa-final-state/src/final_state.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/massa-execution-worker/src/execution.rs b/massa-execution-worker/src/execution.rs index 712e71256fa..01aea27b6ae 100644 --- a/massa-execution-worker/src/execution.rs +++ b/massa-execution-worker/src/execution.rs @@ -1719,7 +1719,7 @@ impl ExecutionState { ) }); - let use_only_xor = self.final_state.read().get_only_use_xor(&slot); + let use_only_xor = self.final_state.read().get_only_use_xor(slot); self.final_state.write().db.write().write_batch( db_batch, diff --git a/massa-final-state/src/final_state.rs 
b/massa-final-state/src/final_state.rs index deb74b98df0..9ea565cd739 100644 --- a/massa-final-state/src/final_state.rs +++ b/massa-final-state/src/final_state.rs @@ -737,7 +737,7 @@ impl FinalState { self.config.thread_count, self.config.t0, self.config.genesis_timestamp, - slot.clone(), + *slot, ) .unwrap(); self.get_hash_kind_version(ts) == 1
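The hunk above closes the series: with `get_only_use_xor` now taking a slot, the hash kind is decided by the timestamp of the slot being written rather than by `MassaTime::now()`, so every node switches at the same point in chain time. A minimal sketch of that gating idea follows; the names are illustrative, the single activation timestamp is an assumed stand-in for the MIP state machine behind `get_latest_component_version_at`, and the timestamp formula mirrors `get_block_slot_timestamp`.

// Hypothetical gate; not the MipStore API.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum HashKind {
    LsmTree, // hash-kind version 0
    Xor,     // hash-kind version 1
}

/// Millisecond timestamp of a slot: genesis + period * t0 + thread * (t0 / thread_count).
fn slot_timestamp_ms(genesis_ms: u64, t0_ms: u64, thread_count: u64, period: u64, thread: u64) -> u64 {
    genesis_ms + period * t0_ms + thread * (t0_ms / thread_count)
}

struct HashKindGate {
    /// Timestamp at which the XOR hash kind becomes active, if scheduled.
    xor_active_from_ms: Option<u64>,
}

impl HashKindGate {
    fn hash_kind_at(&self, slot_ts_ms: u64) -> HashKind {
        match self.xor_active_from_ms {
            Some(at) if slot_ts_ms >= at => HashKind::Xor,
            _ => HashKind::LsmTree,
        }
    }

    /// Counterpart of get_only_use_xor(slot): true once the slot's own
    /// timestamp reaches the activation point.
    fn only_use_xor(&self, slot_ts_ms: u64) -> bool {
        self.hash_kind_at(slot_ts_ms) == HashKind::Xor
    }
}

fn main() {
    // Activation one minute after genesis; slot (period 42, thread 7) with
    // t0 = 16s and 32 threads lands well past it.
    let gate = HashKindGate { xor_active_from_ms: Some(1_686_000_060_000) };
    let ts = slot_timestamp_ms(1_686_000_000_000, 16_000, 32, 42, 7);
    assert!(gate.only_use_xor(ts));
}

Gating on slot time instead of wall-clock time also keeps replays deterministic: re-executing an old slot after bootstrap or downtime interpolation picks the same hash kind it used the first time around.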