Commit

Merge branch 'master' of github.com:aspectron/rusty-kaspa into wallet-store
aspect committed Jun 25, 2023
2 parents 7717269 + cca338f commit 31cd5bb
Showing 47 changed files with 521 additions and 313 deletions.
3 changes: 2 additions & 1 deletion Cargo.lock

Some generated files are not rendered by default.

22 changes: 16 additions & 6 deletions consensus/core/src/config/bps.rs
@@ -85,9 +85,7 @@ impl<const BPS: u64> Bps<BPS> {
}

pub const fn pruning_proof_m() -> u64 {
- // Since the important levels remain logarithmically long, it seems that this
- // constant does not need to scale with BPS.
- // TODO: finalize this
+ // No need to scale this constant with BPS since the important block levels (higher) remain logarithmically long
PRUNING_PROOF_M
}

@@ -105,9 +103,21 @@ impl<const BPS: u64> Bps<BPS> {
BPS * LEGACY_COINBASE_MATURITY
}

- // TODO: we might need to increase max_block_level (at least for mainnet) as a function of BPS
- // since higher BPS means easier difficulty puzzles -> less zeros in pow hash
- // pub const fn max_block_level() -> u64 { }
+ /// DAA score after which the pre-deflationary period switches to the deflationary period.
+ ///
+ /// This number is calculated as follows:
+ ///
+ /// - We define a year as 365.25 days
+ /// - Half a year in seconds = 365.25 / 2 * 24 * 60 * 60 = 15778800
+ /// - The network was down for three days shortly after launch
+ /// - Three days in seconds = 3 * 24 * 60 * 60 = 259200
+ pub const fn deflationary_phase_daa_score() -> u64 {
+ BPS * (15778800 - 259200)
+ }
+
+ pub const fn pre_deflationary_phase_base_subsidy() -> u64 {
+ 50000000000 / BPS
+ }
}

#[cfg(test)]
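A quick sanity check of the arithmetic above, as a standalone sketch rather than repository code (the runtime `bps` parameter stands in for the `BPS` const generic): scaling the DAA score up by BPS while dividing the per-block subsidy by BPS keeps the emission schedule fixed in wall-clock terms.

// Hedged sketch, not from the repository: verifies the constants quoted above.
const SECONDS_PER_HALF_YEAR: u64 = 15_778_800; // 365.25 / 2 * 24 * 60 * 60
const SECONDS_OF_DOWNTIME: u64 = 259_200; // 3 * 24 * 60 * 60

fn deflationary_phase_daa_score(bps: u64) -> u64 {
    bps * (SECONDS_PER_HALF_YEAR - SECONDS_OF_DOWNTIME)
}

fn pre_deflationary_phase_base_subsidy(bps: u64) -> u64 {
    50_000_000_000 / bps
}

fn main() {
    // One block per second reproduces the legacy mainnet schedule.
    assert_eq!(deflationary_phase_daa_score(1), 15_519_600);
    // Sompi emitted per second is invariant across BPS choices.
    for bps in [1u64, 10] {
        assert_eq!(bps * pre_deflationary_phase_base_subsidy(bps), 50_000_000_000);
    }
}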
4 changes: 2 additions & 2 deletions consensus/core/src/config/constants.rs
@@ -27,8 +27,8 @@ pub mod consensus {
pub const LEGACY_TIMESTAMP_DEVIATION_TOLERANCE: u64 = 132;

/// **New** timestamp deviation tolerance (seconds).
- /// KIP-0004: 605 (~10 minutes)
- pub const NEW_TIMESTAMP_DEVIATION_TOLERANCE: u64 = 605;
+ /// TODO: KIP-0004: 605 (~10 minutes)
+ pub const NEW_TIMESTAMP_DEVIATION_TOLERANCE: u64 = 132;

/// The desired interval between samples of the median time window (seconds).
/// KIP-0004: 10 seconds
16 changes: 7 additions & 9 deletions consensus/core/src/config/params.rs
@@ -75,7 +75,10 @@ pub struct Params {
pub mass_per_script_pub_key_byte: u64,
pub mass_per_sig_op: u64,
pub max_block_mass: u64,
+
+ /// DAA score after which the pre-deflationary period switches to the deflationary period
pub deflationary_phase_daa_score: u64,
+
pub pre_deflationary_phase_base_subsidy: u64,
pub coinbase_maturity: u64,
pub skip_proof_of_work: bool,
@@ -394,7 +397,8 @@ pub const TESTNET_PARAMS: Params = Params {

pub const TESTNET11_PARAMS: Params = Params {
dns_seeders: &[
- // No seeders yet
+ // This DNS seeder is run by Tiram
+ "seeder1-testnet-11.kaspad.net",
],
net: NetworkId::with_suffix(NetworkType::Testnet, 11),
genesis: TESTNET11_GENESIS,
@@ -421,6 +425,8 @@ pub const TESTNET11_PARAMS: Params = Params {
finality_depth: Testnet11Bps::finality_depth(),
pruning_depth: Testnet11Bps::pruning_depth(),
pruning_proof_m: Testnet11Bps::pruning_proof_m(),
+ deflationary_phase_daa_score: Testnet11Bps::deflationary_phase_daa_score(),
+ pre_deflationary_phase_base_subsidy: Testnet11Bps::pre_deflationary_phase_base_subsidy(),
coinbase_maturity: Testnet11Bps::coinbase_maturity(),

coinbase_payload_script_public_key_max_len: 150,
@@ -440,14 +446,6 @@ pub const TESTNET11_PARAMS: Params = Params {
mass_per_sig_op: 1000,
max_block_mass: 500_000,

- // deflationary_phase_daa_score is the DAA score after which the pre-deflationary period
- // switches to the deflationary period. This number is calculated as follows:
- // We define a year as 365.25 days
- // Half a year in seconds = 365.25 / 2 * 24 * 60 * 60 = 15778800
- // The network was down for three days shortly after launch
- // Three days in seconds = 3 * 24 * 60 * 60 = 259200
- deflationary_phase_daa_score: 15778800 - 259200,
- pre_deflationary_phase_base_subsidy: 50000000000,
skip_proof_of_work: false,
max_block_level: 250,
};
11 changes: 11 additions & 0 deletions consensus/core/src/networktype.rs
@@ -150,6 +150,17 @@ impl NetworkId {
NetworkType::Devnet => 16611,
}
}
+
+ pub fn iter() -> impl Iterator<Item = Self> {
+ static NETWORK_IDS: [NetworkId; 5] = [
+ NetworkId::new(NetworkType::Mainnet),
+ NetworkId::with_suffix(NetworkType::Testnet, 10),
+ NetworkId::with_suffix(NetworkType::Testnet, 11),
+ NetworkId::new(NetworkType::Devnet),
+ NetworkId::new(NetworkType::Simnet),
+ ];
+ NETWORK_IDS.iter().copied()
+ }
}

impl Deref for NetworkId {
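The new `iter` helper relies on `NetworkId` being `Copy` and const-constructible: the five known networks live in a `static` table and `copied()` yields owned values. A minimal self-contained sketch of the same pattern, with an illustrative `Id` type standing in for `NetworkId`:

// Illustrative stand-in type; only the static-table iteration pattern matters.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct Id {
    suffix: Option<u32>,
}

impl Id {
    const fn new() -> Self {
        Self { suffix: None }
    }

    const fn with_suffix(suffix: u32) -> Self {
        Self { suffix: Some(suffix) }
    }

    // Const-constructed values in a static table, iterated by copy.
    fn iter() -> impl Iterator<Item = Self> {
        static IDS: [Id; 3] = [Id::new(), Id::with_suffix(10), Id::with_suffix(11)];
        IDS.iter().copied()
    }
}

fn main() {
    assert_eq!(Id::iter().count(), 3);
}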
1 change: 1 addition & 0 deletions consensus/src/consensus/services.rs
@@ -129,6 +129,7 @@ impl ConsensusServices {
params.max_coinbase_payload_len,
params.deflationary_phase_daa_score,
params.pre_deflationary_phase_base_subsidy,
+ params.target_time_per_block,
);

let mass_calculator =
33 changes: 15 additions & 18 deletions consensus/src/model/stores/relations.rs
@@ -1,14 +1,13 @@
use itertools::Itertools;
use kaspa_consensus_core::BlockHashSet;
use kaspa_consensus_core::{blockhash::BlockHashes, BlockHashMap, BlockHasher, BlockLevel, HashMapCustomHasher};
- use kaspa_database::prelude::MemoryWriter;
use kaspa_database::prelude::StoreError;
use kaspa_database::prelude::DB;
use kaspa_database::prelude::{BatchDbWriter, DbWriter};
use kaspa_database::prelude::{CachedDbAccess, DbKey, DirectDbWriter};
+ use kaspa_database::prelude::{DirectWriter, MemoryWriter};
use kaspa_database::registry::{DatabaseStorePrefixes, SEPARATOR};
use kaspa_hashes::Hash;
- use parking_lot::{RwLockUpgradableReadGuard, RwLockWriteGuard};
use rocksdb::WriteBatch;
use std::sync::Arc;

@@ -24,7 +23,7 @@ pub trait RelationsStoreReader {

/// Low-level write API for `RelationsStore`
pub trait RelationsStore: RelationsStoreReader {
- type DefaultWriter: DbWriter;
+ type DefaultWriter: DirectWriter;
fn default_writer(&self) -> Self::DefaultWriter;

fn set_parents(&mut self, writer: impl DbWriter, hash: Hash, parents: BlockHashes) -> Result<(), StoreError>;
@@ -109,34 +108,32 @@ impl RelationsStore for DbRelationsStore {
}

pub struct StagingRelationsStore<'a> {
- store_read: RwLockUpgradableReadGuard<'a, DbRelationsStore>,
+ store: &'a mut DbRelationsStore,
staging_parents_writes: BlockHashMap<BlockHashes>,
staging_children_writes: BlockHashMap<BlockHashes>,
staging_deletions: BlockHashSet,
}

impl<'a> StagingRelationsStore<'a> {
- pub fn new(store_read: RwLockUpgradableReadGuard<'a, DbRelationsStore>) -> Self {
+ pub fn new(store: &'a mut DbRelationsStore) -> Self {
Self {
- store_read,
+ store,
staging_parents_writes: Default::default(),
staging_children_writes: Default::default(),
staging_deletions: Default::default(),
}
}

- pub fn commit(self, batch: &mut WriteBatch) -> Result<RwLockWriteGuard<'a, DbRelationsStore>, StoreError> {
- let store_write = RwLockUpgradableReadGuard::upgrade(self.store_read);
+ pub fn commit(self, batch: &mut WriteBatch) -> Result<(), StoreError> {
for (k, v) in self.staging_parents_writes {
- store_write.parents_access.write(BatchDbWriter::new(batch), k, v)?
+ self.store.parents_access.write(BatchDbWriter::new(batch), k, v)?
}
for (k, v) in self.staging_children_writes {
- store_write.children_access.write(BatchDbWriter::new(batch), k, v)?
+ self.store.children_access.write(BatchDbWriter::new(batch), k, v)?
}
// Deletions always come after mutations
- store_write.parents_access.delete_many(BatchDbWriter::new(batch), &mut self.staging_deletions.iter().copied())?;
- store_write.children_access.delete_many(BatchDbWriter::new(batch), &mut self.staging_deletions.iter().copied())?;
- Ok(store_write)
+ self.store.parents_access.delete_many(BatchDbWriter::new(batch), &mut self.staging_deletions.iter().copied())?;
+ self.store.children_access.delete_many(BatchDbWriter::new(batch), &mut self.staging_deletions.iter().copied())
}

fn check_not_in_deletions(&self, hash: Hash) -> Result<(), StoreError> {
@@ -179,7 +176,7 @@ impl RelationsStoreReader for StagingRelationsStore<'_> {
if let Some(data) = self.staging_parents_writes.get(&hash) {
Ok(BlockHashes::clone(data))
} else {
- self.store_read.get_parents(hash)
+ self.store.get_parents(hash)
}
}

@@ -188,20 +185,20 @@ impl RelationsStoreReader for StagingRelationsStore<'_> {
if let Some(data) = self.staging_children_writes.get(&hash) {
Ok(BlockHashes::clone(data))
} else {
- self.store_read.get_children(hash)
+ self.store.get_children(hash)
}
}

fn has(&self, hash: Hash) -> Result<bool, StoreError> {
if self.staging_deletions.contains(&hash) {
return Ok(false);
}
- Ok(self.staging_parents_writes.contains_key(&hash) || self.store_read.has(hash)?)
+ Ok(self.staging_parents_writes.contains_key(&hash) || self.store.has(hash)?)
}

fn counts(&self) -> Result<(usize, usize), StoreError> {
Ok((
- self.store_read
+ self.store
.parents_access
.iterator()
.map(|r| r.unwrap().0)
Expand All @@ -211,7 +208,7 @@ impl RelationsStoreReader for StagingRelationsStore<'_> {
.collect::<BlockHashSet>()
.difference(&self.staging_deletions)
.count(),
- self.store_read
+ self.store
.children_access
.iterator()
.map(|r| r.unwrap().0)
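The refactor above replaces lock juggling (an upgradable read guard held inside the staging store and upgraded on commit) with a plain `&mut` borrow: the caller takes the write lock once and lends the store to the staging layer, and `commit` now returns `Result<(), StoreError>` instead of a write guard. A condensed sketch of the read-through/stage/commit behavior, using toy types in place of `DbRelationsStore` and its `CachedDbAccess` machinery:

use std::collections::{HashMap, HashSet};

// Toy backing store standing in for DbRelationsStore.
struct Store {
    map: HashMap<u64, u64>,
}

// Reads fall through to the backing store unless shadowed by a staged
// write or deletion; commit applies everything at once.
struct Staging<'a> {
    store: &'a mut Store,
    writes: HashMap<u64, u64>,
    deletions: HashSet<u64>,
}

impl<'a> Staging<'a> {
    fn new(store: &'a mut Store) -> Self {
        Self { store, writes: HashMap::new(), deletions: HashSet::new() }
    }

    fn get(&self, key: u64) -> Option<u64> {
        if self.deletions.contains(&key) {
            return None;
        }
        self.writes.get(&key).or_else(|| self.store.map.get(&key)).copied()
    }

    fn commit(self) {
        for (k, v) in self.writes {
            self.store.map.insert(k, v);
        }
        // Deletions always come after mutations, mirroring the diff above.
        for k in self.deletions {
            self.store.map.remove(&k);
        }
    }
}

fn main() {
    let mut store = Store { map: HashMap::from([(1, 10), (2, 20)]) };
    let mut staging = Staging::new(&mut store);
    staging.writes.insert(3, 30);
    staging.deletions.insert(1);
    assert_eq!(staging.get(1), None); // staged deletion shadows the store
    assert_eq!(staging.get(3), Some(30)); // staged write visible before commit
    staging.commit();
    assert!(!store.map.contains_key(&1) && store.map.contains_key(&3));
}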
9 changes: 6 additions & 3 deletions consensus/src/pipeline/pruning_processor/processor.rs
@@ -344,7 +344,8 @@ impl PruningProcessor {
if !keep_blocks.contains(&current) {
let mut batch = WriteBatch::default();
let mut level_relations_write = self.relations_stores.write();
- let mut staging_relations = StagingRelationsStore::new(self.reachability_relations_store.upgradable_read());
+ let mut reachability_relations_write = self.reachability_relations_store.write();
+ let mut staging_relations = StagingRelationsStore::new(&mut reachability_relations_write);
let mut staging_reachability = StagingReachabilityStore::new(reachability_read);
let mut statuses_write = self.statuses_store.write();

@@ -370,8 +371,10 @@ impl PruningProcessor {
// TODO: consider adding block level to compact header data
let block_level = self.headers_store.get_header_with_block_level(current).unwrap().block_level;
(0..=block_level as usize).for_each(|level| {
- relations::delete_level_relations(BatchDbWriter::new(&mut batch), &mut level_relations_write[level], current)
+ let mut staging_level_relations = StagingRelationsStore::new(&mut level_relations_write[level]);
+ relations::delete_level_relations(MemoryWriter::default(), &mut staging_level_relations, current)
.unwrap_option();
+ staging_level_relations.commit(&mut batch).unwrap();
self.ghostdag_stores[level].delete_batch(&mut batch, current).unwrap_option();
});

@@ -388,7 +391,7 @@ impl PruningProcessor {
}

let reachability_write = staging_reachability.commit(&mut batch).unwrap();
- let reachability_relations_write = staging_relations.commit(&mut batch).unwrap();
+ staging_relations.commit(&mut batch).unwrap();

// Flush the batch to the DB
self.db.write(batch).unwrap();
32 changes: 23 additions & 9 deletions consensus/src/pipeline/virtual_processor/processor.rs
@@ -17,6 +17,7 @@ use crate::{
acceptance_data::{AcceptanceDataStoreReader, DbAcceptanceDataStore},
block_transactions::{BlockTransactionsStoreReader, DbBlockTransactionsStore},
daa::DbDaaStore,
+ depth::{DbDepthStore, DepthStoreReader},
ghostdag::{DbGhostdagStore, GhostdagData, GhostdagStoreReader},
headers::{DbHeadersStore, HeaderStoreReader},
past_pruning_points::DbPastPruningPointsStore,
@@ -77,6 +78,7 @@ use crossbeam_channel::{Receiver as CrossbeamReceiver, Sender as CrossbeamSender
use itertools::Itertools;
use kaspa_utils::binary_heap::BinaryHeapExtensions;
use parking_lot::{RwLock, RwLockUpgradableReadGuard};
+ use rand::seq::SliceRandom;
use rayon::ThreadPool;
use rocksdb::WriteBatch;
use std::{
@@ -115,6 +117,7 @@ pub struct VirtualStateProcessor {
pub(super) pruning_point_store: Arc<RwLock<DbPruningStore>>,
pub(super) past_pruning_points_store: Arc<DbPastPruningPointsStore>,
pub(super) body_tips_store: Arc<RwLock<DbTipsStore>>,
+ pub(super) depth_store: Arc<DbDepthStore>,

// Utxo-related stores
pub(super) utxo_diffs_store: Arc<DbUtxoDiffsStore>,
@@ -180,6 +183,7 @@ impl VirtualStateProcessor {
pruning_point_store: storage.pruning_point_store.clone(),
past_pruning_points_store: storage.past_pruning_points_store.clone(),
body_tips_store: storage.body_tips_store.clone(),
+ depth_store: storage.depth_store.clone(),
utxo_diffs_store: storage.utxo_diffs_store.clone(),
utxo_multisets_store: storage.utxo_multisets_store.clone(),
acceptance_data_store: storage.acceptance_data_store.clone(),
@@ -500,7 +504,21 @@ impl VirtualStateProcessor {
diff_point = self.calculate_utxo_state_relatively(stores, diff, diff_point, candidate);
if diff_point == candidate {
// This indicates that candidate has valid UTXO state and that `diff` represents its diff from virtual
- return (candidate, heap.into_sorted_iter().take(self.max_virtual_parent_candidates()).map(|s| s.hash).collect());
+
+ // All blocks with lower blue work than filtering_root are:
+ // 1. not in its future (since blue work is monotonic), and
+ // 2. will eventually be removed by the bounded merge check.
+ // Hence, as an optimization, we prefer removing such blocks in advance to allow valid tips to be considered.
+ let filtering_root = self.depth_store.merge_depth_root(candidate).unwrap();
+ let filtering_blue_work = self.ghostdag_primary_store.get_blue_work(filtering_root).unwrap_or_default();
+ return (
+ candidate,
+ heap.into_sorted_iter()
+ .take(self.max_virtual_parent_candidates())
+ .take_while(|s| s.blue_work >= filtering_blue_work)
+ .map(|s| s.hash)
+ .collect(),
+ );
} else {
debug!("Block candidate {} has invalid UTXO state and is ignored from Virtual chain.", candidate)
}
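Since `into_sorted_iter` yields candidates in descending blue-work order, a single `take_while` suffices to cut off everything below the merge-depth root's blue work. A hedged standalone sketch of just this filtering step, with simplified types and made-up values:

// Candidates arrive sorted by blue work, highest first.
fn filter_candidates(
    sorted_desc: Vec<(u64, &'static str)>, // (blue_work, block id)
    filtering_blue_work: u64,
    max_candidates: usize,
) -> Vec<&'static str> {
    sorted_desc
        .into_iter()
        .take(max_candidates)
        // Once one candidate falls below the threshold, all later ones do too.
        .take_while(|(blue_work, _)| *blue_work >= filtering_blue_work)
        .map(|(_, id)| id)
        .collect()
}

fn main() {
    let sorted = vec![(90, "a"), (70, "b"), (40, "c"), (10, "d")];
    assert_eq!(filter_candidates(sorted, 40, 3), vec!["a", "b", "c"]);
}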
@@ -530,14 +548,10 @@ impl VirtualStateProcessor {
// TODO: tests
let max_block_parents = self.max_block_parents as usize;

- // Prioritize half the blocks with highest blue work and half with lowest, so the network will merge splits faster.
- if candidates.len() >= max_block_parents {
- let max_additional_parents = max_block_parents - 1; // We already have the selected parent
- let mut j = candidates.len() - 1;
- for i in max_additional_parents / 2..max_additional_parents {
- candidates.swap(i, j);
- j -= 1;
- }
+ // Prioritize half the blocks with highest blue work and pick the rest randomly to ensure diversity between nodes
+ if candidates.len() > max_block_parents / 2 {
+ // `make_contiguous` should be a no op since the deque was just built
+ candidates.make_contiguous()[max_block_parents / 2..].shuffle(&mut rand::thread_rng());
}

let mut virtual_parents = Vec::with_capacity(min(max_block_parents, candidates.len() + 1));
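The old loop deterministically swapped the lowest-blue-work candidates into the back half, so every node proposed the same parent set; the new policy keeps the top half (by blue work) deterministic and shuffles the rest. A sketch of just this step, assuming `candidates` is a freshly built `VecDeque` sorted by descending blue work and the `rand` crate is available (as the new import above indicates):

use rand::seq::SliceRandom;
use std::collections::VecDeque;

fn diversify_candidates(mut candidates: VecDeque<u64>, max_block_parents: usize) -> VecDeque<u64> {
    if candidates.len() > max_block_parents / 2 {
        // make_contiguous is effectively free on a freshly built deque;
        // only the tail beyond the deterministic top half is shuffled.
        candidates.make_contiguous()[max_block_parents / 2..].shuffle(&mut rand::thread_rng());
    }
    candidates
}

fn main() {
    let candidates: VecDeque<u64> = (0..10).collect();
    let out = diversify_candidates(candidates, 4);
    // The deterministic prefix (indices 0..2) is untouched.
    assert_eq!(out.iter().take(2).copied().collect::<Vec<_>>(), vec![0, 1]);
}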
3 changes: 2 additions & 1 deletion consensus/src/processes/block_depth.rs
@@ -64,7 +64,8 @@ impl<S: DepthStoreReader, U: ReachabilityStoreReader, V: GhostdagStoreReader> Bl
};

// In this case we expect the pruning point or a block above it to be the block at depth.
- // Note that above we already verified the chain and distance conditions for this
+ // Note that above we already verified the chain and distance conditions for this.
+ // Additionally observe that if `current` is a valid hash it must not be pruned for the same reason.
if current == ORIGIN {
current = pruning_point;
}