Fix for the pruning proof rebuild issue (issue kaspanet#444) (kaspanet#449)

* add a strict assertion which should catch the pruning bug before actual data is pruned

* possible fix: add `block_at_depth_2m` as an additional traversal root

* rollback: rollback the previous fix since it's not the root cause

* add additional dbg info to assertion

* bug fix: write level relations for trusted blocks (blocks in the pruning point anticone of a newly synced node)

* enable mainnet mining by default

* simplify KIP-9 beta condition + more mass tests

* set default tracked addresses to 1M

* fix tracker prealloc property + add compile-time assertion for upper bound
michaelsutton authored and D-Stacks committed Jul 17, 2024
1 parent b7d1537 commit 13bd80b
Showing 8 changed files with 180 additions and 150 deletions.
9 changes: 8 additions & 1 deletion consensus/src/pipeline/header_processor/processor.rs
@@ -452,17 +452,24 @@ impl HeaderProcessor {
let mut batch = WriteBatch::default();

for (level, datum) in ghostdag_data.iter().enumerate() {
- // The data might have been already written when applying the pruning proof.
+ // This data might have been already written when applying the pruning proof.
self.ghostdag_stores[level].insert_batch(&mut batch, ctx.hash, datum).unwrap_or_exists();
}

let mut relations_write = self.relations_stores.write();
ctx.known_parents.into_iter().enumerate().for_each(|(level, parents_by_level)| {
// This data might have been already written when applying the pruning proof.
relations_write[level].insert_batch(&mut batch, ctx.hash, parents_by_level).unwrap_or_exists();
});

let statuses_write = self.statuses_store.set_batch(&mut batch, ctx.hash, StatusHeaderOnly).unwrap();

// Flush the batch to the DB
self.db.write(batch).unwrap();

// Calling the drops explicitly after the batch is written in order to avoid possible errors.
drop(statuses_write);
drop(relations_write);
}

pub fn process_genesis(&self) {
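The idempotent writes above are the crux of the trusted-blocks fix: when a header was already persisted while applying the pruning proof, re-inserting it must be a no-op rather than an error. A minimal sketch of the `unwrap_or_exists` idea (hypothetical error type and trait; the real helper lives elsewhere in the codebase and its exact signature may differ):

```rust
/// Hypothetical stand-in for the crate's real store error type.
#[derive(Debug)]
#[allow(dead_code)]
enum StoreError {
    KeyAlreadyExists(String),
    Other(String),
}

/// Sketch of an `unwrap_or_exists`-style extension: treat "already written"
/// as success, panic on any other store error.
trait UnwrapOrExists {
    fn unwrap_or_exists(self);
}

impl UnwrapOrExists for Result<(), StoreError> {
    fn unwrap_or_exists(self) {
        match self {
            Ok(()) | Err(StoreError::KeyAlreadyExists(_)) => {}
            Err(e) => panic!("unexpected store error: {:?}", e),
        }
    }
}

fn main() {
    // A datum already written by the pruning-proof path: re-insertion is fine.
    let r: Result<(), StoreError> = Err(StoreError::KeyAlreadyExists("block hash".into()));
    r.unwrap_or_exists();
}
```

The explicit `drop`s read as the complementary half of this change: the relations and statuses write guards are presumably held until `self.db.write(batch)` flushes atomically, so concurrent readers never observe the in-memory stores ahead of the database.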
@@ -294,7 +294,7 @@ impl VirtualStateProcessor {
self.populate_mempool_transaction_in_utxo_context(mutable_tx, utxo_view)?;

// For non-activated nets (mainnet, TN10) we can update mempool rules to KIP9 beta asap. For
- // TN11 we need to hard-fork consensus first (since the new beta rules are more relaxed)
+ // TN11 we need to hard-fork consensus first (since the new beta rules are more permissive)
let kip9_version = if self.storage_mass_activation_daa_score == u64::MAX { Kip9Version::Beta } else { Kip9Version::Alpha };

// Calc the full contextual mass including storage mass
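The activation sentinel drives the version choice above. A standalone sketch of that selection logic (illustrative; the real code reads the activation score from consensus params):

```rust
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
enum Kip9Version {
    Alpha,
    Beta,
}

/// Mirrors the selection above: if storage mass was never activated on this
/// network (sentinel value u64::MAX), the mempool may apply the relaxed Beta
/// rules immediately; otherwise it stays on Alpha until a consensus hard fork.
fn kip9_version(storage_mass_activation_daa_score: u64) -> Kip9Version {
    if storage_mass_activation_daa_score == u64::MAX {
        Kip9Version::Beta
    } else {
        Kip9Version::Alpha
    }
}

fn main() {
    assert_eq!(kip9_version(u64::MAX), Kip9Version::Beta); // not activated: e.g. mainnet, TN10
    assert_eq!(kip9_version(1_000_000), Kip9Version::Alpha); // activated: e.g. TN11
}
```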
201 changes: 73 additions & 128 deletions consensus/src/processes/mass.rs
@@ -91,10 +91,13 @@ impl MassCalculator {
let ins_len = tx.tx().inputs.len() as u64;

/*
KIP-0009 relaxed formula for the cases |O| = 1 OR |O| <= |I| <= 2:
max( 0 , C·( |O|/H(O) - |I|/H(I) ) )
+ Note: in the case |I| = 1 both formulas are equal, yet the following code (harmonic_ins) is a bit more efficient.
+ Hence, we transform the condition to |O| = 1 OR |I| = 1 OR |O| = |I| = 2 which is equivalent (and faster).
*/
- if version == Kip9Version::Beta && (outs_len == 1 || (outs_len <= ins_len && ins_len <= 2)) {
+ if version == Kip9Version::Beta && (outs_len == 1 || ins_len == 1 || (outs_len == 2 && ins_len == 2)) {
let harmonic_ins = tx
.populated_inputs()
.map(|(_, entry)| self.storage_mass_parameter / entry.amount)
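For intuition, a worked instance of the relaxed formula (derived from the values in `test_mass_storage_beta` below; not part of the commit). Since H is the harmonic mean, |O|/H(O) collapses to a sum of reciprocals. With C = 10^12, inputs {100, 200} and outputs {50, 250}:

```latex
C\left(\frac{|O|}{H(O)} - \frac{|I|}{H(I)}\right)
  = C\left(\sum_{o \in O}\frac{1}{o} - \sum_{i \in I}\frac{1}{i}\right)
  = 10^{12}\left(\frac{1}{50} + \frac{1}{250}\right) - 10^{12}\left(\frac{1}{100} + \frac{1}{200}\right)
  = 2.4\cdot 10^{10} - 1.5\cdot 10^{10}
  = 9\cdot 10^{9}
```

which matches `assert_eq!(storage_mass, 9000000000)` in the new test.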
@@ -144,63 +147,8 @@ mod tests {

#[test]
fn test_mass_storage() {
- let script_pub_key = ScriptVec::from_slice(&[]);
- let prev_tx_id = TransactionId::from_str("880eb9819a31821d9d2399e2f35e2433b72637e393d71ecc9b8d0250f49153c3").unwrap();
-
// Tx with less outs than ins
- let tx = Transaction::new(
- 0,
- vec![
- TransactionInput {
- previous_outpoint: TransactionOutpoint { transaction_id: prev_tx_id, index: 0 },
- signature_script: vec![],
- sequence: 0,
- sig_op_count: 0,
- },
- TransactionInput {
- previous_outpoint: TransactionOutpoint { transaction_id: prev_tx_id, index: 1 },
- signature_script: vec![],
- sequence: 1,
- sig_op_count: 0,
- },
- TransactionInput {
- previous_outpoint: TransactionOutpoint { transaction_id: prev_tx_id, index: 2 },
- signature_script: vec![],
- sequence: 2,
- sig_op_count: 0,
- },
- ],
- vec![
- TransactionOutput { value: 300, script_public_key: ScriptPublicKey::new(0, script_pub_key.clone()) },
- TransactionOutput { value: 300, script_public_key: ScriptPublicKey::new(0, script_pub_key.clone()) },
- ],
- 1615462089000,
- SubnetworkId::from_bytes([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
- 0,
- vec![],
- );
-
- let entries = vec![
- UtxoEntry {
- amount: 100,
- script_public_key: ScriptPublicKey::new(0, script_pub_key.clone()),
- block_daa_score: 0,
- is_coinbase: false,
- },
- UtxoEntry {
- amount: 200,
- script_public_key: ScriptPublicKey::new(0, script_pub_key.clone()),
- block_daa_score: 0,
- is_coinbase: false,
- },
- UtxoEntry {
- amount: 300,
- script_public_key: ScriptPublicKey::new(0, script_pub_key.clone()),
- block_daa_score: 0,
- is_coinbase: false,
- },
- ];
- let mut tx = MutableTransaction::with_entries(tx, entries);
+ let mut tx = generate_tx_from_amounts(&[100, 200, 300], &[300, 300]);
let test_version = Kip9Version::Alpha;

// Assert the formula: max( 0 , C·( |O|/H(O) - |I|/A(I) ) )
@@ -218,74 +166,8 @@
assert_eq!(storage_mass, storage_mass_parameter / 50 + storage_mass_parameter / 550 - 3 * (storage_mass_parameter / 200));

// Create a tx with more outs than ins
- let tx = Transaction::new(
- 0,
- vec![
- TransactionInput {
- previous_outpoint: TransactionOutpoint { transaction_id: prev_tx_id, index: 0 },
- signature_script: vec![],
- sequence: 0,
- sig_op_count: 0,
- },
- TransactionInput {
- previous_outpoint: TransactionOutpoint { transaction_id: prev_tx_id, index: 1 },
- signature_script: vec![],
- sequence: 1,
- sig_op_count: 0,
- },
- TransactionInput {
- previous_outpoint: TransactionOutpoint { transaction_id: prev_tx_id, index: 2 },
- signature_script: vec![],
- sequence: 2,
- sig_op_count: 0,
- },
- ],
- vec![
- TransactionOutput {
- value: 10_000 * SOMPI_PER_KASPA,
- script_public_key: ScriptPublicKey::new(0, script_pub_key.clone()),
- },
- TransactionOutput {
- value: 10_000 * SOMPI_PER_KASPA,
- script_public_key: ScriptPublicKey::new(0, script_pub_key.clone()),
- },
- TransactionOutput {
- value: 10_000 * SOMPI_PER_KASPA,
- script_public_key: ScriptPublicKey::new(0, script_pub_key.clone()),
- },
- TransactionOutput {
- value: 10_000 * SOMPI_PER_KASPA,
- script_public_key: ScriptPublicKey::new(0, script_pub_key.clone()),
- },
- ],
- 1615462089000,
- SubnetworkId::from_bytes([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
- 0,
- vec![],
- );
-
- let entries = vec![
- UtxoEntry {
- amount: 10_000 * SOMPI_PER_KASPA,
- script_public_key: ScriptPublicKey::new(0, script_pub_key.clone()),
- block_daa_score: 0,
- is_coinbase: false,
- },
- UtxoEntry {
- amount: 10_000 * SOMPI_PER_KASPA,
- script_public_key: ScriptPublicKey::new(0, script_pub_key.clone()),
- block_daa_score: 0,
- is_coinbase: false,
- },
- UtxoEntry {
- amount: 20_000 * SOMPI_PER_KASPA,
- script_public_key: ScriptPublicKey::new(0, script_pub_key.clone()),
- block_daa_score: 0,
- is_coinbase: false,
- },
- ];
- let mut tx = MutableTransaction::with_entries(tx, entries);
-
+ let base_value = 10_000 * SOMPI_PER_KASPA;
+ let mut tx = generate_tx_from_amounts(&[base_value, base_value, base_value * 2], &[base_value; 4]);
let storage_mass_parameter = STORAGE_MASS_PARAMETER;
let storage_mass =
MassCalculator::new(0, 0, 0, storage_mass_parameter).calc_tx_storage_mass(&tx.as_verifiable(), test_version).unwrap();
Expand All @@ -305,7 +187,70 @@ mod tests {
let storage_mass =
MassCalculator::new(0, 0, 0, storage_mass_parameter).calc_tx_storage_mass(&tx.as_verifiable(), test_version).unwrap();
assert_eq!(storage_mass, 0);
}

+ #[test]
+ fn test_mass_storage_beta() {
+ // 2:2 transaction
+ let mut tx = generate_tx_from_amounts(&[100, 200], &[50, 250]);
+ let storage_mass_parameter = 10u64.pow(12);
+ let test_version = Kip9Version::Beta;
+ // Assert the formula: max( 0 , C·( |O|/H(O) - |I|/H(I) ) )
+
+ let storage_mass =
+ MassCalculator::new(0, 0, 0, storage_mass_parameter).calc_tx_storage_mass(&tx.as_verifiable(), test_version).unwrap();
+ assert_eq!(storage_mass, 9000000000);
+
+ // Set outputs to be equal to inputs
+ tx.tx.outputs[0].value = 100;
+ tx.tx.outputs[1].value = 200;
+ let storage_mass =
+ MassCalculator::new(0, 0, 0, storage_mass_parameter).calc_tx_storage_mass(&tx.as_verifiable(), test_version).unwrap();
+ assert_eq!(storage_mass, 0);
+
- drop(script_pub_key);
+ // Remove an output and make sure the other is small enough to make storage mass greater than zero
+ tx.tx.outputs.pop();
+ tx.tx.outputs[0].value = 50;
+ let storage_mass =
+ MassCalculator::new(0, 0, 0, storage_mass_parameter).calc_tx_storage_mass(&tx.as_verifiable(), test_version).unwrap();
+ assert_eq!(storage_mass, 5000000000);
+ }
+
+ fn generate_tx_from_amounts(ins: &[u64], outs: &[u64]) -> MutableTransaction<Transaction> {
+ let script_pub_key = ScriptVec::from_slice(&[]);
+ let prev_tx_id = TransactionId::from_str("880eb9819a31821d9d2399e2f35e2433b72637e393d71ecc9b8d0250f49153c3").unwrap();
+ let tx = Transaction::new(
+ 0,
+ (0..ins.len())
+ .map(|i| TransactionInput {
+ previous_outpoint: TransactionOutpoint { transaction_id: prev_tx_id, index: i as u32 },
+ signature_script: vec![],
+ sequence: 0,
+ sig_op_count: 0,
+ })
+ .collect(),
+ outs.iter()
+ .copied()
+ .map(|out_amount| TransactionOutput {
+ value: out_amount,
+ script_public_key: ScriptPublicKey::new(0, script_pub_key.clone()),
+ })
+ .collect(),
+ 1615462089000,
+ SubnetworkId::from_bytes([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
+ 0,
+ vec![],
+ );
+ let entries = ins
+ .iter()
+ .copied()
+ .map(|in_amount| UtxoEntry {
+ amount: in_amount,
+ script_public_key: ScriptPublicKey::new(0, script_pub_key.clone()),
+ block_daa_score: 0,
+ is_coinbase: false,
+ })
+ .collect();
+ MutableTransaction::with_entries(tx, entries)
+ }
}
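The comment in the formula hunk above claims the rewritten gate `|O| = 1 OR |I| = 1 OR |O| = |I| = 2` is a safe replacement for `|O| = 1 OR |O| <= |I| <= 2`: it covers every case the old gate accepted, and the cases it adds are exactly the `|I| = 1` ones, where the relaxed and general formulas coincide. A standalone sanity check of that claim (illustrative, not part of the commit):

```rust
fn old_gate(outs: u64, ins: u64) -> bool {
    outs == 1 || (outs <= ins && ins <= 2)
}

fn new_gate(outs: u64, ins: u64) -> bool {
    outs == 1 || ins == 1 || (outs == 2 && ins == 2)
}

fn main() {
    for outs in 1..=16u64 {
        for ins in 1..=16u64 {
            // The new gate must cover every case the old gate accepted...
            if old_gate(outs, ins) {
                assert!(new_gate(outs, ins));
            }
            // ...and may only add |I| = 1 cases, where the relaxed and
            // general formulas agree anyway.
            if new_gate(outs, ins) && !old_gate(outs, ins) {
                assert_eq!(ins, 1);
            }
        }
    }
    println!("gates agree up to the harmless |I| = 1 extension");
}
```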
56 changes: 56 additions & 0 deletions consensus/src/processes/pruning_proof/mod.rs
@@ -691,11 +691,67 @@ impl PruningProofManager {
}
}

+ // Temp assertion for verifying a bug fix: assert that the full 2M chain is actually contained in the composed level proof
+ let set = BlockHashSet::from_iter(headers.iter().map(|h| h.hash));
+ let chain_2m = self
+ .chain_up_to_depth(&*self.ghostdag_stores[level], selected_tip, 2 * self.pruning_proof_m)
+ .map_err(|err| {
+ dbg!(level, selected_tip, block_at_depth_2m, root);
+ format!("Assert 2M chain -- level: {}, err: {}", level, err)
+ })
+ .unwrap();
+ let chain_2m_len = chain_2m.len();
+ for (i, chain_hash) in chain_2m.into_iter().enumerate() {
+ if !set.contains(&chain_hash) {
+ let next_level_tip = selected_tip_by_level[level + 1];
+ let next_level_chain_m =
+ self.chain_up_to_depth(&*self.ghostdag_stores[level + 1], next_level_tip, self.pruning_proof_m).unwrap();
+ let next_level_block_m = next_level_chain_m.last().copied().unwrap();
+ dbg!(next_level_chain_m.len());
+ dbg!(self.ghostdag_stores[level + 1].get_compact_data(next_level_tip).unwrap().blue_score);
+ dbg!(self.ghostdag_stores[level + 1].get_compact_data(next_level_block_m).unwrap().blue_score);
+ dbg!(self.ghostdag_stores[level].get_compact_data(selected_tip).unwrap().blue_score);
+ dbg!(self.ghostdag_stores[level].get_compact_data(block_at_depth_2m).unwrap().blue_score);
+ dbg!(level, selected_tip, block_at_depth_2m, root);
+ panic!("Assert 2M chain -- missing block {} at index {} out of {} chain blocks", chain_hash, i, chain_2m_len);
+ }
+ }

headers
})
.collect_vec()
}

+ /// Copy of `block_at_depth` which returns the full chain up to depth. Temporarily used for assertion purposes.
+ fn chain_up_to_depth(
+ &self,
+ ghostdag_store: &impl GhostdagStoreReader,
+ high: Hash,
+ depth: u64,
+ ) -> Result<Vec<Hash>, PruningProofManagerInternalError> {
+ let high_gd = ghostdag_store
+ .get_compact_data(high)
+ .map_err(|err| PruningProofManagerInternalError::BlockAtDepth(format!("high: {high}, depth: {depth}, {err}")))?;
+ let mut current_gd = high_gd;
+ let mut current = high;
+ let mut res = vec![current];
+ while current_gd.blue_score + depth >= high_gd.blue_score {
+ if current_gd.selected_parent.is_origin() {
+ break;
+ }
+ let prev = current;
+ current = current_gd.selected_parent;
+ res.push(current);
+ current_gd = ghostdag_store.get_compact_data(current).map_err(|err| {
+ PruningProofManagerInternalError::BlockAtDepth(format!(
+ "high: {}, depth: {}, current: {}, high blue score: {}, current blue score: {}, {}",
+ high, depth, prev, high_gd.blue_score, current_gd.blue_score, err
+ ))
+ })?;
+ }
+ Ok(res)
+ }

fn block_at_depth(
&self,
ghostdag_store: &impl GhostdagStoreReader,
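To see what the walk in `chain_up_to_depth` computes, here is a self-contained toy model (linear chain, `blue_score` equal to index; illustrative only, not from the source). Note the returned chain extends one block past the requested depth: its last element is the first block whose blue score is more than `depth` below the tip's.

```rust
// Toy model of the selected-parent walk above: blocks are indexed 0..n,
// block i's selected parent is i - 1, and blue_score(i) = i.
fn chain_up_to_depth_toy(blue_scores: &[u64], high: usize, depth: u64) -> Vec<usize> {
    let high_score = blue_scores[high];
    let mut current = high;
    let mut res = vec![current];
    // Descend the selected-parent chain while still within `depth` of the tip.
    while blue_scores[current] + depth >= high_score {
        if current == 0 {
            break; // origin reached
        }
        current -= 1;
        res.push(current);
    }
    res
}

fn main() {
    let blue_scores: Vec<u64> = (0..100).collect();
    let chain = chain_up_to_depth_toy(&blue_scores, 99, 10);
    // Walks from blue score 99 down to block 88: blocks 99..=88, inclusive.
    assert_eq!(chain.len(), 12);
    assert_eq!(*chain.last().unwrap(), 88);
    println!("chain covers blocks {:?}..={:?}", chain.last(), chain.first());
}
```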
16 changes: 11 additions & 5 deletions kaspad/src/args.rs
@@ -106,9 +106,9 @@ impl Default for Args {
outbound_target: 8,
inbound_limit: 128,
rpc_max_clients: 128,
- max_tracked_addresses: Tracker::DEFAULT_MAX_ADDRESSES,
+ max_tracked_addresses: 0,
enable_unsynced_mining: false,
- enable_mainnet_mining: false,
+ enable_mainnet_mining: true,
testnet: false,
testnet_suffix: 10,
devnet: false,
@@ -308,16 +308,17 @@ pub fn cli() -> Command {
.long("enable-mainnet-mining")
.action(ArgAction::SetTrue)
.hide(true)
.help("Allow mainnet mining (do not use unless you know what you are doing)"),
.help("Allow mainnet mining (currently enabled by default while the flag is kept for backwards compatibility)"),
)
.arg(arg!(--utxoindex "Enable the UTXO index"))
.arg(
Arg::new("max-tracked-addresses")
.long("max-tracked-addresses")
.require_equals(true)
.value_parser(clap::value_parser!(usize))
.help(format!("Max preallocated number of addresses tracking UTXO changed events (default: {}, maximum: {}).
Value 0 prevents the preallocation, leading to a 0 memory footprint as long as unused but then to a sub-optimal footprint when used.", Tracker::DEFAULT_MAX_ADDRESSES, Tracker::MAX_ADDRESS_UPPER_BOUND)),
.help(format!("Max (preallocated) number of addresses being tracked for UTXO changed events (default: {}, maximum: {}).
Setting to 0 prevents the preallocation and sets the maximum to {}, leading to 0 memory footprint as long as unused but to sub-optimal footprint if used.",
0, Tracker::MAX_ADDRESS_UPPER_BOUND, Tracker::DEFAULT_MAX_ADDRESSES)),
)
.arg(arg!(--testnet "Use the test network"))
.arg(
Expand Down Expand Up @@ -455,6 +456,11 @@ impl Args {
#[cfg(feature = "devnet-prealloc")]
prealloc_amount: arg_match_unwrap_or::<u64>(&m, "prealloc-amount", defaults.prealloc_amount),
};

+ if arg_match_unwrap_or::<bool>(&m, "enable-mainnet-mining", false) {
+ println!("\nNOTE: The flag --enable-mainnet-mining is deprecated and defaults to true even without being explicitly set\n")
+ }

Ok(args)
}
}
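The tracker change itself ("fix tracker prealloc property + add compile-time assertion for upper bound") lands in a file not shown in this view. In Rust such a bound check can be written as a const assertion that fails the build if violated; a hypothetical sketch (the constant names mirror those referenced in the help text above, the values are illustrative, with only the 1M default implied by the commit message):

```rust
struct Tracker;

impl Tracker {
    /// Illustrative values; the real constants live in the tracker module.
    pub const DEFAULT_MAX_ADDRESSES: usize = 1_000_000;
    pub const MAX_ADDRESS_UPPER_BOUND: usize = 1 << 21;
}

// Compile-time assertion: evaluated in a const context, so a default that
// exceeds the upper bound fails compilation instead of surfacing at runtime.
const _: () = assert!(Tracker::DEFAULT_MAX_ADDRESSES <= Tracker::MAX_ADDRESS_UPPER_BOUND);

fn main() {
    println!(
        "default: {}, upper bound: {}",
        Tracker::DEFAULT_MAX_ADDRESSES,
        Tracker::MAX_ADDRESS_UPPER_BOUND
    );
}
```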