From b5dba770569402bfb2363f29d5e7cb3614add0ad Mon Sep 17 00:00:00 2001 From: Greg Fitzgerald Date: Mon, 13 Jan 2020 14:13:52 -0700 Subject: [PATCH] Rename blocktree to blockstore (#7757) automerge --- banking-bench/src/main.rs | 10 +- book/art/validator.bob | 6 +- book/src/SUMMARY.md | 4 +- book/src/cluster/managing-forks.md | 2 +- .../{blocktree.md => blockstore.md} | 42 +- .../implemented-proposals/repair-service.md | 16 +- .../validator-timestamp-oracle.md | 2 +- book/src/proposals/tick-verification.md | 12 +- book/src/proposals/validator-proposal.md | 1 - .../tvu/{blocktree.md => blockstore.md} | 42 +- core/Cargo.toml | 2 +- core/benches/banking_stage.rs | 20 +- core/benches/{blocktree.rs => blockstore.rs} | 50 +- core/src/archiver.rs | 48 +- core/src/banking_stage.rs | 95 +- core/src/blockstream_service.rs | 22 +- core/src/broadcast_stage.rs | 61 +- .../broadcast_fake_shreds_run.rs | 12 +- .../fail_entry_verification_broadcast_run.rs | 14 +- .../broadcast_stage/standard_broadcast_run.rs | 81 +- core/src/chacha.rs | 22 +- core/src/chacha_cuda.rs | 26 +- core/src/cluster_info.rs | 99 +- core/src/cluster_info_repair_listener.rs | 78 +- core/src/gossip_service.rs | 6 +- core/src/ledger_cleanup_service.rs | 50 +- core/src/poh_recorder.rs | 172 ++-- core/src/poh_service.rs | 10 +- core/src/repair_service.rs | 162 ++-- core/src/replay_stage.rs | 126 +-- core/src/result.rs | 10 +- core/src/retransmit_stage.rs | 12 +- core/src/rpc.rs | 74 +- core/src/rpc_service.rs | 10 +- core/src/shred_fetch_stage.rs | 2 +- core/src/storage_stage.rs | 16 +- core/src/tpu.rs | 6 +- core/src/transaction_status_service.rs | 10 +- core/src/tvu.rs | 30 +- core/src/validator.rs | 52 +- core/src/window_service.rs | 62 +- core/tests/ledger_cleanup.rs | 39 +- core/tests/storage_stage.rs | 16 +- genesis/src/main.rs | 2 +- ledger-tool/src/main.rs | 64 +- ledger/src/bank_forks_utils.rs | 16 +- ledger/src/{blocktree.rs => blockstore.rs} | 894 +++++++++--------- .../src/{blocktree_db.rs => blockstore_db.rs} | 16 +- .../{blocktree_meta.rs => blockstore_meta.rs} | 2 +- ...e_processor.rs => blockstore_processor.rs} | 326 ++++--- ledger/src/erasure.rs | 4 +- ledger/src/leader_schedule_cache.rs | 33 +- ledger/src/lib.rs | 8 +- ledger/src/rooted_slot_iterator.rs | 74 +- ledger/tests/{blocktree.rs => blockstore.rs} | 22 +- local-cluster/src/cluster_tests.rs | 16 +- local-cluster/tests/archiver.rs | 17 +- local-cluster/tests/local_cluster.rs | 18 +- .../dashboards/testnet-monitor.json | 6 +- 59 files changed, 1616 insertions(+), 1534 deletions(-) rename book/src/implemented-proposals/{blocktree.md => blockstore.md} (62%) rename book/src/validator/tvu/{blocktree.md => blockstore.md} (62%) rename core/benches/{blocktree.rs => blockstore.rs} (71%) rename ledger/src/{blocktree.rs => blockstore.rs} (86%) rename ledger/src/{blocktree_db.rs => blockstore_db.rs} (98%) rename ledger/src/{blocktree_meta.rs => blockstore_meta.rs} (99%) rename ledger/src/{blocktree_processor.rs => blockstore_processor.rs} (88%) rename ledger/tests/{blocktree.rs => blockstore.rs} (60%) diff --git a/banking-bench/src/main.rs b/banking-bench/src/main.rs index 6790fb0ca5a7b8..c793f0d5a1d7af 100644 --- a/banking-bench/src/main.rs +++ b/banking-bench/src/main.rs @@ -10,7 +10,7 @@ use solana_core::packet::to_packets_chunked; use solana_core::poh_recorder::PohRecorder; use solana_core::poh_recorder::WorkingBankEntry; use solana_ledger::bank_forks::BankForks; -use solana_ledger::{blocktree::Blocktree, get_tmp_ledger_path}; +use solana_ledger::{blockstore::Blockstore, 
get_tmp_ledger_path}; use solana_measure::measure::Measure; use solana_runtime::bank::Bank; use solana_sdk::hash::Hash; @@ -139,11 +139,11 @@ fn main() { let mut verified: Vec<_> = to_packets_chunked(&transactions.clone(), PACKETS_PER_BATCH); let ledger_path = get_tmp_ledger_path!(); { - let blocktree = Arc::new( - Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"), + let blockstore = Arc::new( + Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"), ); let (exit, poh_recorder, poh_service, signal_receiver) = - create_test_recorder(&bank, &blocktree, None); + create_test_recorder(&bank, &blockstore, None); let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info); let cluster_info = Arc::new(RwLock::new(cluster_info)); let banking_stage = BankingStage::new( @@ -302,5 +302,5 @@ fn main() { sleep(Duration::from_secs(1)); debug!("waited for poh_service"); } - let _unused = Blocktree::destroy(&ledger_path); + let _unused = Blockstore::destroy(&ledger_path); } diff --git a/book/art/validator.bob b/book/art/validator.bob index 74ed0846accd7b..87c7610a9a0116 100644 --- a/book/art/validator.bob +++ b/book/art/validator.bob @@ -18,9 +18,9 @@ | | `-------` `--------` `--+---------` | | | | | | | ^ ^ | | | `------------` | | | | | v | | | - | | | .--+--------. | | | - | | | | Blocktree | | | | - | | | `-----------` | | .------------. | + | | | .--+---------. | | | + | | | | Blockstore | | | | + | | | `------------` | | .------------. | | | | ^ | | | | | | | | | | | | Downstream | | | | .--+--. .-------+---. | | | Validators | | diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index aaf27733154d90..ff0a57f591e9d5 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -21,7 +21,7 @@ * [Anatomy of a Validator](validator/README.md) * [TPU](validator/tpu.md) * [TVU](validator/tvu/README.md) - * [Blocktree](validator/tvu/blocktree.md) + * [Blockstore](validator/tvu/blockstore.md) * [Gossip Service](validator/gossip.md) * [The Runtime](validator/runtime.md) * [Anatomy of a Transaction](transaction.md) @@ -62,7 +62,7 @@ * [Block Confirmation](proposals/block-confirmation.md) * [ABI Management](proposals/abi-management.md) * [Implemented Design Proposals](implemented-proposals/README.md) - * [Blocktree](implemented-proposals/blocktree.md) + * [Blockstore](implemented-proposals/blockstore.md) * [Cluster Software Installation and Updates](implemented-proposals/installer.md) * [Cluster Economics](implemented-proposals/ed_overview/README.md) * [Validation-client Economics](implemented-proposals/ed_overview/ed_validation_client_economics/README.md) diff --git a/book/src/cluster/managing-forks.md b/book/src/cluster/managing-forks.md index 30dfe3f91df8bf..e00ff6cc3b8fca 100644 --- a/book/src/cluster/managing-forks.md +++ b/book/src/cluster/managing-forks.md @@ -1,6 +1,6 @@ # Managing Forks -The ledger is permitted to fork at slot boundaries. The resulting data structure forms a tree called a _blocktree_. When the validator interprets the blocktree, it must maintain state for each fork in the chain. We call each instance an _active fork_. It is the responsibility of a validator to weigh those forks, such that it may eventually select a fork. +The ledger is permitted to fork at slot boundaries. The resulting data structure forms a tree called a _blockstore_. When the validator interprets the blockstore, it must maintain state for each fork in the chain. We call each instance an _active fork_. 
It is the responsibility of a validator to weigh those forks, such that it may eventually select a fork. A validator selects a fork by submitting a vote to a slot leader on that fork. The vote commits the validator for a duration of time called a _lockout period_. The validator is not permitted to vote on a different fork until that lockout period expires. Each subsequent vote on the same fork doubles the length of the lockout period. After some cluster-configured number of votes \(currently 32\), the length of the lockout period reaches what's called _max lockout_. Until the max lockout is reached, the validator has the option to wait until the lockout period is over and then vote on another fork. When it votes on another fork, it performs an operation called _rollback_, whereby the state rolls back in time to a shared checkpoint and then jumps forward to the tip of the fork that it just voted on. The maximum distance that a fork may roll back is called the _rollback depth_. Rollback depth is the number of votes required to achieve max lockout. Whenever a validator votes, any checkpoints beyond the rollback depth become unreachable. That is, there is no scenario in which the validator will need to roll back beyond rollback depth. It therefore may safely _prune_ unreachable forks and _squash_ all checkpoints beyond rollback depth into the root checkpoint. diff --git a/book/src/implemented-proposals/blocktree.md b/book/src/implemented-proposals/blockstore.md similarity index 62% rename from book/src/implemented-proposals/blocktree.md rename to book/src/implemented-proposals/blockstore.md index 6e3e1718967cc7..1c51648c47ae6e 100644 --- a/book/src/implemented-proposals/blocktree.md +++ b/book/src/implemented-proposals/blockstore.md @@ -1,16 +1,16 @@ -# Blocktree +# Blockstore -After a block reaches finality, all blocks from that one on down to the genesis block form a linear chain with the familiar name blockchain. Until that point, however, the validator must maintain all potentially valid chains, called _forks_. The process by which forks naturally form as a result of leader rotation is described in [fork generation](../cluster/fork-generation.md). The _blocktree_ data structure described here is how a validator copes with those forks until blocks are finalized. +After a block reaches finality, all blocks from that one on down to the genesis block form a linear chain with the familiar name blockchain. Until that point, however, the validator must maintain all potentially valid chains, called _forks_. The process by which forks naturally form as a result of leader rotation is described in [fork generation](../cluster/fork-generation.md). The _blockstore_ data structure described here is how a validator copes with those forks until blocks are finalized. -The blocktree allows a validator to record every shred it observes on the network, in any order, as long as the shred is signed by the expected leader for a given slot. +The blockstore allows a validator to record every shred it observes on the network, in any order, as long as the shred is signed by the expected leader for a given slot. Shreds are moved to a fork-able key space the tuple of `leader slot` + `shred index` \(within the slot\). This permits the skip-list structure of the Solana protocol to be stored in its entirety, without a-priori choosing which fork to follow, which Entries to persist or when to persist them. 
-Repair requests for recent shreds are served out of RAM or recent files and out of deeper storage for less recent shreds, as implemented by the store backing Blocktree. +Repair requests for recent shreds are served out of RAM or recent files and out of deeper storage for less recent shreds, as implemented by the store backing Blockstore. -## Functionalities of Blocktree +## Functionalities of Blockstore -1. Persistence: the Blocktree lives in the front of the nodes verification +1. Persistence: the Blockstore lives in the front of the nodes verification pipeline, right behind network receive and signature verification. If the @@ -20,26 +20,26 @@ Repair requests for recent shreds are served out of RAM or recent files and out 2. Repair: repair is the same as window repair above, but able to serve any - shred that's been received. Blocktree stores shreds with signatures, + shred that's been received. Blockstore stores shreds with signatures, preserving the chain of origination. -3. Forks: Blocktree supports random access of shreds, so can support a +3. Forks: Blockstore supports random access of shreds, so can support a validator's need to rollback and replay from a Bank checkpoint. -4. Restart: with proper pruning/culling, the Blocktree can be replayed by +4. Restart: with proper pruning/culling, the Blockstore can be replayed by ordered enumeration of entries from slot 0. The logic of the replay stage \(i.e. dealing with forks\) will have to be used for the most recent entries in - the Blocktree. + the Blockstore. -## Blocktree Design +## Blockstore Design -1. Entries in the Blocktree are stored as key-value pairs, where the key is the concatenated slot index and shred index for an entry, and the value is the entry data. Note shred indexes are zero-based for each slot \(i.e. they're slot-relative\). -2. The Blocktree maintains metadata for each slot, in the `SlotMeta` struct containing: +1. Entries in the Blockstore are stored as key-value pairs, where the key is the concatenated slot index and shred index for an entry, and the value is the entry data. Note shred indexes are zero-based for each slot \(i.e. they're slot-relative\). +2. The Blockstore maintains metadata for each slot, in the `SlotMeta` struct containing: * `slot_index` - The index of this slot * `num_blocks` - The number of blocks in the slot \(used for chaining to a previous slot\) * `consumed` - The highest shred index `n`, such that for all `m < n`, there exists a shred in this slot with shred index equal to `n` \(i.e. the highest consecutive shred index\). @@ -53,16 +53,16 @@ Repair requests for recent shreds are served out of RAM or recent files and out is\_rooted\(0\) is\_rooted\(n+1\) iff \(is\_rooted\(n\) and slot\(n\).is\_full\(\) 3. Chaining - When a shred for a new slot `x` arrives, we check the number of blocks \(`num_blocks`\) for that new slot \(this information is encoded in the shred\). We then know that this new slot chains to slot `x - num_blocks`. -4. Subscriptions - The Blocktree records a set of slots that have been "subscribed" to. This means entries that chain to these slots will be sent on the Blocktree channel for consumption by the ReplayStage. See the `Blocktree APIs` for details. -5. Update notifications - The Blocktree notifies listeners when slot\(n\).is\_rooted is flipped from false to true for any `n`. +4. Subscriptions - The Blockstore records a set of slots that have been "subscribed" to. 
This means entries that chain to these slots will be sent on the Blockstore channel for consumption by the ReplayStage. See the `Blockstore APIs` for details. +5. Update notifications - The Blockstore notifies listeners when slot\(n\).is\_rooted is flipped from false to true for any `n`. -## Blocktree APIs +## Blockstore APIs -The Blocktree offers a subscription based API that ReplayStage uses to ask for entries it's interested in. The entries will be sent on a channel exposed by the Blocktree. These subscription API's are as follows: 1. `fn get_slots_since(slot_indexes: &[u64]) -> Vec`: Returns new slots connecting to any element of the list `slot_indexes`. +The Blockstore offers a subscription based API that ReplayStage uses to ask for entries it's interested in. The entries will be sent on a channel exposed by the Blockstore. These subscription API's are as follows: 1. `fn get_slots_since(slot_indexes: &[u64]) -> Vec`: Returns new slots connecting to any element of the list `slot_indexes`. 1. `fn get_slot_entries(slot_index: u64, entry_start_index: usize, max_entries: Option) -> Vec`: Returns the entry vector for the slot starting with `entry_start_index`, capping the result at `max` if `max_entries == Some(max)`, otherwise, no upper limit on the length of the return vector is imposed. -Note: Cumulatively, this means that the replay stage will now have to know when a slot is finished, and subscribe to the next slot it's interested in to get the next set of entries. Previously, the burden of chaining slots fell on the Blocktree. +Note: Cumulatively, this means that the replay stage will now have to know when a slot is finished, and subscribe to the next slot it's interested in to get the next set of entries. Previously, the burden of chaining slots fell on the Blockstore. ## Interfacing with Bank @@ -80,11 +80,11 @@ The bank exposes to replay stage: be able to be chained below this vote -Replay stage uses Blocktree APIs to find the longest chain of entries it can hang off a previous vote. If that chain of entries does not hang off the latest vote, the replay stage rolls back the bank to that vote and replays the chain from there. +Replay stage uses Blockstore APIs to find the longest chain of entries it can hang off a previous vote. If that chain of entries does not hang off the latest vote, the replay stage rolls back the bank to that vote and replays the chain from there. -## Pruning Blocktree +## Pruning Blockstore -Once Blocktree entries are old enough, representing all the possible forks becomes less useful, perhaps even problematic for replay upon restart. Once a validator's votes have reached max lockout, however, any Blocktree contents that are not on the PoH chain for that vote for can be pruned, expunged. +Once Blockstore entries are old enough, representing all the possible forks becomes less useful, perhaps even problematic for replay upon restart. Once a validator's votes have reached max lockout, however, any Blockstore contents that are not on the PoH chain for that vote for can be pruned, expunged. Archiver nodes will be responsible for storing really old ledger contents, and validators need only persist their bank periodically. 
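The subscription flow described in the `Blockstore APIs` section above is easier to see as code. The following is a minimal, illustrative sketch only: `BlockstoreApi`, `Entry`, and `replay_from` are hypothetical stand-ins modeled on the two calls listed in this proposal, not the actual `solana_ledger::blockstore::Blockstore` interface.

```rust
/// Stand-in for a ledger entry; the real type lives in solana_ledger.
struct Entry;

/// Minimal model of the two proposal-level calls described above.
/// These are illustrative signatures, not the actual crate API.
trait BlockstoreApi {
    /// New slots that chain to any slot in `slot_indexes`.
    fn get_slots_since(&self, slot_indexes: &[u64]) -> Vec<u64>;
    /// Entries for `slot_index` starting at `entry_start_index`,
    /// optionally capped at `max_entries`.
    fn get_slot_entries(
        &self,
        slot_index: u64,
        entry_start_index: usize,
        max_entries: Option<usize>,
    ) -> Vec<Entry>;
}

/// Replay every descendant of `root`, slot by slot. As the note above says,
/// the caller decides when a slot is finished and then asks which slots
/// chain to it, rather than the store pushing chained entries.
fn replay_from<B: BlockstoreApi>(
    store: &B,
    root: u64,
    mut replay_slot: impl FnMut(u64, &[Entry]),
) {
    let mut frontier = vec![root];
    while let Some(slot) = frontier.pop() {
        // Consume this slot's entries, then subscribe to its descendants.
        let entries = store.get_slot_entries(slot, 0, None);
        replay_slot(slot, &entries);
        frontier.extend(store.get_slots_since(&[slot]));
    }
}
```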
diff --git a/book/src/implemented-proposals/repair-service.md b/book/src/implemented-proposals/repair-service.md index 15e3b5dfcb5de1..9a9294e4007e4a 100644 --- a/book/src/implemented-proposals/repair-service.md +++ b/book/src/implemented-proposals/repair-service.md @@ -8,32 +8,32 @@ The RepairService is in charge of retrieving missing shreds that failed to be de 1\) Validators can fail to receive particular shreds due to network failures -2\) Consider a scenario where blocktree contains the set of slots {1, 3, 5}. Then Blocktree receives shreds for some slot 7, where for each of the shreds b, b.parent == 6, so then the parent-child relation 6 -> 7 is stored in blocktree. However, there is no way to chain these slots to any of the existing banks in Blocktree, and thus the `Shred Repair` protocol will not repair these slots. If these slots happen to be part of the main chain, this will halt replay progress on this node. +2\) Consider a scenario where blockstore contains the set of slots {1, 3, 5}. Then Blockstore receives shreds for some slot 7, where for each of the shreds b, b.parent == 6, so then the parent-child relation 6 -> 7 is stored in blockstore. However, there is no way to chain these slots to any of the existing banks in Blockstore, and thus the `Shred Repair` protocol will not repair these slots. If these slots happen to be part of the main chain, this will halt replay progress on this node. 3\) Validators that find themselves behind the cluster by an entire epoch struggle/fail to catch up because they do not have a leader schedule for future epochs. If nodes were to blindly accept repair shreds in these future epochs, this exposes nodes to spam. ## Repair Protocols -The repair protocol makes best attempts to progress the forking structure of Blocktree. +The repair protocol makes best attempts to progress the forking structure of Blockstore. The different protocol strategies to address the above challenges: -1. Shred Repair \(Addresses Challenge \#1\): This is the most basic repair protocol, with the purpose of detecting and filling "holes" in the ledger. Blocktree tracks the latest root slot. RepairService will then periodically iterate every fork in blocktree starting from the root slot, sending repair requests to validators for any missing shreds. It will send at most some `N` repair requests per iteration. +1. Shred Repair \(Addresses Challenge \#1\): This is the most basic repair protocol, with the purpose of detecting and filling "holes" in the ledger. Blockstore tracks the latest root slot. RepairService will then periodically iterate every fork in blockstore starting from the root slot, sending repair requests to validators for any missing shreds. It will send at most some `N` repair requests per iteration. Note: Validators will only accept shreds within the current verifiable epoch \(epoch the validator has a leader schedule for\). 2. Preemptive Slot Repair \(Addresses Challenge \#2\): The goal of this protocol is to discover the chaining relationship of "orphan" slots that do not currently chain to any known fork. - * Blocktree will track the set of "orphan" slots in a separate column family. - * RepairService will periodically make `RequestOrphan` requests for each of the orphans in blocktree. + * Blockstore will track the set of "orphan" slots in a separate column family. + * RepairService will periodically make `RequestOrphan` requests for each of the orphans in blockstore. 
`RequestOrphan(orphan)` request - `orphan` is the orphan slot that the requestor wants to know the parents of `RequestOrphan(orphan)` response - The highest shreds for each of the first `N` parents of the requested `orphan` On receiving the responses `p`, where `p` is some shred in a parent slot, validators will: - * Insert an empty `SlotMeta` in blocktree for `p.slot` if it doesn't already exist. + * Insert an empty `SlotMeta` in blockstore for `p.slot` if it doesn't already exist. * If `p.slot` does exist, update the parent of `p` based on `parents` - Note that once these empty slots are added to blocktree, the `Shred Repair` protocol should attempt to fill those slots. + Note that once these empty slots are added to blockstore, the `Shred Repair` protocol should attempt to fill those slots. Note: Validators will only accept responses containing shreds within the current verifiable epoch \(epoch the validator has a leader schedule for\). 3. Repairmen \(Addresses Challenge \#3\): This part of the repair protocol is the primary mechanism by which new nodes joining the cluster catch up after loading a snapshot. This protocol works in a "forward" fashion, so validators can verify every shred that they receive against a known leader schedule. @@ -45,5 +45,5 @@ The different protocol strategies to address the above challenges: Observers of this gossip message with higher epochs \(repairmen\) send shreds to catch the lagging node up with the rest of the cluster. The repairmen are responsible for sending the slots within the epochs that are confirmed by the advertised `root` in gossip. The repairmen divide the responsibility of sending each of the missing slots in these epochs based on a random seed \(simple shred.index iteration by N, seeded with the repairman's node\_pubkey\). Ideally, each repairman in an N node cluster \(N nodes whose epochs are higher than that of the repairee\) sends 1/N of the missing shreds. Both data and coding shreds for missing slots are sent. Repairmen do not send shreds again to the same validator until they see the message in gossip updated, at which point they perform another iteration of this protocol. - Gossip messages are updated every time a validator receives a complete slot within the epoch. Completed slots are detected by blocktree and sent over a channel to RepairService. It is important to note that we know that by the time a slot X is complete, the epoch schedule must exist for the epoch that contains slot X because WindowService will reject shreds for unconfirmed epochs. When a newly completed slot is detected, we also update the current root if it has changed since the last update. The root is made available to RepairService through Blocktree, which holds the latest root. + Gossip messages are updated every time a validator receives a complete slot within the epoch. Completed slots are detected by blockstore and sent over a channel to RepairService. It is important to note that we know that by the time a slot X is complete, the epoch schedule must exist for the epoch that contains slot X because WindowService will reject shreds for unconfirmed epochs. When a newly completed slot is detected, we also update the current root if it has changed since the last update. The root is made available to RepairService through Blockstore, which holds the latest root. 
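To make the `Shred Repair` pass above concrete, here is a rough, self-contained sketch of the iteration it describes (walk every fork from the root, request missing shred indexes, cap at `N` requests per pass). The `SlotInfo`/`RepairRequest` types and the `generate_repairs` helper are assumptions for illustration only and do not mirror the actual `RepairService` implementation.

```rust
use std::collections::HashMap;

/// Simplified, illustrative view of what the store knows about one slot.
/// These are not the real SlotMeta fields.
struct SlotInfo {
    received: Vec<u64>,      // shred indexes already present
    last_index: Option<u64>, // last shred index, if the end of the slot was seen
    children: Vec<u64>,      // slots that chain to this one
}

struct RepairRequest {
    slot: u64,
    shred_index: u64,
}

/// One Shred Repair pass: walk every fork from `root`, emit requests for
/// missing shred indexes, and stop after `max_repairs` requests.
fn generate_repairs(
    slots: &HashMap<u64, SlotInfo>,
    root: u64,
    max_repairs: usize,
) -> Vec<RepairRequest> {
    let mut repairs = Vec::new();
    let mut pending = vec![root];
    while let Some(slot) = pending.pop() {
        if repairs.len() >= max_repairs {
            break;
        }
        if let Some(info) = slots.get(&slot) {
            // Highest index we expect for this slot so far.
            let highest = info
                .last_index
                .unwrap_or_else(|| info.received.iter().copied().max().unwrap_or(0));
            for idx in 0..=highest {
                if repairs.len() >= max_repairs {
                    break;
                }
                if !info.received.contains(&idx) {
                    repairs.push(RepairRequest { slot, shred_index: idx });
                }
            }
            // Continue down every fork that chains to this slot.
            pending.extend(info.children.iter().copied());
        }
    }
    repairs
}
```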
diff --git a/book/src/implemented-proposals/validator-timestamp-oracle.md b/book/src/implemented-proposals/validator-timestamp-oracle.md index aa8d9977c6c6c7..c48a756a8db454 100644 --- a/book/src/implemented-proposals/validator-timestamp-oracle.md +++ b/book/src/implemented-proposals/validator-timestamp-oracle.md @@ -84,7 +84,7 @@ let timestamp_slot = floor(current_slot / timestamp_interval); ``` Then the validator needs to gather all Vote WithTimestamp transactions from the -ledger that reference that slot, using `Blocktree::get_slot_entries()`. As these +ledger that reference that slot, using `Blockstore::get_slot_entries()`. As these transactions could have taken some time to reach and be processed by the leader, the validator needs to scan several completed blocks after the timestamp\_slot to get a reasonable set of Timestamps. The exact number of slots will need to be diff --git a/book/src/proposals/tick-verification.md b/book/src/proposals/tick-verification.md index 08950183e54dac..345d81bf163088 100644 --- a/book/src/proposals/tick-verification.md +++ b/book/src/proposals/tick-verification.md @@ -28,17 +28,17 @@ slashing proof to punish this bad behavior. 2) Otherwise, we can simply mark the slot as dead and not playable. A slashing proof may or may not be necessary depending on feasibility. -# Blocktree receiving shreds +# Blockstore receiving shreds -When blocktree receives a new shred `s`, there are two cases: +When blockstore receives a new shred `s`, there are two cases: 1) `s` is marked as `LAST_SHRED_IN_SLOT`, then check if there exists a shred -`s'` in blocktree for that slot where `s'.index > s.index`. If so, together `s` +`s'` in blockstore for that slot where `s'.index > s.index`. If so, together `s` and `s'` constitute a slashing proof. -2) Blocktree has already received a shred `s'` marked as `LAST_SHRED_IN_SLOT` +2) Blockstore has already received a shred `s'` marked as `LAST_SHRED_IN_SLOT` with index `i`. If `s.index > i`, then together `s` and `s'` constitute a -slashing proof. In this case, blocktree will also not insert `s`. +slashing proof. In this case, blockstore will also not insert `s`. 3) Duplicate shreds for the same index are ignored. Non-duplicate shreds for the same index are a slashable condition. Details for this case are covered in the `Leader Duplicate Block Slashing` section. # Replaying and validating ticks -1) Replay stage replays entries from blocktree, keeping track of the number of +1) Replay stage replays entries from blockstore, keeping track of the number of ticks it has seen per slot, and verifying there are `hashes_per_tick` number of hashes between ticks. After the tick from this last shred has been played, replay stage then checks the total number of ticks. diff --git a/book/src/proposals/validator-proposal.md b/book/src/proposals/validator-proposal.md index 1dd68f68835400..ead91e2f3da610 100644 --- a/book/src/proposals/validator-proposal.md +++ b/book/src/proposals/validator-proposal.md @@ -41,7 +41,6 @@ schedule. ## Notable changes * Hoist FetchStage and BroadcastStage out of TPU -* Blocktree renamed to Blockstore * BankForks renamed to Banktree * TPU moves to new socket-free crate called solana-tpu. 
* TPU's BankingStage absorbs ReplayStage diff --git a/book/src/validator/tvu/blocktree.md b/book/src/validator/tvu/blockstore.md similarity index 62% rename from book/src/validator/tvu/blocktree.md rename to book/src/validator/tvu/blockstore.md index 68c44427f768df..722b87ff6288c2 100644 --- a/book/src/validator/tvu/blocktree.md +++ b/book/src/validator/tvu/blockstore.md @@ -1,16 +1,16 @@ -# Blocktree +# Blockstore -After a block reaches finality, all blocks from that one on down to the genesis block form a linear chain with the familiar name blockchain. Until that point, however, the validator must maintain all potentially valid chains, called _forks_. The process by which forks naturally form as a result of leader rotation is described in [fork generation](../../cluster/fork-generation.md). The _blocktree_ data structure described here is how a validator copes with those forks until blocks are finalized. +After a block reaches finality, all blocks from that one on down to the genesis block form a linear chain with the familiar name blockchain. Until that point, however, the validator must maintain all potentially valid chains, called _forks_. The process by which forks naturally form as a result of leader rotation is described in [fork generation](../../cluster/fork-generation.md). The _blockstore_ data structure described here is how a validator copes with those forks until blocks are finalized. -The blocktree allows a validator to record every shred it observes on the network, in any order, as long as the shred is signed by the expected leader for a given slot. +The blockstore allows a validator to record every shred it observes on the network, in any order, as long as the shred is signed by the expected leader for a given slot. Shreds are moved to a fork-able key space the tuple of `leader slot` + `shred index` \(within the slot\). This permits the skip-list structure of the Solana protocol to be stored in its entirety, without a-priori choosing which fork to follow, which Entries to persist or when to persist them. -Repair requests for recent shreds are served out of RAM or recent files and out of deeper storage for less recent shreds, as implemented by the store backing Blocktree. +Repair requests for recent shreds are served out of RAM or recent files and out of deeper storage for less recent shreds, as implemented by the store backing Blockstore. -## Functionalities of Blocktree +## Functionalities of Blockstore -1. Persistence: the Blocktree lives in the front of the nodes verification +1. Persistence: the Blockstore lives in the front of the nodes verification pipeline, right behind network receive and signature verification. If the @@ -20,26 +20,26 @@ Repair requests for recent shreds are served out of RAM or recent files and out 2. Repair: repair is the same as window repair above, but able to serve any - shred that's been received. Blocktree stores shreds with signatures, + shred that's been received. Blockstore stores shreds with signatures, preserving the chain of origination. -3. Forks: Blocktree supports random access of shreds, so can support a +3. Forks: Blockstore supports random access of shreds, so can support a validator's need to rollback and replay from a Bank checkpoint. -4. Restart: with proper pruning/culling, the Blocktree can be replayed by +4. Restart: with proper pruning/culling, the Blockstore can be replayed by ordered enumeration of entries from slot 0. The logic of the replay stage \(i.e. 
dealing with forks\) will have to be used for the most recent entries in - the Blocktree. + the Blockstore. -## Blocktree Design +## Blockstore Design -1. Entries in the Blocktree are stored as key-value pairs, where the key is the concatenated slot index and shred index for an entry, and the value is the entry data. Note shred indexes are zero-based for each slot \(i.e. they're slot-relative\). -2. The Blocktree maintains metadata for each slot, in the `SlotMeta` struct containing: +1. Entries in the Blockstore are stored as key-value pairs, where the key is the concatenated slot index and shred index for an entry, and the value is the entry data. Note shred indexes are zero-based for each slot \(i.e. they're slot-relative\). +2. The Blockstore maintains metadata for each slot, in the `SlotMeta` struct containing: * `slot_index` - The index of this slot * `num_blocks` - The number of blocks in the slot \(used for chaining to a previous slot\) * `consumed` - The highest shred index `n`, such that for all `m < n`, there exists a shred in this slot with shred index equal to `n` \(i.e. the highest consecutive shred index\). @@ -53,16 +53,16 @@ Repair requests for recent shreds are served out of RAM or recent files and out is\_rooted\(0\) is\_rooted\(n+1\) iff \(is\_rooted\(n\) and slot\(n\).is\_full\(\) 3. Chaining - When a shred for a new slot `x` arrives, we check the number of blocks \(`num_blocks`\) for that new slot \(this information is encoded in the shred\). We then know that this new slot chains to slot `x - num_blocks`. -4. Subscriptions - The Blocktree records a set of slots that have been "subscribed" to. This means entries that chain to these slots will be sent on the Blocktree channel for consumption by the ReplayStage. See the `Blocktree APIs` for details. -5. Update notifications - The Blocktree notifies listeners when slot\(n\).is\_rooted is flipped from false to true for any `n`. +4. Subscriptions - The Blockstore records a set of slots that have been "subscribed" to. This means entries that chain to these slots will be sent on the Blockstore channel for consumption by the ReplayStage. See the `Blockstore APIs` for details. +5. Update notifications - The Blockstore notifies listeners when slot\(n\).is\_rooted is flipped from false to true for any `n`. -## Blocktree APIs +## Blockstore APIs -The Blocktree offers a subscription based API that ReplayStage uses to ask for entries it's interested in. The entries will be sent on a channel exposed by the Blocktree. These subscription API's are as follows: 1. `fn get_slots_since(slot_indexes: &[u64]) -> Vec`: Returns new slots connecting to any element of the list `slot_indexes`. +The Blockstore offers a subscription based API that ReplayStage uses to ask for entries it's interested in. The entries will be sent on a channel exposed by the Blockstore. These subscription API's are as follows: 1. `fn get_slots_since(slot_indexes: &[u64]) -> Vec`: Returns new slots connecting to any element of the list `slot_indexes`. 1. `fn get_slot_entries(slot_index: u64, entry_start_index: usize, max_entries: Option) -> Vec`: Returns the entry vector for the slot starting with `entry_start_index`, capping the result at `max` if `max_entries == Some(max)`, otherwise, no upper limit on the length of the return vector is imposed. -Note: Cumulatively, this means that the replay stage will now have to know when a slot is finished, and subscribe to the next slot it's interested in to get the next set of entries. 
Previously, the burden of chaining slots fell on the Blocktree. +Note: Cumulatively, this means that the replay stage will now have to know when a slot is finished, and subscribe to the next slot it's interested in to get the next set of entries. Previously, the burden of chaining slots fell on the Blockstore. ## Interfacing with Bank @@ -80,11 +80,11 @@ The bank exposes to replay stage: be able to be chained below this vote -Replay stage uses Blocktree APIs to find the longest chain of entries it can hang off a previous vote. If that chain of entries does not hang off the latest vote, the replay stage rolls back the bank to that vote and replays the chain from there. +Replay stage uses Blockstore APIs to find the longest chain of entries it can hang off a previous vote. If that chain of entries does not hang off the latest vote, the replay stage rolls back the bank to that vote and replays the chain from there. -## Pruning Blocktree +## Pruning Blockstore -Once Blocktree entries are old enough, representing all the possible forks becomes less useful, perhaps even problematic for replay upon restart. Once a validator's votes have reached max lockout, however, any Blocktree contents that are not on the PoH chain for that vote for can be pruned, expunged. +Once Blockstore entries are old enough, representing all the possible forks becomes less useful, perhaps even problematic for replay upon restart. Once a validator's votes have reached max lockout, however, any Blockstore contents that are not on the PoH chain for that vote for can be pruned, expunged. Archiver nodes will be responsible for storing really old ledger contents, and validators need only persist their bank periodically. diff --git a/core/Cargo.toml b/core/Cargo.toml index 9c08b98e762428..9d100026705830 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -85,7 +85,7 @@ systemstat = "0.1.5" name = "banking_stage" [[bench]] -name = "blocktree" +name = "blockstore" [[bench]] name = "gen_keys" diff --git a/core/benches/banking_stage.rs b/core/benches/banking_stage.rs index 6da3c8fbb5f7b9..6e887dfb15be1a 100644 --- a/core/benches/banking_stage.rs +++ b/core/benches/banking_stage.rs @@ -12,9 +12,9 @@ use solana_core::cluster_info::Node; use solana_core::genesis_utils::{create_genesis_config, GenesisConfigInfo}; use solana_core::packet::to_packets_chunked; use solana_core::poh_recorder::WorkingBankEntry; -use solana_ledger::blocktree_processor::process_entries; +use solana_ledger::blockstore_processor::process_entries; use solana_ledger::entry::{next_hash, Entry}; -use solana_ledger::{blocktree::Blocktree, get_tmp_ledger_path}; +use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path}; use solana_perf::test_tx::test_tx; use solana_runtime::bank::Bank; use solana_sdk::genesis_config::GenesisConfig; @@ -57,11 +57,11 @@ fn bench_consume_buffered(bencher: &mut Bencher) { let ledger_path = get_tmp_ledger_path!(); let my_pubkey = Pubkey::new_rand(); { - let blocktree = Arc::new( - Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"), + let blockstore = Arc::new( + Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"), ); let (exit, poh_recorder, poh_service, _signal_receiver) = - create_test_recorder(&bank, &blocktree, None); + create_test_recorder(&bank, &blockstore, None); let tx = test_tx(); let len = 4096; @@ -87,7 +87,7 @@ fn bench_consume_buffered(bencher: &mut Bencher) { exit.store(true, Ordering::Relaxed); poh_service.join().unwrap(); } - let _unused = 
Blocktree::destroy(&ledger_path); + let _unused = Blockstore::destroy(&ledger_path); } fn make_accounts_txs(txes: usize, mint_keypair: &Keypair, hash: Hash) -> Vec { @@ -184,11 +184,11 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) { let verified: Vec<_> = to_packets_chunked(&transactions.clone(), PACKETS_PER_BATCH); let ledger_path = get_tmp_ledger_path!(); { - let blocktree = Arc::new( - Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"), + let blockstore = Arc::new( + Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"), ); let (exit, poh_recorder, poh_service, signal_receiver) = - create_test_recorder(&bank, &blocktree, None); + create_test_recorder(&bank, &blockstore, None); let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info); let cluster_info = Arc::new(RwLock::new(cluster_info)); let _banking_stage = BankingStage::new( @@ -244,7 +244,7 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) { exit.store(true, Ordering::Relaxed); poh_service.join().unwrap(); } - let _unused = Blocktree::destroy(&ledger_path); + let _unused = Blockstore::destroy(&ledger_path); } #[bench] diff --git a/core/benches/blocktree.rs b/core/benches/blockstore.rs similarity index 71% rename from core/benches/blocktree.rs rename to core/benches/blockstore.rs index ea12654fbf57ee..aac755a952c245 100644 --- a/core/benches/blocktree.rs +++ b/core/benches/blockstore.rs @@ -6,7 +6,7 @@ extern crate test; use rand::Rng; use solana_ledger::{ - blocktree::{entries_to_test_shreds, Blocktree}, + blockstore::{entries_to_test_shreds, Blockstore}, entry::{create_ticks, Entry}, get_tmp_ledger_path, }; @@ -16,19 +16,19 @@ use test::Bencher; // Given some shreds and a ledger at ledger_path, benchmark writing the shreds to the ledger fn bench_write_shreds(bench: &mut Bencher, entries: Vec, ledger_path: &Path) { - let blocktree = - Blocktree::open(ledger_path).expect("Expected to be able to open database ledger"); + let blockstore = + Blockstore::open(ledger_path).expect("Expected to be able to open database ledger"); bench.iter(move || { let shreds = entries_to_test_shreds(entries.clone(), 0, 0, true, 0); - blocktree.insert_shreds(shreds, None, false).unwrap(); + blockstore.insert_shreds(shreds, None, false).unwrap(); }); - Blocktree::destroy(ledger_path).expect("Expected successful database destruction"); + Blockstore::destroy(ledger_path).expect("Expected successful database destruction"); } // Insert some shreds into the ledger in preparation for read benchmarks fn setup_read_bench( - blocktree: &mut Blocktree, + blockstore: &mut Blockstore, num_small_shreds: u64, num_large_shreds: u64, slot: Slot, @@ -42,7 +42,7 @@ fn setup_read_bench( // Convert the entries to shreds, write the shreds to the ledger let shreds = entries_to_test_shreds(entries, slot, slot.saturating_sub(1), true, 0); - blocktree + blockstore .insert_shreds(shreds, None, false) .expect("Expectd successful insertion of shreds into ledger"); } @@ -71,15 +71,15 @@ fn bench_write_big(bench: &mut Bencher) { #[ignore] fn bench_read_sequential(bench: &mut Bencher) { let ledger_path = get_tmp_ledger_path!(); - let mut blocktree = - Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); + let mut blockstore = + Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"); // Insert some big and small shreds into the ledger let num_small_shreds = 32 * 1024; let num_large_shreds = 32 * 
1024; let total_shreds = num_small_shreds + num_large_shreds; let slot = 0; - setup_read_bench(&mut blocktree, num_small_shreds, num_large_shreds, slot); + setup_read_bench(&mut blockstore, num_small_shreds, num_large_shreds, slot); let num_reads = total_shreds / 15; let mut rng = rand::thread_rng(); @@ -87,26 +87,26 @@ fn bench_read_sequential(bench: &mut Bencher) { // Generate random starting point in the range [0, total_shreds - 1], read num_reads shreds sequentially let start_index = rng.gen_range(0, num_small_shreds + num_large_shreds); for i in start_index..start_index + num_reads { - let _ = blocktree.get_data_shred(slot, i as u64 % total_shreds); + let _ = blockstore.get_data_shred(slot, i as u64 % total_shreds); } }); - Blocktree::destroy(&ledger_path).expect("Expected successful database destruction"); + Blockstore::destroy(&ledger_path).expect("Expected successful database destruction"); } #[bench] #[ignore] fn bench_read_random(bench: &mut Bencher) { let ledger_path = get_tmp_ledger_path!(); - let mut blocktree = - Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); + let mut blockstore = + Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"); // Insert some big and small shreds into the ledger let num_small_shreds = 32 * 1024; let num_large_shreds = 32 * 1024; let total_shreds = num_small_shreds + num_large_shreds; let slot = 0; - setup_read_bench(&mut blocktree, num_small_shreds, num_large_shreds, slot); + setup_read_bench(&mut blockstore, num_small_shreds, num_large_shreds, slot); let num_reads = total_shreds / 15; @@ -118,39 +118,39 @@ fn bench_read_random(bench: &mut Bencher) { .collect(); bench.iter(move || { for i in indexes.iter() { - let _ = blocktree.get_data_shred(slot, *i as u64); + let _ = blockstore.get_data_shred(slot, *i as u64); } }); - Blocktree::destroy(&ledger_path).expect("Expected successful database destruction"); + Blockstore::destroy(&ledger_path).expect("Expected successful database destruction"); } #[bench] #[ignore] fn bench_insert_data_shred_small(bench: &mut Bencher) { let ledger_path = get_tmp_ledger_path!(); - let blocktree = - Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); + let blockstore = + Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"); let num_entries = 32 * 1024; let entries = create_ticks(num_entries, 0, Hash::default()); bench.iter(move || { let shreds = entries_to_test_shreds(entries.clone(), 0, 0, true, 0); - blocktree.insert_shreds(shreds, None, false).unwrap(); + blockstore.insert_shreds(shreds, None, false).unwrap(); }); - Blocktree::destroy(&ledger_path).expect("Expected successful database destruction"); + Blockstore::destroy(&ledger_path).expect("Expected successful database destruction"); } #[bench] #[ignore] fn bench_insert_data_shred_big(bench: &mut Bencher) { let ledger_path = get_tmp_ledger_path!(); - let blocktree = - Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); + let blockstore = + Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"); let num_entries = 32 * 1024; let entries = create_ticks(num_entries, 0, Hash::default()); bench.iter(move || { let shreds = entries_to_test_shreds(entries.clone(), 0, 0, true, 0); - blocktree.insert_shreds(shreds, None, false).unwrap(); + blockstore.insert_shreds(shreds, None, false).unwrap(); }); - Blocktree::destroy(&ledger_path).expect("Expected successful database destruction"); 
+ Blockstore::destroy(&ledger_path).expect("Expected successful database destruction"); } diff --git a/core/src/archiver.rs b/core/src/archiver.rs index f31fc4b14b8156..90417f46ff52b7 100644 --- a/core/src/archiver.rs +++ b/core/src/archiver.rs @@ -19,7 +19,7 @@ use rand::{thread_rng, Rng, SeedableRng}; use rand_chacha::ChaChaRng; use solana_client::{rpc_client::RpcClient, rpc_request::RpcRequest, thin_client::ThinClient}; use solana_ledger::{ - blocktree::Blocktree, leader_schedule_cache::LeaderScheduleCache, shred::Shred, + blockstore::Blockstore, leader_schedule_cache::LeaderScheduleCache, shred::Shred, }; use solana_net_utils::bind_in_range; use solana_perf::packet::Packets; @@ -222,13 +222,13 @@ impl Archiver { // Note for now, this ledger will not contain any of the existing entries // in the ledger located at ledger_path, and will only append on newly received // entries after being passed to window_service - let blocktree = Arc::new( - Blocktree::open(ledger_path).expect("Expected to be able to open database ledger"), + let blockstore = Arc::new( + Blockstore::open(ledger_path).expect("Expected to be able to open database ledger"), ); let gossip_service = GossipService::new( &cluster_info, - Some(blocktree.clone()), + Some(blockstore.clone()), None, node.sockets.gossip, &exit, @@ -294,7 +294,7 @@ impl Archiver { let window_service = match Self::setup( &mut meta, cluster_info.clone(), - &blocktree, + &blockstore, &exit, &node_info, &storage_keypair, @@ -320,7 +320,7 @@ impl Archiver { // run archiver Self::run( &mut meta, - &blocktree, + &blockstore, cluster_info, &keypair, &storage_keypair, @@ -344,14 +344,14 @@ impl Archiver { fn run( meta: &mut ArchiverMeta, - blocktree: &Arc, + blockstore: &Arc, cluster_info: Arc>, archiver_keypair: &Arc, storage_keypair: &Arc, exit: &Arc, ) { // encrypt segment - Self::encrypt_ledger(meta, blocktree).expect("ledger encrypt not successful"); + Self::encrypt_ledger(meta, blockstore).expect("ledger encrypt not successful"); let enc_file_path = meta.ledger_data_file_encrypted.clone(); // do replicate loop { @@ -443,7 +443,7 @@ impl Archiver { fn setup( meta: &mut ArchiverMeta, cluster_info: Arc>, - blocktree: &Arc, + blockstore: &Arc, exit: &Arc, node_info: &ContactInfo, storage_keypair: &Arc, @@ -498,7 +498,7 @@ impl Archiver { ); let window_service = WindowService::new( - blocktree.clone(), + blockstore.clone(), cluster_info.clone(), verified_receiver, retransmit_sender, @@ -512,7 +512,7 @@ impl Archiver { Self::wait_for_segment_download( slot, slots_per_segment, - &blocktree, + &blockstore, &exit, &node_info, cluster_info, @@ -523,7 +523,7 @@ impl Archiver { fn wait_for_segment_download( start_slot: Slot, slots_per_segment: u64, - blocktree: &Arc, + blockstore: &Arc, exit: &Arc, node_info: &ContactInfo, cluster_info: Arc>, @@ -534,7 +534,7 @@ impl Archiver { ); let mut current_slot = start_slot; 'outer: loop { - while blocktree.is_full(current_slot) { + while blockstore.is_full(current_slot) { current_slot += 1; info!("current slot: {}", current_slot); if current_slot >= start_slot + slots_per_segment { @@ -559,7 +559,7 @@ impl Archiver { } } - fn encrypt_ledger(meta: &mut ArchiverMeta, blocktree: &Arc) -> Result<()> { + fn encrypt_ledger(meta: &mut ArchiverMeta, blockstore: &Arc) -> Result<()> { meta.ledger_data_file_encrypted = meta.ledger_path.join(ENCRYPTED_FILENAME); { @@ -567,7 +567,7 @@ impl Archiver { ivec.copy_from_slice(&meta.signature.as_ref()); let num_encrypted_bytes = chacha_cbc_encrypt_ledger( - blocktree, + blockstore, meta.slot, 
meta.slots_per_segment, &meta.ledger_data_file_encrypted, @@ -844,15 +844,15 @@ impl Archiver { } } - /// Ask an archiver to populate a given blocktree with its segment. + /// Ask an archiver to populate a given blockstore with its segment. /// Return the slot at the start of the archiver's segment /// - /// It is recommended to use a temporary blocktree for this since the download will not verify + /// It is recommended to use a temporary blockstore for this since the download will not verify /// shreds received and might impact the chaining of shreds across slots pub fn download_from_archiver( cluster_info: &Arc>, archiver_info: &ContactInfo, - blocktree: &Arc, + blockstore: &Arc, slots_per_segment: u64, ) -> Result { // Create a client which downloads from the archiver and see that it @@ -884,7 +884,7 @@ impl Archiver { for _ in 0..120 { // Strategy used by archivers let repairs = RepairService::generate_repairs_in_range( - blocktree, + blockstore, repair_service::MAX_REPAIR_LENGTH, &repair_slot_range, ); @@ -930,10 +930,10 @@ impl Archiver { .into_iter() .filter_map(|p| Shred::new_from_serialized_shred(p.data.to_vec()).ok()) .collect(); - blocktree.insert_shreds(shreds, None, false)?; + blockstore.insert_shreds(shreds, None, false)?; } // check if all the slots in the segment are complete - if Self::segment_complete(start_slot, slots_per_segment, blocktree) { + if Self::segment_complete(start_slot, slots_per_segment, blockstore) { break; } sleep(Duration::from_millis(500)); @@ -942,7 +942,7 @@ impl Archiver { t_receiver.join().unwrap(); // check if all the slots in the segment are complete - if !Self::segment_complete(start_slot, slots_per_segment, blocktree) { + if !Self::segment_complete(start_slot, slots_per_segment, blockstore) { return Err( io::Error::new(ErrorKind::Other, "Unable to download the full segment").into(), ); @@ -953,10 +953,10 @@ impl Archiver { fn segment_complete( start_slot: Slot, slots_per_segment: u64, - blocktree: &Arc, + blockstore: &Arc, ) -> bool { for slot in start_slot..(start_slot + slots_per_segment) { - if !blocktree.is_full(slot) { + if !blockstore.is_full(slot) { return false; } } diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index d15b5741f620f3..01a5a6ce9a11e0 100644 --- a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -10,8 +10,8 @@ use crate::{ use crossbeam_channel::{Receiver as CrossbeamReceiver, RecvTimeoutError}; use itertools::Itertools; use solana_ledger::{ - blocktree::Blocktree, - blocktree_processor::{send_transaction_status_batch, TransactionStatusSender}, + blockstore::Blockstore, + blockstore_processor::{send_transaction_status_batch, TransactionStatusSender}, entry::hash_transactions, leader_schedule_cache::LeaderScheduleCache, }; @@ -979,7 +979,7 @@ impl BankingStage { pub fn create_test_recorder( bank: &Arc, - blocktree: &Arc, + blockstore: &Arc, poh_config: Option, ) -> ( Arc, @@ -996,7 +996,7 @@ pub fn create_test_recorder( Some((4, 4)), bank.ticks_per_slot(), &Pubkey::default(), - blocktree, + blockstore, &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &poh_config, ); @@ -1022,7 +1022,7 @@ mod tests { use itertools::Itertools; use solana_client::rpc_request::RpcEncodedTransaction; use solana_ledger::{ - blocktree::entries_to_test_shreds, + blockstore::entries_to_test_shreds, entry::{next_entry, Entry, EntrySlice}, get_tmp_ledger_path, }; @@ -1043,11 +1043,12 @@ mod tests { let (vote_sender, vote_receiver) = unbounded(); let ledger_path = get_tmp_ledger_path!(); { - let blocktree = 
Arc::new( - Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"), + let blockstore = Arc::new( + Blockstore::open(&ledger_path) + .expect("Expected to be able to open database ledger"), ); let (exit, poh_recorder, poh_service, _entry_receiever) = - create_test_recorder(&bank, &blocktree, None); + create_test_recorder(&bank, &blockstore, None); let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info); let cluster_info = Arc::new(RwLock::new(cluster_info)); let banking_stage = BankingStage::new( @@ -1063,7 +1064,7 @@ mod tests { banking_stage.join().unwrap(); poh_service.join().unwrap(); } - Blocktree::destroy(&ledger_path).unwrap(); + Blockstore::destroy(&ledger_path).unwrap(); } #[test] @@ -1080,13 +1081,14 @@ mod tests { let (vote_sender, vote_receiver) = unbounded(); let ledger_path = get_tmp_ledger_path!(); { - let blocktree = Arc::new( - Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"), + let blockstore = Arc::new( + Blockstore::open(&ledger_path) + .expect("Expected to be able to open database ledger"), ); let mut poh_config = PohConfig::default(); poh_config.target_tick_count = Some(bank.max_tick_height() + num_extra_ticks); let (exit, poh_recorder, poh_service, entry_receiver) = - create_test_recorder(&bank, &blocktree, Some(poh_config)); + create_test_recorder(&bank, &blockstore, Some(poh_config)); let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info); let cluster_info = Arc::new(RwLock::new(cluster_info)); let banking_stage = BankingStage::new( @@ -1114,7 +1116,7 @@ mod tests { assert_eq!(entries[entries.len() - 1].hash, bank.last_blockhash()); banking_stage.join().unwrap(); } - Blocktree::destroy(&ledger_path).unwrap(); + Blockstore::destroy(&ledger_path).unwrap(); } pub fn convert_from_old_verified(mut with_vers: Vec<(Packets, Vec)>) -> Vec { @@ -1141,14 +1143,15 @@ mod tests { let (vote_sender, vote_receiver) = unbounded(); let ledger_path = get_tmp_ledger_path!(); { - let blocktree = Arc::new( - Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"), + let blockstore = Arc::new( + Blockstore::open(&ledger_path) + .expect("Expected to be able to open database ledger"), ); let mut poh_config = PohConfig::default(); // limit tick count to avoid clearing working_bank at PohRecord then PohRecorderError(MaxHeightReached) at BankingStage poh_config.target_tick_count = Some(bank.max_tick_height() - 1); let (exit, poh_recorder, poh_service, entry_receiver) = - create_test_recorder(&bank, &blocktree, Some(poh_config)); + create_test_recorder(&bank, &blockstore, Some(poh_config)); let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info); let cluster_info = Arc::new(RwLock::new(cluster_info)); let banking_stage = BankingStage::new( @@ -1234,7 +1237,7 @@ mod tests { drop(entry_receiver); } - Blocktree::destroy(&ledger_path).unwrap(); + Blockstore::destroy(&ledger_path).unwrap(); } #[test] @@ -1280,15 +1283,15 @@ mod tests { let entry_receiver = { // start a banking_stage to eat verified receiver let bank = Arc::new(Bank::new(&genesis_config)); - let blocktree = Arc::new( - Blocktree::open(&ledger_path) + let blockstore = Arc::new( + Blockstore::open(&ledger_path) .expect("Expected to be able to open database ledger"), ); let mut poh_config = PohConfig::default(); // limit tick count to avoid clearing working_bank at PohRecord then PohRecorderError(MaxHeightReached) at BankingStage poh_config.target_tick_count = 
Some(bank.max_tick_height() - 1); let (exit, poh_recorder, poh_service, entry_receiver) = - create_test_recorder(&bank, &blocktree, Some(poh_config)); + create_test_recorder(&bank, &blockstore, Some(poh_config)); let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info); let cluster_info = Arc::new(RwLock::new(cluster_info)); @@ -1331,7 +1334,7 @@ mod tests { // the account balance below zero before the credit is added. assert_eq!(bank.get_balance(&alice.pubkey()), 2); } - Blocktree::destroy(&ledger_path).unwrap(); + Blockstore::destroy(&ledger_path).unwrap(); } #[test] @@ -1349,8 +1352,8 @@ mod tests { }; let ledger_path = get_tmp_ledger_path!(); { - let blocktree = - Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); + let blockstore = Blockstore::open(&ledger_path) + .expect("Expected to be able to open database ledger"); let (poh_recorder, entry_receiver) = PohRecorder::new( bank.tick_height(), bank.last_blockhash(), @@ -1358,7 +1361,7 @@ mod tests { None, bank.ticks_per_slot(), &Pubkey::default(), - &Arc::new(blocktree), + &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(PohConfig::default()), ); @@ -1435,7 +1438,7 @@ mod tests { // Should receive nothing from PohRecorder b/c record failed assert!(entry_receiver.try_recv().is_err()); } - Blocktree::destroy(&ledger_path).unwrap(); + Blockstore::destroy(&ledger_path).unwrap(); } #[test] @@ -1685,8 +1688,8 @@ mod tests { }; let ledger_path = get_tmp_ledger_path!(); { - let blocktree = - Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); + let blockstore = Blockstore::open(&ledger_path) + .expect("Expected to be able to open database ledger"); let (poh_recorder, entry_receiver) = PohRecorder::new( bank.tick_height(), bank.last_blockhash(), @@ -1694,7 +1697,7 @@ mod tests { Some((4, 4)), bank.ticks_per_slot(), &pubkey, - &Arc::new(blocktree), + &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(PohConfig::default()), ); @@ -1751,7 +1754,7 @@ mod tests { assert_eq!(bank.get_balance(&pubkey), 1); } - Blocktree::destroy(&ledger_path).unwrap(); + Blockstore::destroy(&ledger_path).unwrap(); } #[test] @@ -1778,8 +1781,8 @@ mod tests { }; let ledger_path = get_tmp_ledger_path!(); { - let blocktree = - Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); + let blockstore = Blockstore::open(&ledger_path) + .expect("Expected to be able to open database ledger"); let (poh_recorder, _entry_receiver) = PohRecorder::new( bank.tick_height(), bank.last_blockhash(), @@ -1787,7 +1790,7 @@ mod tests { Some((4, 4)), bank.ticks_per_slot(), &pubkey, - &Arc::new(blocktree), + &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(PohConfig::default()), ); @@ -1806,7 +1809,7 @@ mod tests { assert!(result.is_ok()); assert_eq!(unprocessed.len(), 1); } - Blocktree::destroy(&ledger_path).unwrap(); + Blockstore::destroy(&ledger_path).unwrap(); } #[test] @@ -1866,8 +1869,8 @@ mod tests { let ledger_path = get_tmp_ledger_path!(); { - let blocktree = - Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); + let blockstore = Blockstore::open(&ledger_path) + .expect("Expected to be able to open database ledger"); let (poh_recorder, _entry_receiver) = PohRecorder::new( bank.tick_height(), bank.last_blockhash(), @@ -1875,7 +1878,7 @@ mod tests { Some((4, 4)), bank.ticks_per_slot(), &Pubkey::new_rand(), - 
&Arc::new(blocktree), + &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(PohConfig::default()), ); @@ -1894,7 +1897,7 @@ mod tests { assert_eq!(retryable_txs, expected); } - Blocktree::destroy(&ledger_path).unwrap(); + Blockstore::destroy(&ledger_path).unwrap(); } #[test] @@ -1933,9 +1936,9 @@ mod tests { }; let ledger_path = get_tmp_ledger_path!(); { - let blocktree = - Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); - let blocktree = Arc::new(blocktree); + let blockstore = Blockstore::open(&ledger_path) + .expect("Expected to be able to open database ledger"); + let blockstore = Arc::new(blockstore); let (poh_recorder, _entry_receiver) = PohRecorder::new( bank.tick_height(), bank.last_blockhash(), @@ -1943,7 +1946,7 @@ mod tests { Some((4, 4)), bank.ticks_per_slot(), &pubkey, - &blocktree, + &blockstore, &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(PohConfig::default()), ); @@ -1952,13 +1955,13 @@ mod tests { poh_recorder.lock().unwrap().set_working_bank(working_bank); let shreds = entries_to_test_shreds(entries.clone(), bank.slot(), 0, true, 0); - blocktree.insert_shreds(shreds, None, false).unwrap(); - blocktree.set_roots(&[bank.slot()]).unwrap(); + blockstore.insert_shreds(shreds, None, false).unwrap(); + blockstore.set_roots(&[bank.slot()]).unwrap(); let (transaction_status_sender, transaction_status_receiver) = unbounded(); let transaction_status_service = TransactionStatusService::new( transaction_status_receiver, - blocktree.clone(), + blockstore.clone(), &Arc::new(AtomicBool::new(false)), ); @@ -1972,7 +1975,7 @@ mod tests { transaction_status_service.join().unwrap(); - let confirmed_block = blocktree.get_confirmed_block(bank.slot(), None).unwrap(); + let confirmed_block = blockstore.get_confirmed_block(bank.slot(), None).unwrap(); assert_eq!(confirmed_block.transactions.len(), 3); for (transaction, result) in confirmed_block.transactions.into_iter() { @@ -1993,6 +1996,6 @@ mod tests { } } } - Blocktree::destroy(&ledger_path).unwrap(); + Blockstore::destroy(&ledger_path).unwrap(); } } diff --git a/core/src/blockstream_service.rs b/core/src/blockstream_service.rs index 0770c5380b8020..ce7a66e9bd6ce7 100644 --- a/core/src/blockstream_service.rs +++ b/core/src/blockstream_service.rs @@ -8,7 +8,7 @@ use crate::blockstream::MockBlockstream as Blockstream; #[cfg(not(test))] use crate::blockstream::SocketBlockstream as Blockstream; use crate::result::{Error, Result}; -use solana_ledger::blocktree::Blocktree; +use solana_ledger::blockstore::Blockstore; use solana_sdk::pubkey::Pubkey; use std::path::Path; use std::sync::atomic::{AtomicBool, Ordering}; @@ -25,7 +25,7 @@ impl BlockstreamService { #[allow(clippy::new_ret_no_self)] pub fn new( slot_full_receiver: Receiver<(u64, Pubkey)>, - blocktree: Arc, + blockstore: Arc, unix_socket: &Path, exit: &Arc, ) -> Self { @@ -38,7 +38,7 @@ impl BlockstreamService { break; } if let Err(e) = - Self::process_entries(&slot_full_receiver, &blocktree, &mut blockstream) + Self::process_entries(&slot_full_receiver, &blockstore, &mut blockstream) { match e { Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break, @@ -52,18 +52,18 @@ impl BlockstreamService { } fn process_entries( slot_full_receiver: &Receiver<(u64, Pubkey)>, - blocktree: &Arc, + blockstore: &Arc, blockstream: &mut Blockstream, ) -> Result<()> { let timeout = Duration::new(1, 0); let (slot, slot_leader) = slot_full_receiver.recv_timeout(timeout)?; - let entries = 
blocktree.get_slot_entries(slot, 0, None).unwrap(); - let blocktree_meta = blocktree.meta(slot).unwrap().unwrap(); + let entries = blockstore.get_slot_entries(slot, 0, None).unwrap(); + let blockstore_meta = blockstore.meta(slot).unwrap().unwrap(); let _parent_slot = if slot == 0 { None } else { - Some(blocktree_meta.parent_slot) + Some(blockstore_meta.parent_slot) }; let ticks_per_slot = entries.iter().filter(|entry| entry.is_tick()).count() as u64; let mut tick_height = ticks_per_slot * slot; @@ -113,14 +113,14 @@ mod test { let ticks_per_slot = 5; let leader_pubkey = Pubkey::new_rand(); - // Set up genesis config and blocktree + // Set up genesis config and blockstore let GenesisConfigInfo { mut genesis_config, .. } = create_genesis_config(1000); genesis_config.ticks_per_slot = ticks_per_slot; let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config); - let blocktree = Blocktree::open(&ledger_path).unwrap(); + let blockstore = Blockstore::open(&ledger_path).unwrap(); // Set up blockstream let mut blockstream = Blockstream::new(&PathBuf::from("test_stream")); @@ -143,7 +143,7 @@ mod test { let expected_entries = entries.clone(); let expected_tick_heights = [6, 7, 8, 9, 9, 10]; - blocktree + blockstore .write_entries( 1, 0, @@ -160,7 +160,7 @@ mod test { slot_full_sender.send((1, leader_pubkey)).unwrap(); BlockstreamService::process_entries( &slot_full_receiver, - &Arc::new(blocktree), + &Arc::new(blockstore), &mut blockstream, ) .unwrap(); diff --git a/core/src/broadcast_stage.rs b/core/src/broadcast_stage.rs index d4c2478715bd30..2fad2740e79b52 100644 --- a/core/src/broadcast_stage.rs +++ b/core/src/broadcast_stage.rs @@ -5,7 +5,7 @@ use self::standard_broadcast_run::StandardBroadcastRun; use crate::cluster_info::{ClusterInfo, ClusterInfoError}; use crate::poh_recorder::WorkingBankEntry; use crate::result::{Error, Result}; -use solana_ledger::blocktree::Blocktree; +use solana_ledger::blockstore::Blockstore; use solana_ledger::shred::Shred; use solana_ledger::staking_utils; use solana_metrics::{inc_new_counter_error, inc_new_counter_info}; @@ -44,7 +44,7 @@ impl BroadcastStageType { cluster_info: Arc>, receiver: Receiver, exit_sender: &Arc, - blocktree: &Arc, + blockstore: &Arc, shred_version: u16, ) -> BroadcastStage { let keypair = cluster_info.read().unwrap().keypair.clone(); @@ -54,7 +54,7 @@ impl BroadcastStageType { cluster_info, receiver, exit_sender, - blocktree, + blockstore, StandardBroadcastRun::new(keypair, shred_version), ), @@ -63,7 +63,7 @@ impl BroadcastStageType { cluster_info, receiver, exit_sender, - blocktree, + blockstore, FailEntryVerificationBroadcastRun::new(keypair, shred_version), ), @@ -72,7 +72,7 @@ impl BroadcastStageType { cluster_info, receiver, exit_sender, - blocktree, + blockstore, BroadcastFakeShredsRun::new(keypair, 0, shred_version), ), } @@ -83,10 +83,10 @@ type TransmitShreds = (Option>>, Arc>); trait BroadcastRun { fn run( &mut self, - blocktree: &Arc, + blockstore: &Arc, receiver: &Receiver, socket_sender: &Sender, - blocktree_sender: &Sender>>, + blockstore_sender: &Sender>>, ) -> Result<()>; fn transmit( &self, @@ -97,7 +97,7 @@ trait BroadcastRun { fn record( &self, receiver: &Arc>>>>, - blocktree: &Arc, + blockstore: &Arc, ) -> Result<()>; } @@ -126,14 +126,15 @@ pub struct BroadcastStage { impl BroadcastStage { #[allow(clippy::too_many_arguments)] fn run( - blocktree: &Arc, + blockstore: &Arc, receiver: &Receiver, socket_sender: &Sender, - blocktree_sender: &Sender>>, + blockstore_sender: &Sender>>, mut broadcast_stage_run: impl 
BroadcastRun, ) -> BroadcastStageReturnType { loop { - let res = broadcast_stage_run.run(blocktree, receiver, socket_sender, blocktree_sender); + let res = + broadcast_stage_run.run(blockstore, receiver, socket_sender, blockstore_sender); let res = Self::handle_error(res); if let Some(res) = res { return res; @@ -180,19 +181,25 @@ impl BroadcastStage { cluster_info: Arc>, receiver: Receiver, exit_sender: &Arc, - blocktree: &Arc, + blockstore: &Arc, broadcast_stage_run: impl BroadcastRun + Send + 'static + Clone, ) -> Self { - let btree = blocktree.clone(); + let btree = blockstore.clone(); let exit = exit_sender.clone(); let (socket_sender, socket_receiver) = channel(); - let (blocktree_sender, blocktree_receiver) = channel(); + let (blockstore_sender, blockstore_receiver) = channel(); let bs_run = broadcast_stage_run.clone(); let thread_hdl = Builder::new() .name("solana-broadcaster".to_string()) .spawn(move || { let _finalizer = Finalizer::new(exit); - Self::run(&btree, &receiver, &socket_sender, &blocktree_sender, bs_run) + Self::run( + &btree, + &receiver, + &socket_sender, + &blockstore_sender, + bs_run, + ) }) .unwrap(); let mut thread_hdls = vec![thread_hdl]; @@ -213,15 +220,15 @@ impl BroadcastStage { .unwrap(); thread_hdls.push(t); } - let blocktree_receiver = Arc::new(Mutex::new(blocktree_receiver)); + let blockstore_receiver = Arc::new(Mutex::new(blockstore_receiver)); for _ in 0..NUM_INSERT_THREADS { - let blocktree_receiver = blocktree_receiver.clone(); + let blockstore_receiver = blockstore_receiver.clone(); let bs_record = broadcast_stage_run.clone(); - let btree = blocktree.clone(); + let btree = blockstore.clone(); let t = Builder::new() .name("solana-broadcaster-record".to_string()) .spawn(move || loop { - let res = bs_record.record(&blocktree_receiver, &btree); + let res = bs_record.record(&blockstore_receiver, &btree); let res = Self::handle_error(res); if let Some(res) = res { return res; @@ -248,7 +255,7 @@ mod test { use crate::cluster_info::{ClusterInfo, Node}; use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo}; use solana_ledger::entry::create_ticks; - use solana_ledger::{blocktree::Blocktree, get_tmp_ledger_path}; + use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path}; use solana_runtime::bank::Bank; use solana_sdk::hash::Hash; use solana_sdk::pubkey::Pubkey; @@ -261,7 +268,7 @@ mod test { use std::time::Duration; struct MockBroadcastStage { - blocktree: Arc, + blockstore: Arc, broadcast_service: BroadcastStage, bank: Arc, } @@ -272,7 +279,7 @@ mod test { entry_receiver: Receiver, ) -> MockBroadcastStage { // Make the database ledger - let blocktree = Arc::new(Blocktree::open(ledger_path).unwrap()); + let blockstore = Arc::new(Blockstore::open(ledger_path).unwrap()); // Make the leader node and scheduler let leader_info = Node::new_localhost_with_pubkey(leader_pubkey); @@ -298,12 +305,12 @@ mod test { cluster_info, entry_receiver, &exit_sender, - &blocktree, + &blockstore, StandardBroadcastRun::new(leader_keypair, 0), ); MockBroadcastStage { - blocktree, + blockstore, broadcast_service, bank, } @@ -350,8 +357,8 @@ mod test { ticks_per_slot, ); - let blocktree = broadcast_service.blocktree; - let (entries, _, _) = blocktree + let blockstore = broadcast_service.blockstore; + let (entries, _, _) = blockstore .get_slot_entries_with_shred_info(slot, 0) .expect("Expect entries to be present"); assert_eq!(entries.len(), max_tick_height as usize); @@ -363,6 +370,6 @@ mod test { .expect("Expect successful join of broadcast service"); } - 
Blocktree::destroy(&ledger_path).expect("Expected successful database destruction"); + Blockstore::destroy(&ledger_path).expect("Expected successful database destruction"); } } diff --git a/core/src/broadcast_stage/broadcast_fake_shreds_run.rs b/core/src/broadcast_stage/broadcast_fake_shreds_run.rs index fc9ed79fca1522..f7333ef0cd4e5c 100644 --- a/core/src/broadcast_stage/broadcast_fake_shreds_run.rs +++ b/core/src/broadcast_stage/broadcast_fake_shreds_run.rs @@ -26,17 +26,17 @@ impl BroadcastFakeShredsRun { impl BroadcastRun for BroadcastFakeShredsRun { fn run( &mut self, - blocktree: &Arc, + blockstore: &Arc, receiver: &Receiver, socket_sender: &Sender, - blocktree_sender: &Sender>>, + blockstore_sender: &Sender>>, ) -> Result<()> { // 1) Pull entries from banking stage let receive_results = broadcast_utils::recv_slot_entries(receiver)?; let bank = receive_results.bank.clone(); let last_tick_height = receive_results.last_tick_height; - let next_shred_index = blocktree + let next_shred_index = blockstore .meta(bank.slot()) .expect("Database error") .map(|meta| meta.consumed) @@ -83,7 +83,7 @@ impl BroadcastRun for BroadcastFakeShredsRun { } let data_shreds = Arc::new(data_shreds); - blocktree_sender.send(data_shreds.clone())?; + blockstore_sender.send(data_shreds.clone())?; // 3) Start broadcast step //some indicates fake shreds @@ -121,10 +121,10 @@ impl BroadcastRun for BroadcastFakeShredsRun { fn record( &self, receiver: &Arc>>>>, - blocktree: &Arc, + blockstore: &Arc, ) -> Result<()> { for data_shreds in receiver.lock().unwrap().iter() { - blocktree.insert_shreds(data_shreds.to_vec(), None, true)?; + blockstore.insert_shreds(data_shreds.to_vec(), None, true)?; } Ok(()) } diff --git a/core/src/broadcast_stage/fail_entry_verification_broadcast_run.rs b/core/src/broadcast_stage/fail_entry_verification_broadcast_run.rs index 739b45759c5991..a8316a28357621 100644 --- a/core/src/broadcast_stage/fail_entry_verification_broadcast_run.rs +++ b/core/src/broadcast_stage/fail_entry_verification_broadcast_run.rs @@ -21,10 +21,10 @@ impl FailEntryVerificationBroadcastRun { impl BroadcastRun for FailEntryVerificationBroadcastRun { fn run( &mut self, - blocktree: &Arc, + blockstore: &Arc, receiver: &Receiver, socket_sender: &Sender, - blocktree_sender: &Sender>>, + blockstore_sender: &Sender>>, ) -> Result<()> { // 1) Pull entries from banking stage let mut receive_results = broadcast_utils::recv_slot_entries(receiver)?; @@ -38,7 +38,7 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun { last_entry.hash = Hash::default(); } - let next_shred_index = blocktree + let next_shred_index = blockstore .meta(bank.slot()) .expect("Database error") .map(|meta| meta.consumed) @@ -61,7 +61,7 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun { ); let data_shreds = Arc::new(data_shreds); - blocktree_sender.send(data_shreds.clone())?; + blockstore_sender.send(data_shreds.clone())?; // 3) Start broadcast step let bank_epoch = bank.get_leader_schedule_epoch(bank.slot()); let stakes = staking_utils::staked_nodes_at_epoch(&bank, bank_epoch); @@ -90,12 +90,12 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun { fn record( &self, receiver: &Arc>>>>, - blocktree: &Arc, + blockstore: &Arc, ) -> Result<()> { let all_shreds = receiver.lock().unwrap().recv()?; - blocktree + blockstore .insert_shreds(all_shreds.to_vec(), None, true) - .expect("Failed to insert shreds in blocktree"); + .expect("Failed to insert shreds in blockstore"); Ok(()) } } diff --git 
a/core/src/broadcast_stage/standard_broadcast_run.rs b/core/src/broadcast_stage/standard_broadcast_run.rs index b37ce18046b398..ebe6643bab3a5e 100644 --- a/core/src/broadcast_stage/standard_broadcast_run.rs +++ b/core/src/broadcast_stage/standard_broadcast_run.rs @@ -83,13 +83,13 @@ impl StandardBroadcastRun { last_unfinished_slot_shred } - fn init_shredder(&self, blocktree: &Blocktree, reference_tick: u8) -> (Shredder, u32) { + fn init_shredder(&self, blockstore: &Blockstore, reference_tick: u8) -> (Shredder, u32) { let (slot, parent_slot) = self.current_slot_and_parent.unwrap(); let next_shred_index = self .unfinished_slot .map(|s| s.next_shred_index) .unwrap_or_else(|| { - blocktree + blockstore .meta(slot) .expect("Database error") .map(|meta| meta.consumed) @@ -132,27 +132,27 @@ impl StandardBroadcastRun { &mut self, cluster_info: &Arc>, sock: &UdpSocket, - blocktree: &Arc, + blockstore: &Arc, receive_results: ReceiveResults, ) -> Result<()> { let (bsend, brecv) = channel(); let (ssend, srecv) = channel(); - self.process_receive_results(&blocktree, &ssend, &bsend, receive_results)?; + self.process_receive_results(&blockstore, &ssend, &bsend, receive_results)?; let srecv = Arc::new(Mutex::new(srecv)); let brecv = Arc::new(Mutex::new(brecv)); //data let _ = self.transmit(&srecv, cluster_info, sock); //coding let _ = self.transmit(&srecv, cluster_info, sock); - let _ = self.record(&brecv, blocktree); + let _ = self.record(&brecv, blockstore); Ok(()) } fn process_receive_results( &mut self, - blocktree: &Arc, + blockstore: &Arc, socket_sender: &Sender, - blocktree_sender: &Sender>>, + blockstore_sender: &Sender>>, receive_results: ReceiveResults, ) -> Result<()> { let mut receive_elapsed = receive_results.time_elapsed; @@ -181,7 +181,7 @@ impl StandardBroadcastRun { // 2) Convert entries to shreds and coding shreds let (shredder, next_shred_index) = self.init_shredder( - blocktree, + blockstore, (bank.tick_height() % bank.ticks_per_slot()) as u8, ); let mut data_shreds = self.entries_to_data_shreds( @@ -190,13 +190,13 @@ impl StandardBroadcastRun { &receive_results.entries, last_tick_height == bank.max_tick_height(), ); - //Insert the first shred so blocktree stores that the leader started this block + //Insert the first shred so blockstore stores that the leader started this block //This must be done before the blocks are sent out over the wire. 
if !data_shreds.is_empty() && data_shreds[0].index() == 0 { let first = vec![data_shreds[0].clone()]; - blocktree + blockstore .insert_shreds(first, None, true) - .expect("Failed to insert shreds in blocktree"); + .expect("Failed to insert shreds in blockstore"); } let last_data_shred = data_shreds.len(); if let Some(last_shred) = last_unfinished_slot_shred { @@ -209,7 +209,7 @@ impl StandardBroadcastRun { let stakes = stakes.map(Arc::new); let data_shreds = Arc::new(data_shreds); socket_sender.send((stakes.clone(), data_shreds.clone()))?; - blocktree_sender.send(data_shreds.clone())?; + blockstore_sender.send(data_shreds.clone())?; let coding_shreds = shredder.data_shreds_to_coding_shreds(&data_shreds[0..last_data_shred]); let coding_shreds = Arc::new(coding_shreds); socket_sender.send((stakes, coding_shreds))?; @@ -227,8 +227,8 @@ impl StandardBroadcastRun { Ok(()) } - fn insert(&self, blocktree: &Arc, shreds: Arc>) -> Result<()> { - // Insert shreds into blocktree + fn insert(&self, blockstore: &Arc, shreds: Arc>) -> Result<()> { + // Insert shreds into blockstore let insert_shreds_start = Instant::now(); //The first shred is inserted synchronously let data_shreds = if !shreds.is_empty() && shreds[0].index() == 0 { @@ -236,9 +236,9 @@ impl StandardBroadcastRun { } else { shreds.to_vec() }; - blocktree + blockstore .insert_shreds(data_shreds, None, true) - .expect("Failed to insert shreds in blocktree"); + .expect("Failed to insert shreds in blockstore"); let insert_shreds_elapsed = insert_shreds_start.elapsed(); self.update_broadcast_stats(BroadcastStats { insert_shreds_elapsed: duration_as_us(&insert_shreds_elapsed), @@ -317,13 +317,18 @@ impl StandardBroadcastRun { impl BroadcastRun for StandardBroadcastRun { fn run( &mut self, - blocktree: &Arc, + blockstore: &Arc, receiver: &Receiver, socket_sender: &Sender, - blocktree_sender: &Sender>>, + blockstore_sender: &Sender>>, ) -> Result<()> { let receive_results = broadcast_utils::recv_slot_entries(receiver)?; - self.process_receive_results(blocktree, socket_sender, blocktree_sender, receive_results) + self.process_receive_results( + blockstore, + socket_sender, + blockstore_sender, + receive_results, + ) } fn transmit( &self, @@ -337,10 +342,10 @@ impl BroadcastRun for StandardBroadcastRun { fn record( &self, receiver: &Arc>>>>, - blocktree: &Arc, + blockstore: &Arc, ) -> Result<()> { let shreds = receiver.lock().unwrap().recv()?; - self.insert(blocktree, shreds) + self.insert(blockstore, shreds) } } @@ -350,7 +355,7 @@ mod test { use crate::cluster_info::{ClusterInfo, Node}; use crate::genesis_utils::create_genesis_config; use solana_ledger::{ - blocktree::Blocktree, entry::create_ticks, get_tmp_ledger_path, + blockstore::Blockstore, entry::create_ticks, get_tmp_ledger_path, shred::max_ticks_per_n_shreds, }; use solana_runtime::bank::Bank; @@ -365,7 +370,7 @@ mod test { fn setup( num_shreds_per_slot: Slot, ) -> ( - Arc, + Arc, GenesisConfig, Arc>, Arc, @@ -374,8 +379,8 @@ mod test { ) { // Setup let ledger_path = get_tmp_ledger_path!(); - let blocktree = Arc::new( - Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"), + let blockstore = Arc::new( + Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"), ); let leader_keypair = Arc::new(Keypair::new()); let leader_pubkey = leader_keypair.pubkey(); @@ -388,7 +393,7 @@ mod test { genesis_config.ticks_per_slot = max_ticks_per_n_shreds(num_shreds_per_slot) + 1; let bank0 = Arc::new(Bank::new(&genesis_config)); ( - blocktree, + 
blockstore, genesis_config, cluster_info, bank0, @@ -433,7 +438,7 @@ mod test { fn test_slot_interrupt() { // Setup let num_shreds_per_slot = 2; - let (blocktree, genesis_config, cluster_info, bank0, leader_keypair, socket) = + let (blockstore, genesis_config, cluster_info, bank0, leader_keypair, socket) = setup(num_shreds_per_slot); // Insert 1 less than the number of ticks needed to finish the slot @@ -448,14 +453,14 @@ mod test { // Step 1: Make an incomplete transmission for slot 0 let mut standard_broadcast_run = StandardBroadcastRun::new(leader_keypair.clone(), 0); standard_broadcast_run - .test_process_receive_results(&cluster_info, &socket, &blocktree, receive_results) + .test_process_receive_results(&cluster_info, &socket, &blockstore, receive_results) .unwrap(); let unfinished_slot = standard_broadcast_run.unfinished_slot.as_ref().unwrap(); assert_eq!(unfinished_slot.next_shred_index as u64, num_shreds_per_slot); assert_eq!(unfinished_slot.slot, 0); assert_eq!(unfinished_slot.parent, 0); // Make sure the slot is not complete - assert!(!blocktree.is_full(0)); + assert!(!blockstore.is_full(0)); // Modify the stats, should reset later standard_broadcast_run .stats @@ -463,10 +468,10 @@ mod test { .unwrap() .receive_elapsed = 10; - // Try to fetch ticks from blocktree, nothing should break - assert_eq!(blocktree.get_slot_entries(0, 0, None).unwrap(), ticks0); + // Try to fetch ticks from blockstore, nothing should break + assert_eq!(blockstore.get_slot_entries(0, 0, None).unwrap(), ticks0); assert_eq!( - blocktree + blockstore .get_slot_entries(0, num_shreds_per_slot, None) .unwrap(), vec![], @@ -487,7 +492,7 @@ mod test { last_tick_height: (ticks1.len() - 1) as u64, }; standard_broadcast_run - .test_process_receive_results(&cluster_info, &socket, &blocktree, receive_results) + .test_process_receive_results(&cluster_info, &socket, &blockstore, receive_results) .unwrap(); let unfinished_slot = standard_broadcast_run.unfinished_slot.as_ref().unwrap(); @@ -503,10 +508,10 @@ mod test { 0 ); - // Try to fetch the incomplete ticks from blocktree, should succeed - assert_eq!(blocktree.get_slot_entries(0, 0, None).unwrap(), ticks0); + // Try to fetch the incomplete ticks from blockstore, should succeed + assert_eq!(blockstore.get_slot_entries(0, 0, None).unwrap(), ticks0); assert_eq!( - blocktree + blockstore .get_slot_entries(0, num_shreds_per_slot, None) .unwrap(), vec![], @@ -517,7 +522,7 @@ mod test { fn test_slot_finish() { // Setup let num_shreds_per_slot = 2; - let (blocktree, genesis_config, cluster_info, bank0, leader_keypair, socket) = + let (blockstore, genesis_config, cluster_info, bank0, leader_keypair, socket) = setup(num_shreds_per_slot); // Insert complete slot of ticks needed to finish the slot @@ -531,7 +536,7 @@ mod test { let mut standard_broadcast_run = StandardBroadcastRun::new(leader_keypair, 0); standard_broadcast_run - .test_process_receive_results(&cluster_info, &socket, &blocktree, receive_results) + .test_process_receive_results(&cluster_info, &socket, &blockstore, receive_results) .unwrap(); assert!(standard_broadcast_run.unfinished_slot.is_none()) } diff --git a/core/src/chacha.rs b/core/src/chacha.rs index b4f799cd10d0ec..baa29fdbed0588 100644 --- a/core/src/chacha.rs +++ b/core/src/chacha.rs @@ -1,4 +1,4 @@ -use solana_ledger::blocktree::Blocktree; +use solana_ledger::blockstore::Blockstore; use solana_sdk::clock::Slot; use std::fs::File; use std::io; @@ -12,7 +12,7 @@ pub const CHACHA_BLOCK_SIZE: usize = 64; pub const CHACHA_KEY_SIZE: usize = 32; pub fn 
chacha_cbc_encrypt_ledger( - blocktree: &Arc, + blockstore: &Arc, start_slot: Slot, slots_per_segment: u64, out_path: &Path, @@ -28,7 +28,7 @@ pub fn chacha_cbc_encrypt_ledger( let mut current_slot = start_slot; let mut start_index = 0; loop { - match blocktree.get_data_shreds(current_slot, start_index, std::u64::MAX, &mut buffer) { + match blockstore.get_data_shreds(current_slot, start_index, std::u64::MAX, &mut buffer) { Ok((last_index, mut size)) => { debug!( "chacha: encrypting slice: {} num_shreds: {} data_len: {}", @@ -75,7 +75,7 @@ pub fn chacha_cbc_encrypt_ledger( mod tests { use crate::chacha::chacha_cbc_encrypt_ledger; use crate::gen_keys::GenKeys; - use solana_ledger::blocktree::Blocktree; + use solana_ledger::blockstore::Blockstore; use solana_ledger::entry::Entry; use solana_ledger::get_tmp_ledger_path; use solana_sdk::hash::{hash, Hash, Hasher}; @@ -131,7 +131,7 @@ mod tests { let ledger_path = get_tmp_ledger_path!(); let ticks_per_slot = 16; let slots_per_segment = 32; - let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap()); + let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap()); let out_path = tmp_file_path("test_encrypt_ledger"); let seed = [2u8; 32]; @@ -139,7 +139,7 @@ mod tests { let keypair = rnd.gen_keypair(); let entries = make_tiny_deterministic_test_entries(slots_per_segment); - blocktree + blockstore .write_entries( 0, 0, @@ -157,8 +157,14 @@ mod tests { "abcd1234abcd1234abcd1234abcd1234 abcd1234abcd1234abcd1234abcd1234 abcd1234abcd1234abcd1234abcd1234 abcd1234abcd1234abcd1234abcd1234" ); - chacha_cbc_encrypt_ledger(&blocktree, 0, slots_per_segment as u64, &out_path, &mut key) - .unwrap(); + chacha_cbc_encrypt_ledger( + &blockstore, + 0, + slots_per_segment as u64, + &out_path, + &mut key, + ) + .unwrap(); let mut out_file = File::open(&out_path).unwrap(); let mut buf = vec![]; let size = out_file.read_to_end(&mut buf).unwrap(); diff --git a/core/src/chacha_cuda.rs b/core/src/chacha_cuda.rs index 7497d24414e3e2..3eb579be4e89b7 100644 --- a/core/src/chacha_cuda.rs +++ b/core/src/chacha_cuda.rs @@ -1,7 +1,7 @@ // Module used by validators to approve storage mining proofs in parallel using the GPU use crate::chacha::{CHACHA_BLOCK_SIZE, CHACHA_KEY_SIZE}; -use solana_ledger::blocktree::Blocktree; +use solana_ledger::blockstore::Blockstore; use solana_perf::perf_libs; use solana_sdk::hash::Hash; use std::io; @@ -13,7 +13,7 @@ use std::sync::Arc; // Then sample each block at the offsets provided by samples argument with sha256 // and return the vec of sha states pub fn chacha_cbc_encrypt_file_many_keys( - blocktree: &Arc, + blockstore: &Arc, segment: u64, slots_per_segment: u64, ivecs: &mut [u8], @@ -46,7 +46,7 @@ pub fn chacha_cbc_encrypt_file_many_keys( (api.chacha_init_sha_state)(int_sha_states.as_mut_ptr(), num_keys as u32); } loop { - match blocktree.get_data_shreds(current_slot, start_index, std::u64::MAX, &mut buffer) { + match blockstore.get_data_shreds(current_slot, start_index, std::u64::MAX, &mut buffer) { Ok((last_index, mut size)) => { debug!( "chacha_cuda: encrypting segment: {} num_shreds: {} data_len: {}", @@ -134,9 +134,9 @@ mod tests { let entries = create_ticks(slots_per_segment, 0, Hash::default()); let ledger_path = get_tmp_ledger_path!(); let ticks_per_slot = 16; - let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap()); + let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap()); - blocktree + blockstore .write_entries( 0, 0, @@ -160,7 +160,7 @@ mod tests { let mut cpu_iv = ivecs.clone(); 
chacha_cbc_encrypt_ledger( - &blocktree, + &blockstore, 0, slots_per_segment as u64, out_path, @@ -171,7 +171,7 @@ mod tests { let ref_hash = sample_file(&out_path, &samples).unwrap(); let hashes = chacha_cbc_encrypt_file_many_keys( - &blocktree, + &blockstore, 0, slots_per_segment as u64, &mut ivecs, @@ -196,8 +196,8 @@ mod tests { let ledger_path = get_tmp_ledger_path!(); let ticks_per_slot = 90; let entries = create_ticks(2 * ticks_per_slot, 0, Hash::default()); - let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap()); - blocktree + let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap()); + blockstore .write_entries( 0, 0, @@ -224,7 +224,7 @@ mod tests { ivec[0] = i; ivecs.extend(ivec.clone().iter()); chacha_cbc_encrypt_ledger( - &blocktree.clone(), + &blockstore.clone(), 0, DEFAULT_SLOTS_PER_SEGMENT, out_path, @@ -242,7 +242,7 @@ mod tests { } let hashes = chacha_cbc_encrypt_file_many_keys( - &blocktree, + &blockstore, 0, DEFAULT_SLOTS_PER_SEGMENT, &mut ivecs, @@ -267,9 +267,9 @@ mod tests { let mut keys = hex!("abc123"); let ledger_path = get_tmp_ledger_path!(); let samples = [0]; - let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap()); + let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap()); assert!(chacha_cbc_encrypt_file_many_keys( - &blocktree, + &blockstore, 0, DEFAULT_SLOTS_PER_SEGMENT, &mut keys, diff --git a/core/src/cluster_info.rs b/core/src/cluster_info.rs index 8074fe7d383752..e6f4bf457c7437 100644 --- a/core/src/cluster_info.rs +++ b/core/src/cluster_info.rs @@ -30,7 +30,7 @@ use bincode::{serialize, serialized_size}; use core::cmp; use itertools::Itertools; use rand::{thread_rng, Rng}; -use solana_ledger::{bank_forks::BankForks, blocktree::Blocktree, staking_utils}; +use solana_ledger::{bank_forks::BankForks, blockstore::Blockstore, staking_utils}; use solana_measure::thread_mem_usage; use solana_metrics::{datapoint_debug, inc_new_counter_debug, inc_new_counter_error}; use solana_net_utils::{ @@ -1113,12 +1113,12 @@ impl ClusterInfo { } fn get_data_shred_as_packet( - blocktree: &Arc, + blockstore: &Arc, slot: Slot, shred_index: u64, dest: &SocketAddr, ) -> Result> { - let data = blocktree.get_data_shred(slot, shred_index)?; + let data = blockstore.get_data_shred(slot, shred_index)?; Ok(data.map(|data| { let mut packet = Packet::default(); packet.meta.size = data.len(); @@ -1132,14 +1132,14 @@ impl ClusterInfo { recycler: &PacketsRecycler, from: &ContactInfo, from_addr: &SocketAddr, - blocktree: Option<&Arc>, + blockstore: Option<&Arc>, me: &ContactInfo, slot: Slot, shred_index: u64, ) -> Option { - if let Some(blocktree) = blocktree { + if let Some(blockstore) = blockstore { // Try to find the requested index in one of the slots - let packet = Self::get_data_shred_as_packet(blocktree, slot, shred_index, from_addr); + let packet = Self::get_data_shred_as_packet(blockstore, slot, shred_index, from_addr); if let Ok(Some(packet)) = packet { inc_new_counter_debug!("cluster_info-window-request-ledger", 1); @@ -1166,17 +1166,17 @@ impl ClusterInfo { fn run_highest_window_request( recycler: &PacketsRecycler, from_addr: &SocketAddr, - blocktree: Option<&Arc>, + blockstore: Option<&Arc>, slot: Slot, highest_index: u64, ) -> Option { - let blocktree = blocktree?; + let blockstore = blockstore?; // Try to find the requested index in one of the slots - let meta = blocktree.meta(slot).ok()??; + let meta = blockstore.meta(slot).ok()??; if meta.received > highest_index { // meta.received must be at least 1 by this point let packet = - 
Self::get_data_shred_as_packet(blocktree, slot, meta.received - 1, from_addr) + Self::get_data_shred_as_packet(blockstore, slot, meta.received - 1, from_addr) .ok()??; return Some(Packets::new_with_recycler_data( recycler, @@ -1190,19 +1190,19 @@ impl ClusterInfo { fn run_orphan( recycler: &PacketsRecycler, from_addr: &SocketAddr, - blocktree: Option<&Arc>, + blockstore: Option<&Arc>, mut slot: Slot, max_responses: usize, ) -> Option { let mut res = Packets::new_with_recycler(recycler.clone(), 64, "run_orphan"); - if let Some(blocktree) = blocktree { + if let Some(blockstore) = blockstore { // Try to find the next "n" parent slots of the input slot - while let Ok(Some(meta)) = blocktree.meta(slot) { + while let Ok(Some(meta)) = blockstore.meta(slot) { if meta.received == 0 { break; } let packet = - Self::get_data_shred_as_packet(blocktree, slot, meta.received - 1, from_addr); + Self::get_data_shred_as_packet(blockstore, slot, meta.received - 1, from_addr); if let Ok(Some(packet)) = packet { res.packets.push(packet); } @@ -1222,7 +1222,7 @@ impl ClusterInfo { fn handle_packets( me: &Arc>, recycler: &PacketsRecycler, - blocktree: Option<&Arc>, + blockstore: Option<&Arc>, stakes: &HashMap, packets: Packets, response_sender: &PacketSender, @@ -1330,7 +1330,8 @@ impl ClusterInfo { ); } _ => { - let rsp = Self::handle_repair(me, recycler, &from_addr, blocktree, request); + let rsp = + Self::handle_repair(me, recycler, &from_addr, blockstore, request); if let Some(rsp) = rsp { let _ignore_disconnect = response_sender.send(rsp); } @@ -1475,7 +1476,7 @@ impl ClusterInfo { me: &Arc>, recycler: &PacketsRecycler, from_addr: &SocketAddr, - blocktree: Option<&Arc>, + blockstore: Option<&Arc>, request: Protocol, ) -> Option { let now = Instant::now(); @@ -1511,7 +1512,7 @@ impl ClusterInfo { recycler, from, &from_addr, - blocktree, + blockstore, &my_info, *slot, *shred_index, @@ -1526,7 +1527,7 @@ impl ClusterInfo { Self::run_highest_window_request( recycler, &from_addr, - blocktree, + blockstore, *slot, *highest_index, ), @@ -1539,7 +1540,7 @@ impl ClusterInfo { Self::run_orphan( recycler, &from_addr, - blocktree, + blockstore, *slot, MAX_ORPHAN_REPAIR_RESPONSES, ), @@ -1559,7 +1560,7 @@ impl ClusterInfo { fn run_listen( obj: &Arc>, recycler: &PacketsRecycler, - blocktree: Option<&Arc>, + blockstore: Option<&Arc>, bank_forks: Option<&Arc>>, requests_receiver: &PacketReceiver, response_sender: &PacketSender, @@ -1574,12 +1575,12 @@ impl ClusterInfo { None => HashMap::new(), }; - Self::handle_packets(obj, &recycler, blocktree, &stakes, reqs, response_sender); + Self::handle_packets(obj, &recycler, blockstore, &stakes, reqs, response_sender); Ok(()) } pub fn listen( me: Arc>, - blocktree: Option>, + blockstore: Option>, bank_forks: Option>>, requests_receiver: PacketReceiver, response_sender: PacketSender, @@ -1593,7 +1594,7 @@ impl ClusterInfo { let e = Self::run_listen( &me, &recycler, - blocktree.as_ref(), + blockstore.as_ref(), bank_forks.as_ref(), &requests_receiver, &response_sender, @@ -1916,9 +1917,9 @@ mod tests { use crate::repair_service::RepairType; use crate::result::Error; use rayon::prelude::*; - use solana_ledger::blocktree::make_many_slot_entries; - use solana_ledger::blocktree::Blocktree; - use solana_ledger::blocktree_processor::fill_blocktree_slot_with_ticks; + use solana_ledger::blockstore::make_many_slot_entries; + use solana_ledger::blockstore::Blockstore; + use solana_ledger::blockstore_processor::fill_blockstore_slot_with_ticks; use solana_ledger::get_tmp_ledger_path; use 
solana_ledger::shred::{ max_ticks_per_n_shreds, CodingShredHeader, DataShredHeader, Shred, ShredCommonHeader, @@ -2062,7 +2063,7 @@ mod tests { solana_logger::setup(); let ledger_path = get_tmp_ledger_path!(); { - let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap()); + let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap()); let me = ContactInfo::new( &Pubkey::new_rand(), socketaddr!("127.0.0.1:1234"), @@ -2080,7 +2081,7 @@ mod tests { &recycler, &me, &socketaddr_any!(), - Some(&blocktree), + Some(&blockstore), &me, 0, 0, @@ -2097,7 +2098,7 @@ mod tests { CodingShredHeader::default(), ); - blocktree + blockstore .insert_shreds(vec![shred_info], None, false) .expect("Expect successful ledger write"); @@ -2105,7 +2106,7 @@ mod tests { &recycler, &me, &socketaddr_any!(), - Some(&blocktree), + Some(&blockstore), &me, 2, 1, @@ -2121,7 +2122,7 @@ mod tests { assert_eq!(rv[0].slot(), 2); } - Blocktree::destroy(&ledger_path).expect("Expected successful database destruction"); + Blockstore::destroy(&ledger_path).expect("Expected successful database destruction"); } /// test run_window_requestwindow requests respond with the right shred, and do not overrun @@ -2131,18 +2132,18 @@ mod tests { solana_logger::setup(); let ledger_path = get_tmp_ledger_path!(); { - let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap()); + let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap()); let rv = ClusterInfo::run_highest_window_request( &recycler, &socketaddr_any!(), - Some(&blocktree), + Some(&blockstore), 0, 0, ); assert!(rv.is_none()); - let _ = fill_blocktree_slot_with_ticks( - &blocktree, + let _ = fill_blockstore_slot_with_ticks( + &blockstore, max_ticks_per_n_shreds(1) + 1, 2, 1, @@ -2152,7 +2153,7 @@ mod tests { let rv = ClusterInfo::run_highest_window_request( &recycler, &socketaddr_any!(), - Some(&blocktree), + Some(&blockstore), 2, 1, ); @@ -2163,21 +2164,21 @@ mod tests { .filter_map(|b| Shred::new_from_serialized_shred(b.data.to_vec()).ok()) .collect(); assert!(!rv.is_empty()); - let index = blocktree.meta(2).unwrap().unwrap().received - 1; + let index = blockstore.meta(2).unwrap().unwrap().received - 1; assert_eq!(rv[0].index(), index as u32); assert_eq!(rv[0].slot(), 2); let rv = ClusterInfo::run_highest_window_request( &recycler, &socketaddr_any!(), - Some(&blocktree), + Some(&blockstore), 2, index + 1, ); assert!(rv.is_none()); } - Blocktree::destroy(&ledger_path).expect("Expected successful database destruction"); + Blockstore::destroy(&ledger_path).expect("Expected successful database destruction"); } #[test] @@ -2186,25 +2187,27 @@ mod tests { let recycler = PacketsRecycler::default(); let ledger_path = get_tmp_ledger_path!(); { - let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap()); - let rv = ClusterInfo::run_orphan(&recycler, &socketaddr_any!(), Some(&blocktree), 2, 0); + let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap()); + let rv = + ClusterInfo::run_orphan(&recycler, &socketaddr_any!(), Some(&blockstore), 2, 0); assert!(rv.is_none()); // Create slots 1, 2, 3 with 5 shreds apiece let (shreds, _) = make_many_slot_entries(1, 3, 5); - blocktree + blockstore .insert_shreds(shreds, None, false) .expect("Expect successful ledger write"); // We don't have slot 4, so we don't know how to service this requeset - let rv = ClusterInfo::run_orphan(&recycler, &socketaddr_any!(), Some(&blocktree), 4, 5); + let rv = + ClusterInfo::run_orphan(&recycler, &socketaddr_any!(), Some(&blockstore), 4, 5); assert!(rv.is_none()); // For slot 
3, we should return the highest shreds from slots 3, 2, 1 respectively // for this request let rv: Vec<_> = - ClusterInfo::run_orphan(&recycler, &socketaddr_any!(), Some(&blocktree), 3, 5) + ClusterInfo::run_orphan(&recycler, &socketaddr_any!(), Some(&blockstore), 3, 5) .expect("run_orphan packets") .packets .iter() @@ -2213,9 +2216,9 @@ mod tests { let expected: Vec<_> = (1..=3) .rev() .map(|slot| { - let index = blocktree.meta(slot).unwrap().unwrap().received - 1; + let index = blockstore.meta(slot).unwrap().unwrap().received - 1; ClusterInfo::get_data_shred_as_packet( - &blocktree, + &blockstore, slot, index, &socketaddr_any!(), @@ -2227,7 +2230,7 @@ mod tests { assert_eq!(rv, expected) } - Blocktree::destroy(&ledger_path).expect("Expected successful database destruction"); + Blockstore::destroy(&ledger_path).expect("Expected successful database destruction"); } fn assert_in_range(x: u16, range: (u16, u16)) { diff --git a/core/src/cluster_info_repair_listener.rs b/core/src/cluster_info_repair_listener.rs index eeb3d928dfe544..476b9c27c9e83f 100644 --- a/core/src/cluster_info_repair_listener.rs +++ b/core/src/cluster_info_repair_listener.rs @@ -5,7 +5,7 @@ use byteorder::{ByteOrder, LittleEndian}; use rand::seq::SliceRandom; use rand::SeedableRng; use rand_chacha::ChaChaRng; -use solana_ledger::blocktree::Blocktree; +use solana_ledger::blockstore::Blockstore; use solana_ledger::rooted_slot_iterator::RootedSlotIterator; use solana_sdk::{epoch_schedule::EpochSchedule, pubkey::Pubkey}; use std::{ @@ -89,13 +89,13 @@ pub struct ClusterInfoRepairListener { impl ClusterInfoRepairListener { pub fn new( - blocktree: &Arc, + blockstore: &Arc, exit: &Arc, cluster_info: Arc>, epoch_schedule: EpochSchedule, ) -> Self { let exit = exit.clone(); - let blocktree = blocktree.clone(); + let blockstore = blockstore.clone(); let thread = Builder::new() .name("solana-cluster_info_repair_listener".to_string()) .spawn(move || { @@ -105,7 +105,7 @@ impl ClusterInfoRepairListener { // 2) The latest root the peer gossiped let mut peer_infos: HashMap = HashMap::new(); let _ = Self::recv_loop( - &blocktree, + &blockstore, &mut peer_infos, &exit, &cluster_info, @@ -119,7 +119,7 @@ impl ClusterInfoRepairListener { } fn recv_loop( - blocktree: &Blocktree, + blockstore: &Blockstore, peer_infos: &mut HashMap, exit: &Arc, cluster_info: &Arc>, @@ -134,7 +134,7 @@ impl ClusterInfoRepairListener { return Ok(()); } - let lowest_slot = blocktree.lowest_slot(); + let lowest_slot = blockstore.lowest_slot(); let peers = cluster_info.read().unwrap().gossip_peers(); let mut peers_needing_repairs: HashMap = HashMap::new(); @@ -156,7 +156,7 @@ impl ClusterInfoRepairListener { // After updating all the peers, send out repairs to those that need it let _ = Self::serve_repairs( &my_pubkey, - blocktree, + blockstore, peer_infos, &peers_needing_repairs, &socket, @@ -219,7 +219,7 @@ impl ClusterInfoRepairListener { fn serve_repairs( my_pubkey: &Pubkey, - blocktree: &Blocktree, + blockstore: &Blockstore, peer_infos: &mut HashMap, repairees: &HashMap, socket: &UdpSocket, @@ -258,7 +258,7 @@ impl ClusterInfoRepairListener { my_pubkey, repairee_pubkey, my_root, - blocktree, + blockstore, &repairee_epoch_slots, &eligible_repairmen, socket, @@ -286,7 +286,7 @@ impl ClusterInfoRepairListener { my_pubkey: &Pubkey, repairee_pubkey: &Pubkey, my_root: Slot, - blocktree: &Blocktree, + blockstore: &Blockstore, repairee_epoch_slots: &EpochSlots, eligible_repairmen: &[&Pubkey], socket: &UdpSocket, @@ -295,7 +295,7 @@ impl ClusterInfoRepairListener { 
epoch_schedule: &EpochSchedule, last_repaired_slot_and_ts: (u64, u64), ) -> Result> { - let slot_iter = RootedSlotIterator::new(repairee_epoch_slots.root, &blocktree); + let slot_iter = RootedSlotIterator::new(repairee_epoch_slots.root, &blockstore); if slot_iter.is_err() { info!( "Root for repairee is on different fork. My root: {}, repairee_root: {} repairee_pubkey: {:?}", @@ -366,17 +366,17 @@ impl ClusterInfoRepairListener { // a database iterator over the slots because by the time this node is // sending the shreds in this slot for repair, we expect these slots // to be full. - if let Some(shred_data) = blocktree + if let Some(shred_data) = blockstore .get_data_shred(slot, shred_index as u64) - .expect("Failed to read data shred from blocktree") + .expect("Failed to read data shred from blockstore") { socket.send_to(&shred_data[..], repairee_addr)?; total_data_shreds_sent += 1; } - if let Some(coding_bytes) = blocktree + if let Some(coding_bytes) = blockstore .get_coding_shred(slot, shred_index as u64) - .expect("Failed to read coding shred from blocktree") + .expect("Failed to read coding shred from blockstore") { socket.send_to(&coding_bytes[..], repairee_addr)?; total_coding_shreds_sent += 1; @@ -550,7 +550,7 @@ mod tests { use crate::packet::Packets; use crate::streamer; use crate::streamer::PacketReceiver; - use solana_ledger::blocktree::make_many_slot_entries; + use solana_ledger::blockstore::make_many_slot_entries; use solana_ledger::get_tmp_ledger_path; use solana_perf::recycler::Recycler; use std::collections::BTreeSet; @@ -699,16 +699,16 @@ mod tests { #[test] fn test_serve_same_repairs_to_repairee() { - let blocktree_path = get_tmp_ledger_path!(); - let blocktree = Blocktree::open(&blocktree_path).unwrap(); + let blockstore_path = get_tmp_ledger_path!(); + let blockstore = Blockstore::open(&blockstore_path).unwrap(); let num_slots = 2; let (shreds, _) = make_many_slot_entries(0, num_slots, 1); - blocktree.insert_shreds(shreds, None, false).unwrap(); + blockstore.insert_shreds(shreds, None, false).unwrap(); // Write roots so that these slots will qualify to be sent by the repairman let last_root = num_slots - 1; let roots: Vec<_> = (0..=last_root).collect(); - blocktree.set_roots(&roots).unwrap(); + blockstore.set_roots(&roots).unwrap(); // Set up my information let my_pubkey = Pubkey::new_rand(); @@ -729,7 +729,7 @@ mod tests { &my_pubkey, &mock_repairee.id, num_slots - 1, - &blocktree, + &blockstore, &repairee_epoch_slots, &eligible_repairmen, &my_socket, @@ -749,7 +749,7 @@ mod tests { &my_pubkey, &mock_repairee.id, num_slots - 1, - &blocktree, + &blockstore, &repairee_epoch_slots, &eligible_repairmen, &my_socket, @@ -765,20 +765,20 @@ mod tests { #[test] fn test_serve_repairs_to_repairee() { - let blocktree_path = get_tmp_ledger_path!(); - let blocktree = Blocktree::open(&blocktree_path).unwrap(); + let blockstore_path = get_tmp_ledger_path!(); + let blockstore = Blockstore::open(&blockstore_path).unwrap(); let entries_per_slot = 5; let num_slots = 10; assert_eq!(num_slots % 2, 0); let (shreds, _) = make_many_slot_entries(0, num_slots, entries_per_slot); let num_shreds_per_slot = shreds.len() as u64 / num_slots; - // Write slots in the range [0, num_slots] to blocktree - blocktree.insert_shreds(shreds, None, false).unwrap(); + // Write slots in the range [0, num_slots] to blockstore + blockstore.insert_shreds(shreds, None, false).unwrap(); // Write roots so that these slots will qualify to be sent by the repairman let roots: Vec<_> = (0..=num_slots - 1).collect(); - 
blocktree.set_roots(&roots).unwrap(); + blockstore.set_roots(&roots).unwrap(); // Set up my information let my_pubkey = Pubkey::new_rand(); @@ -809,7 +809,7 @@ mod tests { &repairman_pubkey, &mock_repairee.id, num_slots - 1, - &blocktree, + &blockstore, &repairee_epoch_slots, &eligible_repairmen_refs, &my_socket, @@ -848,26 +848,26 @@ mod tests { // Shutdown mock_repairee.close().unwrap(); - drop(blocktree); - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + drop(blockstore); + Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] fn test_no_repair_past_confirmed_epoch() { - let blocktree_path = get_tmp_ledger_path!(); - let blocktree = Blocktree::open(&blocktree_path).unwrap(); + let blockstore_path = get_tmp_ledger_path!(); + let blockstore = Blockstore::open(&blockstore_path).unwrap(); let stakers_slot_offset = 16; let slots_per_epoch = stakers_slot_offset * 2; let epoch_schedule = EpochSchedule::custom(slots_per_epoch, stakers_slot_offset, false); - // Create shreds for first two epochs and write them to blocktree + // Create shreds for first two epochs and write them to blockstore let total_slots = slots_per_epoch * 2; let (shreds, _) = make_many_slot_entries(0, total_slots, 1); - blocktree.insert_shreds(shreds, None, false).unwrap(); + blockstore.insert_shreds(shreds, None, false).unwrap(); // Write roots so that these slots will qualify to be sent by the repairman let roots: Vec<_> = (0..=slots_per_epoch * 2 - 1).collect(); - blocktree.set_roots(&roots).unwrap(); + blockstore.set_roots(&roots).unwrap(); // Set up my information let my_pubkey = Pubkey::new_rand(); @@ -896,7 +896,7 @@ mod tests { &my_pubkey, &mock_repairee.id, total_slots - 1, - &blocktree, + &blockstore, &repairee_epoch_slots, &vec![&my_pubkey], &my_socket, @@ -919,7 +919,7 @@ mod tests { &my_pubkey, &mock_repairee.id, total_slots - 1, - &blocktree, + &blockstore, &repairee_epoch_slots, &vec![&my_pubkey], &my_socket, @@ -936,8 +936,8 @@ mod tests { // Shutdown mock_repairee.close().unwrap(); - drop(blocktree); - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + drop(blockstore); + Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] diff --git a/core/src/gossip_service.rs b/core/src/gossip_service.rs index e9cda405b66a86..81e235c435052b 100644 --- a/core/src/gossip_service.rs +++ b/core/src/gossip_service.rs @@ -6,7 +6,7 @@ use crate::streamer; use rand::{thread_rng, Rng}; use solana_client::thin_client::{create_client, ThinClient}; use solana_ledger::bank_forks::BankForks; -use solana_ledger::blocktree::Blocktree; +use solana_ledger::blockstore::Blockstore; use solana_perf::recycler::Recycler; use solana_sdk::pubkey::Pubkey; use solana_sdk::signature::{Keypair, KeypairUtil}; @@ -24,7 +24,7 @@ pub struct GossipService { impl GossipService { pub fn new( cluster_info: &Arc>, - blocktree: Option>, + blockstore: Option>, bank_forks: Option>>, gossip_socket: UdpSocket, exit: &Arc, @@ -47,7 +47,7 @@ impl GossipService { let t_responder = streamer::responder("gossip", gossip_socket, response_receiver); let t_listen = ClusterInfo::listen( cluster_info.clone(), - blocktree, + blockstore, bank_forks.clone(), request_receiver, response_sender.clone(), diff --git a/core/src/ledger_cleanup_service.rs b/core/src/ledger_cleanup_service.rs index 2b430343c35344..2cc75460ae0d7f 100644 --- a/core/src/ledger_cleanup_service.rs +++ 
b/core/src/ledger_cleanup_service.rs @@ -1,6 +1,6 @@ //! The `ledger_cleanup_service` drops older ledger data to limit disk space usage -use solana_ledger::blocktree::Blocktree; +use solana_ledger::blockstore::Blockstore; use solana_metrics::datapoint_debug; use solana_sdk::clock::Slot; use std::string::ToString; @@ -27,7 +27,7 @@ pub struct LedgerCleanupService { impl LedgerCleanupService { pub fn new( new_root_receiver: Receiver, - blocktree: Arc, + blockstore: Arc, max_ledger_slots: u64, exit: &Arc, ) -> Self { @@ -45,7 +45,7 @@ impl LedgerCleanupService { } if let Err(e) = Self::cleanup_ledger( &new_root_receiver, - &blocktree, + &blockstore, max_ledger_slots, &mut next_purge_batch, ) { @@ -61,20 +61,20 @@ impl LedgerCleanupService { fn cleanup_ledger( new_root_receiver: &Receiver, - blocktree: &Arc, + blockstore: &Arc, max_ledger_slots: u64, next_purge_batch: &mut u64, ) -> Result<(), RecvTimeoutError> { - let disk_utilization_pre = blocktree.storage_size(); + let disk_utilization_pre = blockstore.storage_size(); let root = new_root_receiver.recv_timeout(Duration::from_secs(1))?; if root > *next_purge_batch { //cleanup - blocktree.purge_slots(0, Some(root - max_ledger_slots)); + blockstore.purge_slots(0, Some(root - max_ledger_slots)); *next_purge_batch += DEFAULT_PURGE_BATCH_SIZE; } - let disk_utilization_post = blocktree.storage_size(); + let disk_utilization_post = blockstore.storage_size(); if let (Ok(disk_utilization_pre), Ok(disk_utilization_post)) = (disk_utilization_pre, disk_utilization_post) @@ -101,39 +101,39 @@ impl LedgerCleanupService { #[cfg(test)] mod tests { use super::*; - use solana_ledger::blocktree::make_many_slot_entries; + use solana_ledger::blockstore::make_many_slot_entries; use solana_ledger::get_tmp_ledger_path; use std::sync::mpsc::channel; #[test] fn test_cleanup() { - let blocktree_path = get_tmp_ledger_path!(); - let blocktree = Blocktree::open(&blocktree_path).unwrap(); + let blockstore_path = get_tmp_ledger_path!(); + let blockstore = Blockstore::open(&blockstore_path).unwrap(); let (shreds, _) = make_many_slot_entries(0, 50, 5); - blocktree.insert_shreds(shreds, None, false).unwrap(); - let blocktree = Arc::new(blocktree); + blockstore.insert_shreds(shreds, None, false).unwrap(); + let blockstore = Arc::new(blockstore); let (sender, receiver) = channel(); //send a signal to kill slots 0-40 let mut next_purge_slot = 0; sender.send(50).unwrap(); - LedgerCleanupService::cleanup_ledger(&receiver, &blocktree, 10, &mut next_purge_slot) + LedgerCleanupService::cleanup_ledger(&receiver, &blockstore, 10, &mut next_purge_slot) .unwrap(); //check that 0-40 don't exist - blocktree + blockstore .slot_meta_iterator(0) .unwrap() .for_each(|(slot, _)| assert!(slot > 40)); - drop(blocktree); - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + drop(blockstore); + Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] fn test_compaction() { - let blocktree_path = get_tmp_ledger_path!(); - let blocktree = Arc::new(Blocktree::open(&blocktree_path).unwrap()); + let blockstore_path = get_tmp_ledger_path!(); + let blockstore = Arc::new(Blockstore::open(&blockstore_path).unwrap()); let n = 10_000; let batch_size = 100; @@ -142,10 +142,10 @@ mod tests { for i in 0..batches { let (shreds, _) = make_many_slot_entries(i * batch_size, batch_size, 1); - blocktree.insert_shreds(shreds, None, false).unwrap(); + blockstore.insert_shreds(shreds, None, false).unwrap(); } - let u1 = 
blocktree.storage_size().unwrap() as f64; + let u1 = blockstore.storage_size().unwrap() as f64; // send signal to cleanup slots let (sender, receiver) = channel(); @@ -153,7 +153,7 @@ mod tests { let mut next_purge_batch = 0; LedgerCleanupService::cleanup_ledger( &receiver, - &blocktree, + &blockstore, max_ledger_slots, &mut next_purge_batch, ) @@ -161,18 +161,18 @@ mod tests { thread::sleep(Duration::from_secs(2)); - let u2 = blocktree.storage_size().unwrap() as f64; + let u2 = blockstore.storage_size().unwrap() as f64; assert!(u2 < u1, "insufficient compaction! pre={},post={}", u1, u2,); // check that early slots don't exist let max_slot = n - max_ledger_slots; - blocktree + blockstore .slot_meta_iterator(0) .unwrap() .for_each(|(slot, _)| assert!(slot > max_slot)); - drop(blocktree); - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + drop(blockstore); + Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } } diff --git a/core/src/poh_recorder.rs b/core/src/poh_recorder.rs index 0d2cebaf0f035f..2c0e8e620e3f0d 100644 --- a/core/src/poh_recorder.rs +++ b/core/src/poh_recorder.rs @@ -10,7 +10,7 @@ //! For Entries: //! * recorded entry must be >= WorkingBank::min_tick_height && entry must be < WorkingBank::max_tick_height //! -use solana_ledger::blocktree::Blocktree; +use solana_ledger::blockstore::Blockstore; use solana_ledger::entry::Entry; use solana_ledger::leader_schedule_cache::LeaderScheduleCache; use solana_ledger::poh::Poh; @@ -70,7 +70,7 @@ pub struct PohRecorder { leader_last_tick_height: u64, // zero if none grace_ticks: u64, id: Pubkey, - blocktree: Arc, + blockstore: Arc, leader_schedule_cache: Arc, poh_config: Arc, ticks_per_slot: u64, @@ -84,7 +84,7 @@ impl PohRecorder { &self.id, bank.slot(), &bank, - Some(&self.blocktree), + Some(&self.blockstore), ); assert_eq!(self.ticks_per_slot, bank.ticks_per_slot()); let (leader_first_tick_height, leader_last_tick_height, grace_ticks) = @@ -407,7 +407,7 @@ impl PohRecorder { next_leader_slot: Option<(Slot, Slot)>, ticks_per_slot: u64, id: &Pubkey, - blocktree: &Arc, + blockstore: &Arc, clear_bank_signal: Option>, leader_schedule_cache: &Arc, poh_config: &Arc, @@ -433,7 +433,7 @@ impl PohRecorder { leader_last_tick_height, grace_ticks, id: *id, - blocktree: blocktree.clone(), + blockstore: blockstore.clone(), leader_schedule_cache: leader_schedule_cache.clone(), ticks_per_slot, poh_config: poh_config.clone(), @@ -452,7 +452,7 @@ impl PohRecorder { next_leader_slot: Option<(Slot, Slot)>, ticks_per_slot: u64, id: &Pubkey, - blocktree: &Arc, + blockstore: &Arc, leader_schedule_cache: &Arc, poh_config: &Arc, ) -> (Self, Receiver) { @@ -463,7 +463,7 @@ impl PohRecorder { next_leader_slot, ticks_per_slot, id, - blocktree, + blockstore, None, leader_schedule_cache, poh_config, @@ -475,7 +475,7 @@ impl PohRecorder { mod tests { use super::*; use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo}; - use solana_ledger::{blocktree::Blocktree, get_tmp_ledger_path}; + use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path}; use solana_perf::test_tx::test_tx; use solana_sdk::clock::DEFAULT_TICKS_PER_SLOT; use solana_sdk::hash::hash; @@ -486,8 +486,8 @@ mod tests { let prev_hash = Hash::default(); let ledger_path = get_tmp_ledger_path!(); { - let blocktree = - Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); + let blockstore = Blockstore::open(&ledger_path) + .expect("Expected to be able to open database ledger"); 
let (mut poh_recorder, _entry_receiver) = PohRecorder::new( 0, @@ -496,7 +496,7 @@ mod tests { Some((4, 4)), DEFAULT_TICKS_PER_SLOT, &Pubkey::default(), - &Arc::new(blocktree), + &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::default()), &Arc::new(PohConfig::default()), ); @@ -505,7 +505,7 @@ mod tests { assert_eq!(poh_recorder.tick_cache[0].1, 1); assert_eq!(poh_recorder.tick_height, 1); } - Blocktree::destroy(&ledger_path).unwrap(); + Blockstore::destroy(&ledger_path).unwrap(); } #[test] @@ -513,8 +513,8 @@ mod tests { let prev_hash = Hash::default(); let ledger_path = get_tmp_ledger_path!(); { - let blocktree = - Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); + let blockstore = Blockstore::open(&ledger_path) + .expect("Expected to be able to open database ledger"); let (mut poh_recorder, _entry_receiver) = PohRecorder::new( 0, @@ -523,7 +523,7 @@ mod tests { Some((4, 4)), DEFAULT_TICKS_PER_SLOT, &Pubkey::default(), - &Arc::new(blocktree), + &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::default()), &Arc::new(PohConfig::default()), ); @@ -533,15 +533,15 @@ mod tests { assert_eq!(poh_recorder.tick_cache[1].1, 2); assert_eq!(poh_recorder.tick_height, 2); } - Blocktree::destroy(&ledger_path).unwrap(); + Blockstore::destroy(&ledger_path).unwrap(); } #[test] fn test_poh_recorder_reset_clears_cache() { let ledger_path = get_tmp_ledger_path!(); { - let blocktree = - Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); + let blockstore = Blockstore::open(&ledger_path) + .expect("Expected to be able to open database ledger"); let (mut poh_recorder, _entry_receiver) = PohRecorder::new( 0, Hash::default(), @@ -549,7 +549,7 @@ mod tests { Some((4, 4)), DEFAULT_TICKS_PER_SLOT, &Pubkey::default(), - &Arc::new(blocktree), + &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::default()), &Arc::new(PohConfig::default()), ); @@ -558,15 +558,15 @@ mod tests { poh_recorder.reset(Hash::default(), 0, Some((4, 4))); assert_eq!(poh_recorder.tick_cache.len(), 0); } - Blocktree::destroy(&ledger_path).unwrap(); + Blockstore::destroy(&ledger_path).unwrap(); } #[test] fn test_poh_recorder_clear() { let ledger_path = get_tmp_ledger_path!(); { - let blocktree = - Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); + let blockstore = Blockstore::open(&ledger_path) + .expect("Expected to be able to open database ledger"); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); let bank = Arc::new(Bank::new(&genesis_config)); let prev_hash = bank.last_blockhash(); @@ -577,7 +577,7 @@ mod tests { Some((4, 4)), bank.ticks_per_slot(), &Pubkey::default(), - &Arc::new(blocktree), + &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(PohConfig::default()), ); @@ -592,15 +592,15 @@ mod tests { poh_recorder.clear_bank(); assert!(poh_recorder.working_bank.is_none()); } - Blocktree::destroy(&ledger_path).unwrap(); + Blockstore::destroy(&ledger_path).unwrap(); } #[test] fn test_poh_recorder_tick_sent_after_min() { let ledger_path = get_tmp_ledger_path!(); { - let blocktree = - Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); + let blockstore = Blockstore::open(&ledger_path) + .expect("Expected to be able to open database ledger"); let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(2); let bank = Arc::new(Bank::new(&genesis_config)); let prev_hash = bank.last_blockhash(); @@ -611,7 +611,7 @@ mod tests { Some((4, 4)), bank.ticks_per_slot(), &Pubkey::default(), - &Arc::new(blocktree), + &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(PohConfig::default()), ); @@ -641,15 +641,15 @@ mod tests { assert_eq!(num_entries, 3); assert!(poh_recorder.working_bank.is_none()); } - Blocktree::destroy(&ledger_path).unwrap(); + Blockstore::destroy(&ledger_path).unwrap(); } #[test] fn test_poh_recorder_tick_sent_upto_and_including_max() { let ledger_path = get_tmp_ledger_path!(); { - let blocktree = - Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); + let blockstore = Blockstore::open(&ledger_path) + .expect("Expected to be able to open database ledger"); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); let bank = Arc::new(Bank::new(&genesis_config)); let prev_hash = bank.last_blockhash(); @@ -660,7 +660,7 @@ mod tests { Some((4, 4)), bank.ticks_per_slot(), &Pubkey::default(), - &Arc::new(blocktree), + &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(PohConfig::default()), ); @@ -688,15 +688,15 @@ mod tests { } assert_eq!(num_entries, 3); } - Blocktree::destroy(&ledger_path).unwrap(); + Blockstore::destroy(&ledger_path).unwrap(); } #[test] fn test_poh_recorder_record_to_early() { let ledger_path = get_tmp_ledger_path!(); { - let blocktree = - Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); + let blockstore = Blockstore::open(&ledger_path) + .expect("Expected to be able to open database ledger"); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); let bank = Arc::new(Bank::new(&genesis_config)); let prev_hash = bank.last_blockhash(); @@ -707,7 +707,7 @@ mod tests { Some((4, 4)), bank.ticks_per_slot(), &Pubkey::default(), - &Arc::new(blocktree), + &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(PohConfig::default()), ); @@ -726,15 +726,15 @@ mod tests { .is_err()); assert!(entry_receiver.try_recv().is_err()); } - Blocktree::destroy(&ledger_path).unwrap(); + Blockstore::destroy(&ledger_path).unwrap(); } #[test] fn test_poh_recorder_record_bad_slot() { let ledger_path = get_tmp_ledger_path!(); { - let blocktree = - Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); + let blockstore = Blockstore::open(&ledger_path) + .expect("Expected to be able to open database ledger"); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); let bank = Arc::new(Bank::new(&genesis_config)); let prev_hash = bank.last_blockhash(); @@ -745,7 +745,7 @@ mod tests { Some((4, 4)), bank.ticks_per_slot(), &Pubkey::default(), - &Arc::new(blocktree), + &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(PohConfig::default()), ); @@ -766,15 +766,15 @@ mod tests { Err(PohRecorderError::MaxHeightReached) ); } - Blocktree::destroy(&ledger_path).unwrap(); + Blockstore::destroy(&ledger_path).unwrap(); } #[test] fn test_poh_recorder_record_at_min_passes() { let ledger_path = get_tmp_ledger_path!(); { - let blocktree = - Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); + let blockstore = Blockstore::open(&ledger_path) + .expect("Expected to be able to open database ledger"); let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(2); let bank = Arc::new(Bank::new(&genesis_config)); let prev_hash = bank.last_blockhash(); @@ -785,7 +785,7 @@ mod tests { Some((4, 4)), bank.ticks_per_slot(), &Pubkey::default(), - &Arc::new(blocktree), + &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(PohConfig::default()), ); @@ -812,15 +812,15 @@ mod tests { let (_bank, (e, _tick_height)) = entry_receiver.recv().expect("recv 2"); assert!(!e.is_tick()); } - Blocktree::destroy(&ledger_path).unwrap(); + Blockstore::destroy(&ledger_path).unwrap(); } #[test] fn test_poh_recorder_record_at_max_fails() { let ledger_path = get_tmp_ledger_path!(); { - let blocktree = - Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); + let blockstore = Blockstore::open(&ledger_path) + .expect("Expected to be able to open database ledger"); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); let bank = Arc::new(Bank::new(&genesis_config)); let prev_hash = bank.last_blockhash(); @@ -831,7 +831,7 @@ mod tests { Some((4, 4)), bank.ticks_per_slot(), &Pubkey::default(), - &Arc::new(blocktree), + &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(PohConfig::default()), ); @@ -856,15 +856,15 @@ mod tests { let (_bank, (entry, _tick_height)) = entry_receiver.recv().unwrap(); assert!(entry.is_tick()); } - Blocktree::destroy(&ledger_path).unwrap(); + Blockstore::destroy(&ledger_path).unwrap(); } #[test] fn test_poh_cache_on_disconnect() { let ledger_path = get_tmp_ledger_path!(); { - let blocktree = - Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); + let blockstore = Blockstore::open(&ledger_path) + .expect("Expected to be able to open database ledger"); let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(2); let bank = Arc::new(Bank::new(&genesis_config)); let prev_hash = bank.last_blockhash(); @@ -875,7 +875,7 @@ mod tests { Some((4, 4)), bank.ticks_per_slot(), &Pubkey::default(), - &Arc::new(blocktree), + &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(PohConfig::default()), ); @@ -894,15 +894,15 @@ mod tests { assert!(poh_recorder.working_bank.is_none()); assert_eq!(poh_recorder.tick_cache.len(), 3); } - Blocktree::destroy(&ledger_path).unwrap(); + Blockstore::destroy(&ledger_path).unwrap(); } #[test] fn test_reset_current() { let ledger_path = get_tmp_ledger_path!(); { - let blocktree = - Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); + let blockstore = Blockstore::open(&ledger_path) + .expect("Expected to be able to open database ledger"); let (mut poh_recorder, _entry_receiver) = PohRecorder::new( 0, Hash::default(), @@ -910,7 +910,7 @@ mod tests { Some((4, 4)), DEFAULT_TICKS_PER_SLOT, &Pubkey::default(), - &Arc::new(blocktree), + &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::default()), &Arc::new(PohConfig::default()), ); @@ -921,15 +921,15 @@ mod tests { poh_recorder.reset(hash, 0, Some((4, 4))); assert_eq!(poh_recorder.tick_cache.len(), 0); } - Blocktree::destroy(&ledger_path).unwrap(); + Blockstore::destroy(&ledger_path).unwrap(); } #[test] fn test_reset_with_cached() { let ledger_path = get_tmp_ledger_path!(); { - let blocktree = - Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); + let blockstore = Blockstore::open(&ledger_path) + .expect("Expected to be able to open database ledger"); let (mut poh_recorder, _entry_receiver) = PohRecorder::new( 0, Hash::default(), @@ -937,7 +937,7 @@ mod tests { Some((4, 4)), DEFAULT_TICKS_PER_SLOT, &Pubkey::default(), - &Arc::new(blocktree), + &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::default()), &Arc::new(PohConfig::default()), ); @@ -947,7 +947,7 @@ mod tests { poh_recorder.reset(poh_recorder.tick_cache[0].0.hash, 0, Some((4, 4))); assert_eq!(poh_recorder.tick_cache.len(), 0); } - Blocktree::destroy(&ledger_path).unwrap(); + Blockstore::destroy(&ledger_path).unwrap(); } #[test] @@ -956,8 +956,8 @@ mod tests { let ledger_path = get_tmp_ledger_path!(); { - let blocktree = - Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); + let blockstore = Blockstore::open(&ledger_path) + .expect("Expected to be able to open database ledger"); let (mut poh_recorder, _entry_receiver) = PohRecorder::new( 0, Hash::default(), @@ -965,7 +965,7 @@ mod tests { Some((4, 4)), DEFAULT_TICKS_PER_SLOT, &Pubkey::default(), - &Arc::new(blocktree), + &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::default()), &Arc::new(PohConfig::default()), ); @@ -980,15 +980,15 @@ mod tests { poh_recorder.tick(); assert_eq!(poh_recorder.tick_height, DEFAULT_TICKS_PER_SLOT + 1); } - Blocktree::destroy(&ledger_path).unwrap(); + Blockstore::destroy(&ledger_path).unwrap(); } #[test] fn test_reset_clear_bank() { let ledger_path = get_tmp_ledger_path!(); { - let blocktree = - Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); + let blockstore = Blockstore::open(&ledger_path) + .expect("Expected to be able to open database ledger"); let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(2); let bank = Arc::new(Bank::new(&genesis_config)); let (mut poh_recorder, _entry_receiver) = PohRecorder::new( @@ -998,7 +998,7 @@ mod tests { Some((4, 4)), bank.ticks_per_slot(), &Pubkey::default(), - &Arc::new(blocktree), + &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(PohConfig::default()), ); @@ -1011,15 +1011,15 @@ mod tests { poh_recorder.reset(hash(b"hello"), 0, Some((4, 4))); assert!(poh_recorder.working_bank.is_none()); } - Blocktree::destroy(&ledger_path).unwrap(); + Blockstore::destroy(&ledger_path).unwrap(); } #[test] pub fn test_clear_signal() { let ledger_path = get_tmp_ledger_path!(); { - let blocktree = - Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); + let blockstore = Blockstore::open(&ledger_path) + .expect("Expected to be able to open database ledger"); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); let bank = Arc::new(Bank::new(&genesis_config)); let (sender, receiver) = sync_channel(1); @@ -1030,7 +1030,7 @@ mod tests { None, bank.ticks_per_slot(), &Pubkey::default(), - &Arc::new(blocktree), + &Arc::new(blockstore), Some(sender), &Arc::new(LeaderScheduleCache::default()), &Arc::new(PohConfig::default()), @@ -1039,7 +1039,7 @@ mod tests { poh_recorder.clear_bank(); assert!(receiver.try_recv().is_ok()); } - Blocktree::destroy(&ledger_path).unwrap(); + Blockstore::destroy(&ledger_path).unwrap(); } #[test] @@ -1047,8 +1047,8 @@ mod tests { solana_logger::setup(); let ledger_path = get_tmp_ledger_path!(); { - let blocktree = - Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); + let blockstore = Blockstore::open(&ledger_path) + .expect("Expected to be able to open database ledger"); let ticks_per_slot = 5; let GenesisConfigInfo { mut genesis_config, .. @@ -1064,7 +1064,7 @@ mod tests { Some((4, 4)), bank.ticks_per_slot(), &Pubkey::default(), - &Arc::new(blocktree), + &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(PohConfig::default()), ); @@ -1091,7 +1091,7 @@ mod tests { // Make sure the starting slot is updated assert_eq!(poh_recorder.start_slot, end_slot); } - Blocktree::destroy(&ledger_path).unwrap(); + Blockstore::destroy(&ledger_path).unwrap(); } #[test] @@ -1100,8 +1100,8 @@ mod tests { let ledger_path = get_tmp_ledger_path!(); { - let blocktree = - Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); + let blockstore = Blockstore::open(&ledger_path) + .expect("Expected to be able to open database ledger"); let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(2); let bank = Arc::new(Bank::new(&genesis_config)); let prev_hash = bank.last_blockhash(); @@ -1112,7 +1112,7 @@ mod tests { None, bank.ticks_per_slot(), &Pubkey::default(), - &Arc::new(blocktree), + &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(PohConfig::default()), ); @@ -1213,15 +1213,15 @@ mod tests { assert_eq!(grace_ticks, overshoot_factor * bank.ticks_per_slot()); assert_eq!(leader_slot, 9); } - Blocktree::destroy(&ledger_path).unwrap(); + Blockstore::destroy(&ledger_path).unwrap(); } #[test] fn test_would_be_leader_soon() { let ledger_path = get_tmp_ledger_path!(); { - let blocktree = - Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); + let blockstore = Blockstore::open(&ledger_path) + .expect("Expected to be able to open database ledger"); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); let bank = Arc::new(Bank::new(&genesis_config)); let prev_hash = bank.last_blockhash(); @@ -1232,7 +1232,7 @@ mod tests { None, bank.ticks_per_slot(), &Pubkey::default(), - &Arc::new(blocktree), + &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(PohConfig::default()), ); @@ -1287,8 +1287,8 @@ mod tests { let ledger_path = get_tmp_ledger_path!(); { // test that virtual ticks are flushed into a newly set bank asap - let blocktree = - Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); + let blockstore = Blockstore::open(&ledger_path) + .expect("Expected to be able to open database ledger"); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); let bank = Arc::new(Bank::new(&genesis_config)); let genesis_hash = bank.last_blockhash(); @@ -1300,7 +1300,7 @@ mod tests { Some((2, 2)), bank.ticks_per_slot(), &Pubkey::default(), - &Arc::new(blocktree), + &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(PohConfig::default()), ); diff --git a/core/src/poh_service.rs b/core/src/poh_service.rs index 554ac053469d18..14f6418aec885a 100644 --- a/core/src/poh_service.rs +++ b/core/src/poh_service.rs @@ -123,7 +123,7 @@ mod tests { use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo}; use crate::poh_recorder::WorkingBank; use solana_ledger::leader_schedule_cache::LeaderScheduleCache; - use solana_ledger::{blocktree::Blocktree, get_tmp_ledger_path}; + use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path}; use solana_perf::test_tx::test_tx; use solana_runtime::bank::Bank; use solana_sdk::hash::hash; @@ -137,8 +137,8 @@ mod tests { let prev_hash = bank.last_blockhash(); let ledger_path = get_tmp_ledger_path!(); { - let blocktree = - Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); + let blockstore = Blockstore::open(&ledger_path) + .expect("Expected to be able to open database ledger"); let poh_config = Arc::new(PohConfig { hashes_per_tick: Some(2), target_tick_duration: Duration::from_millis(42), @@ -151,7 +151,7 @@ mod tests { Some((4, 4)), bank.ticks_per_slot(), &Pubkey::default(), - &Arc::new(blocktree), + &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &poh_config, ); @@ -230,6 +230,6 @@ mod tests { let _ = poh_service.join().unwrap(); let _ = entry_producer.join().unwrap(); } - Blocktree::destroy(&ledger_path).unwrap(); + Blockstore::destroy(&ledger_path).unwrap(); } } diff --git a/core/src/repair_service.rs b/core/src/repair_service.rs index 
5fdfa8ebfa0b5a..2dc77c1f795475 100644 --- a/core/src/repair_service.rs +++ b/core/src/repair_service.rs @@ -6,7 +6,7 @@ use crate::{ }; use solana_ledger::{ bank_forks::BankForks, - blocktree::{Blocktree, CompletedSlotsReceiver, SlotMeta}, + blockstore::{Blockstore, CompletedSlotsReceiver, SlotMeta}, }; use solana_sdk::{clock::Slot, epoch_schedule::EpochSchedule, pubkey::Pubkey}; use std::{ @@ -71,7 +71,7 @@ pub struct RepairService { impl RepairService { pub fn new( - blocktree: Arc, + blockstore: Arc, exit: Arc, repair_socket: Arc, cluster_info: Arc>, @@ -81,7 +81,7 @@ impl RepairService { RepairStrategy::RepairAll { ref epoch_schedule, .. } => Some(ClusterInfoRepairListener::new( - &blocktree, + &blockstore, &exit, cluster_info.clone(), *epoch_schedule, @@ -94,7 +94,7 @@ impl RepairService { .name("solana-repair-service".to_string()) .spawn(move || { Self::run( - &blocktree, + &blockstore, &exit, &repair_socket, &cluster_info, @@ -110,7 +110,7 @@ impl RepairService { } fn run( - blocktree: &Arc, + blockstore: &Arc, exit: &Arc, repair_socket: &Arc, cluster_info: &Arc>, @@ -123,10 +123,10 @@ impl RepairService { ref epoch_schedule, .. } = repair_strategy { - current_root = blocktree.last_root(); + current_root = blockstore.last_root(); Self::initialize_epoch_slots( id, - blocktree, + blockstore, &mut epoch_slots, current_root, epoch_schedule, @@ -143,7 +143,7 @@ impl RepairService { RepairStrategy::RepairRange(ref repair_slot_range) => { // Strategy used by archivers Self::generate_repairs_in_range( - blocktree, + blockstore, MAX_REPAIR_LENGTH, repair_slot_range, ) @@ -153,8 +153,8 @@ impl RepairService { ref completed_slots_receiver, .. } => { - let new_root = blocktree.last_root(); - let lowest_slot = blocktree.lowest_slot(); + let new_root = blockstore.last_root(); + let lowest_slot = blockstore.lowest_slot(); Self::update_epoch_slots( id, new_root, @@ -164,7 +164,7 @@ impl RepairService { &cluster_info, completed_slots_receiver, ); - Self::generate_repairs(blocktree, new_root, MAX_REPAIR_LENGTH) + Self::generate_repairs(blockstore, new_root, MAX_REPAIR_LENGTH) } } }; @@ -195,7 +195,7 @@ impl RepairService { // Generate repairs for all slots `x` in the repair_range.start <= x <= repair_range.end pub fn generate_repairs_in_range( - blocktree: &Blocktree, + blockstore: &Blockstore, max_repairs: usize, repair_range: &RepairSlotRange, ) -> Result> { @@ -206,7 +206,7 @@ impl RepairService { break; } - let meta = blocktree + let meta = blockstore .meta(slot) .expect("Unable to lookup slot meta") .unwrap_or(SlotMeta { @@ -215,7 +215,7 @@ impl RepairService { }); let new_repairs = Self::generate_repairs_for_slot( - blocktree, + blockstore, slot, &meta, max_repairs - repairs.len(), @@ -227,18 +227,18 @@ impl RepairService { } fn generate_repairs( - blocktree: &Blocktree, + blockstore: &Blockstore, root: Slot, max_repairs: usize, ) -> Result> { // Slot height and shred indexes for shreds we want to repair let mut repairs: Vec = vec![]; - Self::generate_repairs_for_fork(blocktree, &mut repairs, max_repairs, root); + Self::generate_repairs_for_fork(blockstore, &mut repairs, max_repairs, root); // TODO: Incorporate gossip to determine priorities for repair? 
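The repair generation renamed in this hunk is easiest to follow through its tests. Assuming the module-private generate_repairs and the make_slot_entries helper used by test_repair_orphan further down (so this only compiles inside the module's test code), a hedged sketch of the orphan case is:

let blockstore_path = get_tmp_ledger_path!();
{
    let blockstore = Blockstore::open(&blockstore_path).unwrap();

    // Slot 1 chains to slot 0; slot 5 chains to slot 2, which is never inserted,
    // so slot 2 is reported as an orphan in need of repair.
    let (mut shreds, _) = make_slot_entries(1, 0, 1);
    let (shreds2, _) = make_slot_entries(5, 2, 1);
    shreds.extend(shreds2);
    blockstore.insert_shreds(shreds, None, false).unwrap();

    // With a budget of two repairs starting from root 0, expect the highest shred
    // of slot 0 plus an orphan repair for slot 2, exactly as the test asserts.
    assert_eq!(
        RepairService::generate_repairs(&blockstore, 0, 2).unwrap(),
        vec![RepairType::HighestShred(0, 0), RepairType::Orphan(2)]
    );
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");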
- // Try to resolve orphans in blocktree - let mut orphans = blocktree.get_orphans(Some(MAX_ORPHANS)); + // Try to resolve orphans in blockstore + let mut orphans = blockstore.get_orphans(Some(MAX_ORPHANS)); orphans.retain(|x| *x > root); Self::generate_repairs_for_orphans(&orphans[..], &mut repairs); @@ -246,7 +246,7 @@ impl RepairService { } fn generate_repairs_for_slot( - blocktree: &Blocktree, + blockstore: &Blockstore, slot: Slot, slot_meta: &SlotMeta, max_repairs: usize, @@ -256,7 +256,7 @@ impl RepairService { } else if slot_meta.consumed == slot_meta.received { vec![RepairType::HighestShred(slot, slot_meta.received)] } else { - let reqs = blocktree.find_missing_data_indexes( + let reqs = blockstore.find_missing_data_indexes( slot, slot_meta.first_shred_timestamp, slot_meta.consumed, @@ -275,7 +275,7 @@ impl RepairService { /// Repairs any fork starting at the input slot fn generate_repairs_for_fork( - blocktree: &Blocktree, + blockstore: &Blockstore, repairs: &mut Vec, max_repairs: usize, slot: Slot, @@ -283,9 +283,9 @@ impl RepairService { let mut pending_slots = vec![slot]; while repairs.len() < max_repairs && !pending_slots.is_empty() { let slot = pending_slots.pop().unwrap(); - if let Some(slot_meta) = blocktree.meta(slot).unwrap() { + if let Some(slot_meta) = blockstore.meta(slot).unwrap() { let new_repairs = Self::generate_repairs_for_slot( - blocktree, + blockstore, slot, &slot_meta, max_repairs - repairs.len(), @@ -300,7 +300,7 @@ impl RepairService { } fn get_completed_slots_past_root( - blocktree: &Blocktree, + blockstore: &Blockstore, slots_in_gossip: &mut BTreeSet, root: Slot, epoch_schedule: &EpochSchedule, @@ -308,7 +308,7 @@ impl RepairService { let last_confirmed_epoch = epoch_schedule.get_leader_schedule_epoch(root); let last_epoch_slot = epoch_schedule.get_last_slot_in_epoch(last_confirmed_epoch); - let meta_iter = blocktree + let meta_iter = blockstore .slot_meta_iterator(root + 1) .expect("Couldn't get db iterator"); @@ -324,22 +324,22 @@ impl RepairService { fn initialize_epoch_slots( id: Pubkey, - blocktree: &Blocktree, + blockstore: &Blockstore, slots_in_gossip: &mut BTreeSet, root: Slot, epoch_schedule: &EpochSchedule, cluster_info: &RwLock, ) { - Self::get_completed_slots_past_root(blocktree, slots_in_gossip, root, epoch_schedule); + Self::get_completed_slots_past_root(blockstore, slots_in_gossip, root, epoch_schedule); // Safe to set into gossip because by this time, the leader schedule cache should - // also be updated with the latest root (done in blocktree_processor) and thus + // also be updated with the latest root (done in blockstore_processor) and thus // will provide a schedule to window_service for any incoming shreds up to the // last_confirmed_epoch. 
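initialize_epoch_slots is get_completed_slots_past_root followed by the push_epoch_slots call just below. A hedged sketch of the first half, assuming a blockstore handle opened as in the tests and the module-private visibility of these helpers (mirroring test_get_completed_slots_past_root further down):

use std::collections::BTreeSet;

// Sketch only: gather every slot past the root that the blockstore already holds in
// full, bounded by the last slot of the last confirmed leader-schedule epoch.
let root = 10;
let epoch_schedule = EpochSchedule::custom(32, 32, false);
let mut full_slots = BTreeSet::new();
RepairService::get_completed_slots_past_root(&blockstore, &mut full_slots, root, &epoch_schedule);
// full_slots now holds the completed slots past `root` that can be advertised to
// gossip via push_epoch_slots(), as the code below does.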
cluster_info.write().unwrap().push_epoch_slots( id, root, - blocktree.lowest_slot(), + blockstore.lowest_slot(), slots_in_gossip.clone(), ); } @@ -409,60 +409,60 @@ mod test { use itertools::Itertools; use rand::seq::SliceRandom; use rand::{thread_rng, Rng}; - use solana_ledger::blocktree::{ + use solana_ledger::blockstore::{ make_chaining_slot_entries, make_many_slot_entries, make_slot_entries, }; use solana_ledger::shred::max_ticks_per_n_shreds; - use solana_ledger::{blocktree::Blocktree, get_tmp_ledger_path}; + use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path}; use std::sync::mpsc::channel; use std::thread::Builder; #[test] pub fn test_repair_orphan() { - let blocktree_path = get_tmp_ledger_path!(); + let blockstore_path = get_tmp_ledger_path!(); { - let blocktree = Blocktree::open(&blocktree_path).unwrap(); + let blockstore = Blockstore::open(&blockstore_path).unwrap(); // Create some orphan slots let (mut shreds, _) = make_slot_entries(1, 0, 1); let (shreds2, _) = make_slot_entries(5, 2, 1); shreds.extend(shreds2); - blocktree.insert_shreds(shreds, None, false).unwrap(); + blockstore.insert_shreds(shreds, None, false).unwrap(); assert_eq!( - RepairService::generate_repairs(&blocktree, 0, 2).unwrap(), + RepairService::generate_repairs(&blockstore, 0, 2).unwrap(), vec![RepairType::HighestShred(0, 0), RepairType::Orphan(2)] ); } - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] pub fn test_repair_empty_slot() { - let blocktree_path = get_tmp_ledger_path!(); + let blockstore_path = get_tmp_ledger_path!(); { - let blocktree = Blocktree::open(&blocktree_path).unwrap(); + let blockstore = Blockstore::open(&blockstore_path).unwrap(); let (shreds, _) = make_slot_entries(2, 0, 1); // Write this shred to slot 2, should chain to slot 0, which we haven't received // any shreds for - blocktree.insert_shreds(shreds, None, false).unwrap(); + blockstore.insert_shreds(shreds, None, false).unwrap(); // Check that repair tries to patch the empty slot assert_eq!( - RepairService::generate_repairs(&blocktree, 0, 2).unwrap(), + RepairService::generate_repairs(&blockstore, 0, 2).unwrap(), vec![RepairType::HighestShred(0, 0)] ); } - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] pub fn test_generate_repairs() { - let blocktree_path = get_tmp_ledger_path!(); + let blockstore_path = get_tmp_ledger_path!(); { - let blocktree = Blocktree::open(&blocktree_path).unwrap(); + let blockstore = Blockstore::open(&blockstore_path).unwrap(); let nth = 3; let num_slots = 2; @@ -483,7 +483,7 @@ mod test { missing_indexes_per_slot.insert(0, index); } } - blocktree + blockstore .insert_shreds(shreds_to_write, None, false) .unwrap(); // sleep so that the holes are ready for repair @@ -497,23 +497,23 @@ mod test { .collect(); assert_eq!( - RepairService::generate_repairs(&blocktree, 0, std::usize::MAX).unwrap(), + RepairService::generate_repairs(&blockstore, 0, std::usize::MAX).unwrap(), expected ); assert_eq!( - RepairService::generate_repairs(&blocktree, 0, expected.len() - 2).unwrap()[..], + RepairService::generate_repairs(&blockstore, 0, expected.len() - 2).unwrap()[..], expected[0..expected.len() - 2] ); } - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + 
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] pub fn test_generate_highest_repair() { - let blocktree_path = get_tmp_ledger_path!(); + let blockstore_path = get_tmp_ledger_path!(); { - let blocktree = Blocktree::open(&blocktree_path).unwrap(); + let blockstore = Blockstore::open(&blockstore_path).unwrap(); let num_entries_per_slot = 100; @@ -524,25 +524,25 @@ mod test { // Remove last shred (which is also last in slot) so that slot is not complete shreds.pop(); - blocktree.insert_shreds(shreds, None, false).unwrap(); + blockstore.insert_shreds(shreds, None, false).unwrap(); // We didn't get the last shred for this slot, so ask for the highest shred for that slot let expected: Vec = vec![RepairType::HighestShred(0, num_shreds_per_slot - 1)]; assert_eq!( - RepairService::generate_repairs(&blocktree, 0, std::usize::MAX).unwrap(), + RepairService::generate_repairs(&blockstore, 0, std::usize::MAX).unwrap(), expected ); } - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] pub fn test_repair_range() { - let blocktree_path = get_tmp_ledger_path!(); + let blockstore_path = get_tmp_ledger_path!(); { - let blocktree = Blocktree::open(&blocktree_path).unwrap(); + let blockstore = Blockstore::open(&blockstore_path).unwrap(); let slots: Vec = vec![1, 3, 5, 7, 8]; let num_entries_per_slot = max_ticks_per_n_shreds(1) + 1; @@ -550,7 +550,7 @@ mod test { let shreds = make_chaining_slot_entries(&slots, num_entries_per_slot); for (mut slot_shreds, _) in shreds.into_iter() { slot_shreds.remove(0); - blocktree.insert_shreds(slot_shreds, None, false).unwrap(); + blockstore.insert_shreds(slot_shreds, None, false).unwrap(); } // sleep to make slot eligible for repair sleep(Duration::from_secs(1)); @@ -574,7 +574,7 @@ mod test { assert_eq!( RepairService::generate_repairs_in_range( - &blocktree, + &blockstore, std::usize::MAX, &repair_slot_range ) @@ -584,14 +584,14 @@ mod test { } } } - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] pub fn test_repair_range_highest() { - let blocktree_path = get_tmp_ledger_path!(); + let blockstore_path = get_tmp_ledger_path!(); { - let blocktree = Blocktree::open(&blocktree_path).unwrap(); + let blockstore = Blockstore::open(&blockstore_path).unwrap(); let num_entries_per_slot = 10; @@ -603,7 +603,7 @@ mod test { let parent = if i > 0 { i - 1 } else { 0 }; let (shreds, _) = make_slot_entries(i, parent, num_entries_per_slot as u64); - blocktree.insert_shreds(shreds, None, false).unwrap(); + blockstore.insert_shreds(shreds, None, false).unwrap(); } let end = 4; @@ -619,7 +619,7 @@ mod test { assert_eq!( RepairService::generate_repairs_in_range( - &blocktree, + &blockstore, std::usize::MAX, &repair_slot_range ) @@ -627,14 +627,14 @@ mod test { expected ); } - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] pub fn test_get_completed_slots_past_root() { - let blocktree_path = get_tmp_ledger_path!(); + let blockstore_path = get_tmp_ledger_path!(); { - let blocktree = Blocktree::open(&blocktree_path).unwrap(); + let blockstore = Blockstore::open(&blockstore_path).unwrap(); let num_entries_per_slot = 10; let root = 
10; @@ -656,8 +656,8 @@ mod test { .collect(); let mut full_slots = BTreeSet::new(); - blocktree.insert_shreds(fork1_shreds, None, false).unwrap(); - blocktree + blockstore.insert_shreds(fork1_shreds, None, false).unwrap(); + blockstore .insert_shreds(fork2_incomplete_shreds, None, false) .unwrap(); @@ -665,7 +665,7 @@ mod test { let epoch_schedule = EpochSchedule::custom(32, 32, false); RepairService::get_completed_slots_past_root( - &blocktree, + &blockstore, &mut full_slots, root, &epoch_schedule, @@ -682,9 +682,9 @@ mod test { .into_iter() .flat_map(|(shreds, _)| shreds) .collect(); - blocktree.insert_shreds(fork3_shreds, None, false).unwrap(); + blockstore.insert_shreds(fork3_shreds, None, false).unwrap(); RepairService::get_completed_slots_past_root( - &blocktree, + &blockstore, &mut full_slots, root, &epoch_schedule, @@ -692,25 +692,25 @@ mod test { expected.insert(last_slot); assert_eq!(full_slots, expected); } - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] pub fn test_update_epoch_slots() { - let blocktree_path = get_tmp_ledger_path!(); + let blockstore_path = get_tmp_ledger_path!(); { - // Create blocktree - let (blocktree, _, completed_slots_receiver) = - Blocktree::open_with_signal(&blocktree_path).unwrap(); + // Create blockstore + let (blockstore, _, completed_slots_receiver) = + Blockstore::open_with_signal(&blockstore_path).unwrap(); - let blocktree = Arc::new(blocktree); + let blockstore = Arc::new(blockstore); let mut root = 0; let num_slots = 100; let entries_per_slot = 5; - let blocktree_ = blocktree.clone(); + let blockstore_ = blockstore.clone(); - // Spin up thread to write to blocktree + // Spin up thread to write to blockstore let writer = Builder::new() .name("writer".to_string()) .spawn(move || { @@ -729,7 +729,7 @@ mod test { let step = rng.gen_range(1, max_step + 1) as usize; let step = std::cmp::min(step, num_shreds - i); let shreds_to_insert = shreds.drain(..step).collect_vec(); - blocktree_ + blockstore_ .insert_shreds(shreds_to_insert, None, false) .unwrap(); sleep(Duration::from_millis(repair_interval_ms)); @@ -748,7 +748,7 @@ mod test { RepairService::update_epoch_slots( Pubkey::default(), root, - blocktree.lowest_slot(), + blockstore.lowest_slot(), &mut root.clone(), &mut completed_slots, &cluster_info, @@ -762,7 +762,7 @@ mod test { // Update with new root, should filter out the slots <= root root = num_slots / 2; let (shreds, _) = make_slot_entries(num_slots + 2, num_slots + 1, entries_per_slot); - blocktree.insert_shreds(shreds, None, false).unwrap(); + blockstore.insert_shreds(shreds, None, false).unwrap(); RepairService::update_epoch_slots( Pubkey::default(), root, @@ -777,7 +777,7 @@ mod test { assert_eq!(completed_slots, expected); writer.join().unwrap(); } - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index a8831e2ae8080c..fa2a2699552cd2 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -12,8 +12,8 @@ use solana_ledger::entry::EntryVerificationStatus; use solana_ledger::{ bank_forks::BankForks, block_error::BlockError, - blocktree::{Blocktree, BlocktreeError}, - blocktree_processor::{self, TransactionStatusSender}, + blockstore::{Blockstore, BlockstoreError}, + 
blockstore_processor::{self, TransactionStatusSender}, entry::{Entry, EntrySlice, VerifyRecyclers}, leader_schedule_cache::LeaderScheduleCache, snapshot_package::SnapshotPackageSender, @@ -180,7 +180,7 @@ impl ReplayStage { #[allow(clippy::new_ret_no_self)] pub fn new( config: ReplayStageConfig, - blocktree: Arc, + blockstore: Arc, bank_forks: Arc>, cluster_info: Arc>, ledger_signal_receiver: Receiver, @@ -237,7 +237,7 @@ impl ReplayStage { let start = allocated.get(); Self::generate_new_bank_forks( - &blocktree, + &blockstore, &bank_forks, &leader_schedule_cache, &subscriptions, @@ -255,7 +255,7 @@ impl ReplayStage { let start = allocated.get(); let did_complete_bank = Self::replay_active_banks( - &blocktree, + &blockstore, &bank_forks, &my_pubkey, &mut progress, @@ -311,7 +311,7 @@ impl ReplayStage { &vote_account, &voting_keypair, &cluster_info, - &blocktree, + &blockstore, &leader_schedule_cache, &root_bank_sender, stats.total_staked, @@ -328,7 +328,7 @@ impl ReplayStage { if last_reset != bank.last_blockhash() { Self::reset_poh_recorder( &my_pubkey, - &blocktree, + &blockstore, &bank, &poh_recorder, &leader_schedule_cache, @@ -409,7 +409,7 @@ impl ReplayStage { match result { Err(RecvTimeoutError::Timeout) => continue, Err(_) => break, - Ok(_) => trace!("blocktree signal"), + Ok(_) => trace!("blockstore signal"), }; } Ok(()) @@ -535,16 +535,16 @@ impl ReplayStage { !Bank::can_commit(&tx_error) } Err(Error::BlockError(_)) => true, - Err(Error::BlocktreeError(BlocktreeError::InvalidShredData(_))) => true, - Err(Error::BlocktreeError(BlocktreeError::DeadSlot)) => true, + Err(Error::BlockstoreError(BlockstoreError::InvalidShredData(_))) => true, + Err(Error::BlockstoreError(BlockstoreError::DeadSlot)) => true, _ => false, } } // Returns the replay result and the number of replayed transactions - fn replay_blocktree_into_bank( + fn replay_blockstore_into_bank( bank: &Arc, - blocktree: &Blocktree, + blockstore: &Blockstore, bank_progress: &mut ForkProgress, transaction_status_sender: Option, verify_recyclers: &VerifyRecyclers, @@ -552,7 +552,7 @@ impl ReplayStage { let mut tx_count = 0; let now = Instant::now(); let load_result = - Self::load_blocktree_entries_with_shred_info(bank, blocktree, bank_progress); + Self::load_blockstore_entries_with_shred_info(bank, blockstore, bank_progress); let fetch_entries_elapsed = now.elapsed().as_micros(); if load_result.is_err() { bank_progress.stats.fetch_entries_fail_elapsed += fetch_entries_elapsed as u64; @@ -591,17 +591,17 @@ impl ReplayStage { ("error", format!("error: {:?}", replay_result), String), ("slot", bank.slot(), i64) ); - Self::mark_dead_slot(bank.slot(), blocktree, bank_progress); + Self::mark_dead_slot(bank.slot(), blockstore, bank_progress); } (replay_result, tx_count) } - fn mark_dead_slot(slot: Slot, blocktree: &Blocktree, bank_progress: &mut ForkProgress) { + fn mark_dead_slot(slot: Slot, blockstore: &Blockstore, bank_progress: &mut ForkProgress) { bank_progress.is_dead = true; - blocktree + blockstore .set_dead_slot(slot) - .expect("Failed to mark slot as dead in blocktree"); + .expect("Failed to mark slot as dead in blockstore"); } #[allow(clippy::too_many_arguments)] @@ -613,7 +613,7 @@ impl ReplayStage { vote_account: &Pubkey, voting_keypair: &Option>, cluster_info: &Arc>, - blocktree: &Arc, + blockstore: &Arc, leader_schedule_cache: &Arc, root_bank_sender: &Sender>>, total_staked: u64, @@ -637,12 +637,12 @@ impl ReplayStage { let mut rooted_banks = root_bank.parents(); rooted_banks.push(root_bank); let rooted_slots: Vec<_> = 
rooted_banks.iter().map(|bank| bank.slot()).collect(); - // Call leader schedule_cache.set_root() before blocktree.set_root() because + // Call leader schedule_cache.set_root() before blockstore.set_root() because // bank_forks.root is consumed by repair_service to update gossip, so we don't want to // get shreds for repair on gossip before we update leader schedule, otherwise they may // get dropped. leader_schedule_cache.set_root(rooted_banks.last().unwrap()); - blocktree + blockstore .set_roots(&rooted_slots) .expect("Ledger set roots failed"); bank_forks @@ -699,13 +699,17 @@ impl ReplayStage { fn reset_poh_recorder( my_pubkey: &Pubkey, - blocktree: &Blocktree, + blockstore: &Blockstore, bank: &Arc, poh_recorder: &Arc>, leader_schedule_cache: &Arc, ) { - let next_leader_slot = - leader_schedule_cache.next_leader_slot(&my_pubkey, bank.slot(), &bank, Some(blocktree)); + let next_leader_slot = leader_schedule_cache.next_leader_slot( + &my_pubkey, + bank.slot(), + &bank, + Some(blockstore), + ); poh_recorder .lock() .unwrap() @@ -727,7 +731,7 @@ impl ReplayStage { } fn replay_active_banks( - blocktree: &Arc, + blockstore: &Arc, bank_forks: &Arc>, my_pubkey: &Pubkey, progress: &mut HashMap, @@ -756,9 +760,9 @@ impl ReplayStage { .entry(bank.slot()) .or_insert_with(|| ForkProgress::new(bank.slot(), bank.last_blockhash())); if bank.collector_id() != my_pubkey { - let (replay_result, replay_tx_count) = Self::replay_blocktree_into_bank( + let (replay_result, replay_tx_count) = Self::replay_blockstore_into_bank( &bank, - &blocktree, + &blockstore, bank_progress, transaction_status_sender.clone(), verify_recyclers, @@ -959,12 +963,12 @@ impl ReplayStage { } } - fn load_blocktree_entries_with_shred_info( + fn load_blockstore_entries_with_shred_info( bank: &Bank, - blocktree: &Blocktree, + blockstore: &Blockstore, bank_progress: &mut ForkProgress, ) -> Result<(Vec, usize, bool)> { - blocktree + blockstore .get_slot_entries_with_shred_info(bank.slot(), bank_progress.num_shreds as u64) .map_err(|err| err.into()) } @@ -1078,7 +1082,7 @@ impl ReplayStage { let mut replay_elapsed = Measure::start("replay_elapsed"); let res = - blocktree_processor::process_entries(bank, entries, true, transaction_status_sender); + blockstore_processor::process_entries(bank, entries, true, transaction_status_sender); replay_elapsed.stop(); bank_progress.stats.replay_elapsed += replay_elapsed.as_us(); @@ -1116,7 +1120,7 @@ impl ReplayStage { } fn generate_new_bank_forks( - blocktree: &Blocktree, + blockstore: &Blockstore, forks_lock: &RwLock, leader_schedule_cache: &Arc, subscriptions: &Arc, @@ -1125,7 +1129,7 @@ impl ReplayStage { let forks = forks_lock.read().unwrap(); let frozen_banks = forks.frozen_banks(); let frozen_bank_slots: Vec = frozen_banks.keys().cloned().collect(); - let next_slots = blocktree + let next_slots = blockstore .get_slots_since(&frozen_bank_slots) .expect("Db error"); // Filter out what we've already seen @@ -1188,8 +1192,8 @@ pub(crate) mod tests { use crossbeam_channel::unbounded; use solana_client::rpc_request::RpcEncodedTransaction; use solana_ledger::{ - blocktree::make_slot_entries, - blocktree::{entries_to_test_shreds, BlocktreeError}, + blockstore::make_slot_entries, + blockstore::{entries_to_test_shreds, BlockstoreError}, create_new_tmp_ledger, entry::{self, next_entry}, get_tmp_ledger_path, @@ -1499,8 +1503,9 @@ pub(crate) mod tests { fn test_child_slots_of_same_parent() { let ledger_path = get_tmp_ledger_path!(); { - let blocktree = Arc::new( - 
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"), + let blockstore = Arc::new( + Blockstore::open(&ledger_path) + .expect("Expected to be able to open database ledger"), ); let genesis_config = create_genesis_config(10_000).genesis_config; @@ -1512,11 +1517,11 @@ pub(crate) mod tests { // Insert shred for slot 1, generate new forks, check result let (shreds, _) = make_slot_entries(1, 0, 8); - blocktree.insert_shreds(shreds, None, false).unwrap(); + blockstore.insert_shreds(shreds, None, false).unwrap(); assert!(bank_forks.get(1).is_none()); let bank_forks = RwLock::new(bank_forks); ReplayStage::generate_new_bank_forks( - &blocktree, + &blockstore, &bank_forks, &leader_schedule_cache, &subscriptions, @@ -1525,10 +1530,10 @@ pub(crate) mod tests { // Insert shred for slot 3, generate new forks, check result let (shreds, _) = make_slot_entries(2, 0, 8); - blocktree.insert_shreds(shreds, None, false).unwrap(); + blockstore.insert_shreds(shreds, None, false).unwrap(); assert!(bank_forks.read().unwrap().get(2).is_none()); ReplayStage::generate_new_bank_forks( - &blocktree, + &blockstore, &bank_forks, &leader_schedule_cache, &subscriptions, @@ -1750,7 +1755,7 @@ pub(crate) mod tests { assert_matches!( res, - Err(Error::BlocktreeError(BlocktreeError::InvalidShredData(_))) + Err(Error::BlockstoreError(BlockstoreError::InvalidShredData(_))) ); } @@ -1762,8 +1767,9 @@ pub(crate) mod tests { { let ledger_path = get_tmp_ledger_path!(); let res = { - let blocktree = Arc::new( - Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"), + let blockstore = Arc::new( + Blockstore::open(&ledger_path) + .expect("Expected to be able to open database ledger"), ); let GenesisConfigInfo { mut genesis_config, @@ -1778,10 +1784,10 @@ pub(crate) mod tests { .entry(bank0.slot()) .or_insert_with(|| ForkProgress::new(0, last_blockhash)); let shreds = shred_to_insert(&mint_keypair, bank0.clone()); - blocktree.insert_shreds(shreds, None, false).unwrap(); - let (res, _tx_count) = ReplayStage::replay_blocktree_into_bank( + blockstore.insert_shreds(shreds, None, false).unwrap(); + let (res, _tx_count) = ReplayStage::replay_blockstore_into_bank( &bank0, - &blocktree, + &blockstore, &mut bank0_progress, None, &VerifyRecyclers::default(), @@ -1793,8 +1799,8 @@ pub(crate) mod tests { .map(|b| b.is_dead) .unwrap_or(false)); - // Check that the erroring bank was marked as dead in blocktree - assert!(blocktree.is_dead(bank0.slot())); + // Check that the erroring bank was marked as dead in blockstore + assert!(blockstore.is_dead(bank0.slot())); res }; let _ignored = remove_dir_all(&ledger_path); @@ -1902,11 +1908,11 @@ pub(crate) mod tests { ); } - pub fn create_test_transactions_and_populate_blocktree( + pub fn create_test_transactions_and_populate_blockstore( keypairs: Vec<&Keypair>, previous_slot: Slot, bank: Arc, - blocktree: Arc, + blockstore: Arc, ) -> Vec { let mint_keypair = keypairs[0]; let keypair1 = keypairs[1]; @@ -1933,19 +1939,19 @@ pub(crate) mod tests { let entries = vec![entry_1, entry_2, entry_3]; let shreds = entries_to_test_shreds(entries.clone(), slot, previous_slot, true, 0); - blocktree.insert_shreds(shreds, None, false).unwrap(); - blocktree.set_roots(&[slot]).unwrap(); + blockstore.insert_shreds(shreds, None, false).unwrap(); + blockstore.set_roots(&[slot]).unwrap(); let (transaction_status_sender, transaction_status_receiver) = unbounded(); let transaction_status_service = TransactionStatusService::new( transaction_status_receiver, - 
blocktree.clone(), + blockstore.clone(), &Arc::new(AtomicBool::new(false)), ); // Check that process_entries successfully writes can_commit transactions statuses, and // that they are matched properly by get_confirmed_block - let _result = blocktree_processor::process_entries( + let _result = blockstore_processor::process_entries( &bank, &entries, true, @@ -1966,9 +1972,9 @@ pub(crate) mod tests { } = create_genesis_config(1000); let (ledger_path, _) = create_new_tmp_ledger!(&genesis_config); { - let blocktree = Blocktree::open(&ledger_path) + let blockstore = Blockstore::open(&ledger_path) .expect("Expected to successfully open database ledger"); - let blocktree = Arc::new(blocktree); + let blockstore = Arc::new(blockstore); let keypair1 = Keypair::new(); let keypair2 = Keypair::new(); @@ -1982,14 +1988,14 @@ pub(crate) mod tests { let bank1 = Arc::new(Bank::new_from_parent(&bank0, &Pubkey::default(), 1)); let slot = bank1.slot(); - let signatures = create_test_transactions_and_populate_blocktree( + let signatures = create_test_transactions_and_populate_blockstore( vec![&mint_keypair, &keypair1, &keypair2, &keypair3], bank0.slot(), bank1, - blocktree.clone(), + blockstore.clone(), ); - let confirmed_block = blocktree.get_confirmed_block(slot, None).unwrap(); + let confirmed_block = blockstore.get_confirmed_block(slot, None).unwrap(); assert_eq!(confirmed_block.transactions.len(), 3); for (transaction, result) in confirmed_block.transactions.into_iter() { @@ -2010,6 +2016,6 @@ pub(crate) mod tests { } } } - Blocktree::destroy(&ledger_path).unwrap(); + Blockstore::destroy(&ledger_path).unwrap(); } } diff --git a/core/src/result.rs b/core/src/result.rs index 56bf34f5f8ade9..942026d3742448 100644 --- a/core/src/result.rs +++ b/core/src/result.rs @@ -3,7 +3,7 @@ use crate::cluster_info; use crate::poh_recorder; use solana_ledger::block_error; -use solana_ledger::blocktree; +use solana_ledger::blockstore; use solana_ledger::snapshot_utils; use solana_sdk::transaction; use std::any::Any; @@ -27,7 +27,7 @@ pub enum Error { SendError, PohRecorderError(poh_recorder::PohRecorderError), BlockError(block_error::BlockError), - BlocktreeError(blocktree::BlocktreeError), + BlockstoreError(blockstore::BlockstoreError), FsExtra(fs_extra::error::Error), SnapshotError(snapshot_utils::SnapshotError), } @@ -127,9 +127,9 @@ impl std::convert::From for Error { Error::PohRecorderError(e) } } -impl std::convert::From for Error { - fn from(e: blocktree::BlocktreeError) -> Error { - Error::BlocktreeError(e) +impl std::convert::From for Error { + fn from(e: blockstore::BlockstoreError) -> Error { + Error::BlockstoreError(e) } } impl std::convert::From for Error { diff --git a/core/src/retransmit_stage.rs b/core/src/retransmit_stage.rs index c3bf504e5aff00..880b943b6f01b5 100644 --- a/core/src/retransmit_stage.rs +++ b/core/src/retransmit_stage.rs @@ -12,7 +12,7 @@ use crate::{ use crossbeam_channel::Receiver as CrossbeamReceiver; use solana_ledger::{ bank_forks::BankForks, - blocktree::{Blocktree, CompletedSlotsReceiver}, + blockstore::{Blockstore, CompletedSlotsReceiver}, leader_schedule_cache::LeaderScheduleCache, staking_utils, }; @@ -205,7 +205,7 @@ impl RetransmitStage { pub fn new( bank_forks: Arc>, leader_schedule_cache: &Arc, - blocktree: Arc, + blockstore: Arc, cluster_info: &Arc>, retransmit_sockets: Arc>, repair_socket: Arc, @@ -234,7 +234,7 @@ impl RetransmitStage { }; let leader_schedule_cache = leader_schedule_cache.clone(); let window_service = WindowService::new( - blocktree, + blockstore, 
cluster_info.clone(), verified_receiver, retransmit_sender, @@ -281,7 +281,7 @@ mod tests { use crate::contact_info::ContactInfo; use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo}; use crate::packet::{self, Meta, Packet, Packets}; - use solana_ledger::blocktree_processor::{process_blocktree, ProcessOptions}; + use solana_ledger::blockstore_processor::{process_blockstore, ProcessOptions}; use solana_ledger::create_new_tmp_ledger; use solana_net_utils::find_available_port_in_range; use solana_sdk::pubkey::Pubkey; @@ -290,13 +290,13 @@ mod tests { fn test_skip_repair() { let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(123); let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config); - let blocktree = Blocktree::open(&ledger_path).unwrap(); + let blockstore = Blockstore::open(&ledger_path).unwrap(); let opts = ProcessOptions { full_leader_cache: true, ..ProcessOptions::default() }; let (bank_forks, _, cached_leader_schedule) = - process_blocktree(&genesis_config, &blocktree, Vec::new(), opts).unwrap(); + process_blockstore(&genesis_config, &blockstore, Vec::new(), opts).unwrap(); let leader_schedule_cache = Arc::new(cached_leader_schedule); let bank_forks = Arc::new(RwLock::new(bank_forks)); diff --git a/core/src/rpc.rs b/core/src/rpc.rs index 5c9099b04b34f5..80d1a550813755 100644 --- a/core/src/rpc.rs +++ b/core/src/rpc.rs @@ -18,7 +18,7 @@ use solana_client::rpc_request::{ }; use solana_faucet::faucet::request_airdrop_transaction; use solana_ledger::{ - bank_forks::BankForks, blocktree::Blocktree, rooted_slot_iterator::RootedSlotIterator, + bank_forks::BankForks, blockstore::Blockstore, rooted_slot_iterator::RootedSlotIterator, }; use solana_runtime::bank::Bank; use solana_sdk::{ @@ -69,7 +69,7 @@ impl Default for JsonRpcConfig { pub struct JsonRpcRequestProcessor { bank_forks: Arc>, block_commitment_cache: Arc>, - blocktree: Arc, + blockstore: Arc, config: JsonRpcConfig, storage_state: StorageState, validator_exit: Arc>>, @@ -94,7 +94,7 @@ impl JsonRpcRequestProcessor { config: JsonRpcConfig, bank_forks: Arc>, block_commitment_cache: Arc>, - blocktree: Arc, + blockstore: Arc, storage_state: StorageState, validator_exit: Arc>>, ) -> Self { @@ -102,7 +102,7 @@ impl JsonRpcRequestProcessor { config, bank_forks, block_commitment_cache, - blocktree, + blockstore, storage_state, validator_exit, } @@ -318,7 +318,7 @@ impl JsonRpcRequestProcessor { slot: Slot, encoding: Option, ) -> Result> { - Ok(self.blocktree.get_confirmed_block(slot, encoding).ok()) + Ok(self.blockstore.get_confirmed_block(slot, encoding).ok()) } pub fn get_confirmed_blocks( @@ -331,9 +331,9 @@ impl JsonRpcRequestProcessor { return Ok(vec![]); } - let start_slot = (start_slot..end_slot).find(|&slot| self.blocktree.is_root(slot)); + let start_slot = (start_slot..end_slot).find(|&slot| self.blockstore.is_root(slot)); if let Some(start_slot) = start_slot { - let mut slots: Vec = RootedSlotIterator::new(start_slot, &self.blocktree) + let mut slots: Vec = RootedSlotIterator::new(start_slot, &self.blockstore) .unwrap() .map(|(slot, _)| slot) .collect(); @@ -349,14 +349,14 @@ impl JsonRpcRequestProcessor { // genesis (ie. that this bank's slot_per_year will be applicable to any rooted slot being // queried). If these values will be variable in the future, those timing parameters will // need to be stored persistently, and the slot_duration calculation will likely need to be - // moved upstream into blocktree. Also, an explicit commitment level will need to be set. 
+ // moved upstream into blockstore. Also, an explicit commitment level will need to be set. let bank = self.bank(None); let slot_duration = slot_duration_from_slots_per_year(bank.slots_per_year()); let epoch = bank.epoch_schedule().get_epoch(slot); let stakes = HashMap::new(); let stakes = bank.epoch_vote_accounts(epoch).unwrap_or(&stakes); - Ok(self.blocktree.get_block_time(slot, slot_duration, stakes)) + Ok(self.blockstore.get_block_time(slot, slot_duration, stakes)) } } @@ -1068,13 +1068,13 @@ pub mod tests { use crate::{ contact_info::ContactInfo, genesis_utils::{create_genesis_config, GenesisConfigInfo}, - replay_stage::tests::create_test_transactions_and_populate_blocktree, + replay_stage::tests::create_test_transactions_and_populate_blockstore, }; use bincode::deserialize; use jsonrpc_core::{MetaIoHandler, Output, Response, Value}; use solana_client::rpc_request::RpcEncodedTransaction; use solana_ledger::{ - blocktree::entries_to_test_shreds, blocktree_processor::fill_blocktree_slot_with_ticks, + blockstore::entries_to_test_shreds, blockstore_processor::fill_blockstore_slot_with_ticks, entry::next_entry_mut, get_tmp_ledger_path, }; use solana_sdk::{ @@ -1112,12 +1112,12 @@ pub mod tests { } fn start_rpc_handler_with_tx(pubkey: &Pubkey) -> RpcHandler { - start_rpc_handler_with_tx_and_blocktree(pubkey, vec![], 0) + start_rpc_handler_with_tx_and_blockstore(pubkey, vec![], 0) } - fn start_rpc_handler_with_tx_and_blocktree( + fn start_rpc_handler_with_tx_and_blockstore( pubkey: &Pubkey, - blocktree_roots: Vec, + blockstore_roots: Vec, default_timestamp: i64, ) -> RpcHandler { let (bank_forks, alice, leader_vote_keypair) = new_bank_forks(); @@ -1135,21 +1135,21 @@ pub mod tests { let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::new(block_commitment, 42))); let ledger_path = get_tmp_ledger_path!(); - let blocktree = Blocktree::open(&ledger_path).unwrap(); - let blocktree = Arc::new(blocktree); + let blockstore = Blockstore::open(&ledger_path).unwrap(); + let blockstore = Arc::new(blockstore); let keypair1 = Keypair::new(); let keypair2 = Keypair::new(); let keypair3 = Keypair::new(); bank.transfer(4, &alice, &keypair2.pubkey()).unwrap(); - let confirmed_block_signatures = create_test_transactions_and_populate_blocktree( + let confirmed_block_signatures = create_test_transactions_and_populate_blockstore( vec![&alice, &keypair1, &keypair2, &keypair3], 0, bank.clone(), - blocktree.clone(), + blockstore.clone(), ); - // Add timestamp vote to blocktree + // Add timestamp vote to blockstore let vote = Vote { slots: vec![1], hash: Hash::default(), @@ -1172,10 +1172,10 @@ pub mod tests { true, 0, ); - blocktree.insert_shreds(shreds, None, false).unwrap(); - blocktree.set_roots(&[1]).unwrap(); + blockstore.insert_shreds(shreds, None, false).unwrap(); + blockstore.set_roots(&[1]).unwrap(); - let mut roots = blocktree_roots.clone(); + let mut roots = blockstore_roots.clone(); if !roots.is_empty() { roots.retain(|&x| x > 1); let mut parent_bank = bank; @@ -1186,9 +1186,9 @@ pub mod tests { parent_bank.squash(); bank_forks.write().unwrap().set_root(*root, &None); let parent = if i > 0 { roots[i - 1] } else { 1 }; - fill_blocktree_slot_with_ticks(&blocktree, 5, *root, parent, Hash::default()); + fill_blockstore_slot_with_ticks(&blockstore, 5, *root, parent, Hash::default()); } - blocktree.set_roots(&roots).unwrap(); + blockstore.set_roots(&roots).unwrap(); let new_bank = Bank::new_from_parent( &parent_bank, parent_bank.collector_id(), @@ -1214,7 +1214,7 @@ pub mod tests { 
JsonRpcConfig::default(), bank_forks.clone(), block_commitment_cache.clone(), - blocktree, + blockstore, StorageState::default(), validator_exit, ))); @@ -1261,12 +1261,12 @@ pub mod tests { let bank = bank_forks.read().unwrap().working_bank(); let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default())); let ledger_path = get_tmp_ledger_path!(); - let blocktree = Blocktree::open(&ledger_path).unwrap(); + let blockstore = Blockstore::open(&ledger_path).unwrap(); let request_processor = JsonRpcRequestProcessor::new( JsonRpcConfig::default(), bank_forks, block_commitment_cache, - Arc::new(blocktree), + Arc::new(blockstore), StorageState::default(), validator_exit, ); @@ -1752,7 +1752,7 @@ pub mod tests { let validator_exit = create_validator_exit(&exit); let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default())); let ledger_path = get_tmp_ledger_path!(); - let blocktree = Blocktree::open(&ledger_path).unwrap(); + let blockstore = Blockstore::open(&ledger_path).unwrap(); let mut io = MetaIoHandler::default(); let rpc = RpcSolImpl; @@ -1763,7 +1763,7 @@ pub mod tests { JsonRpcConfig::default(), new_bank_forks().0, block_commitment_cache, - Arc::new(blocktree), + Arc::new(blockstore), StorageState::default(), validator_exit, ); @@ -1856,12 +1856,12 @@ pub mod tests { let validator_exit = create_validator_exit(&exit); let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default())); let ledger_path = get_tmp_ledger_path!(); - let blocktree = Blocktree::open(&ledger_path).unwrap(); + let blockstore = Blockstore::open(&ledger_path).unwrap(); let request_processor = JsonRpcRequestProcessor::new( JsonRpcConfig::default(), new_bank_forks().0, block_commitment_cache, - Arc::new(blocktree), + Arc::new(blockstore), StorageState::default(), validator_exit, ); @@ -1875,14 +1875,14 @@ pub mod tests { let validator_exit = create_validator_exit(&exit); let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default())); let ledger_path = get_tmp_ledger_path!(); - let blocktree = Blocktree::open(&ledger_path).unwrap(); + let blockstore = Blockstore::open(&ledger_path).unwrap(); let mut config = JsonRpcConfig::default(); config.enable_validator_exit = true; let request_processor = JsonRpcRequestProcessor::new( config, new_bank_forks().0, block_commitment_cache, - Arc::new(blocktree), + Arc::new(blockstore), StorageState::default(), validator_exit, ); @@ -1927,7 +1927,7 @@ pub mod tests { let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::new(block_commitment, 42))); let ledger_path = get_tmp_ledger_path!(); - let blocktree = Blocktree::open(&ledger_path).unwrap(); + let blockstore = Blockstore::open(&ledger_path).unwrap(); let mut config = JsonRpcConfig::default(); config.enable_validator_exit = true; @@ -1935,7 +1935,7 @@ pub mod tests { config, new_bank_forks().0, block_commitment_cache, - Arc::new(blocktree), + Arc::new(blockstore), StorageState::default(), validator_exit, ); @@ -2082,7 +2082,7 @@ pub mod tests { let bob_pubkey = Pubkey::new_rand(); let roots = vec![0, 1, 3, 4, 8]; let RpcHandler { io, meta, .. 
} = - start_rpc_handler_with_tx_and_blocktree(&bob_pubkey, roots.clone(), 0); + start_rpc_handler_with_tx_and_blockstore(&bob_pubkey, roots.clone(), 0); let req = format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getConfirmedBlocks","params":[0]}}"#); @@ -2129,7 +2129,7 @@ pub mod tests { fn test_get_block_time() { let bob_pubkey = Pubkey::new_rand(); let base_timestamp = 1576183541; - let RpcHandler { io, meta, bank, .. } = start_rpc_handler_with_tx_and_blocktree( + let RpcHandler { io, meta, bank, .. } = start_rpc_handler_with_tx_and_blockstore( &bob_pubkey, vec![1, 2, 3, 4, 5, 6, 7], base_timestamp, diff --git a/core/src/rpc_service.rs b/core/src/rpc_service.rs index c9344d51d1fbde..bc4eb2b36cc97a 100644 --- a/core/src/rpc_service.rs +++ b/core/src/rpc_service.rs @@ -9,7 +9,7 @@ use jsonrpc_http_server::{ hyper, AccessControlAllowOrigin, CloseHandle, DomainsValidation, RequestMiddleware, RequestMiddlewareAction, ServerBuilder, }; -use solana_ledger::{bank_forks::BankForks, blocktree::Blocktree}; +use solana_ledger::{bank_forks::BankForks, blockstore::Blockstore}; use solana_sdk::hash::Hash; use std::{ net::SocketAddr, @@ -91,7 +91,7 @@ impl JsonRpcService { config: JsonRpcConfig, bank_forks: Arc>, block_commitment_cache: Arc>, - blocktree: Arc, + blockstore: Arc, cluster_info: Arc>, genesis_hash: Hash, ledger_path: &Path, @@ -104,7 +104,7 @@ impl JsonRpcService { config, bank_forks, block_commitment_cache, - blocktree, + blockstore, storage_state, validator_exit.clone(), ))); @@ -204,13 +204,13 @@ mod tests { let bank_forks = Arc::new(RwLock::new(BankForks::new(bank.slot(), bank))); let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default())); let ledger_path = get_tmp_ledger_path!(); - let blocktree = Blocktree::open(&ledger_path).unwrap(); + let blockstore = Blockstore::open(&ledger_path).unwrap(); let mut rpc_service = JsonRpcService::new( rpc_addr, JsonRpcConfig::default(), bank_forks, block_commitment_cache, - Arc::new(blocktree), + Arc::new(blockstore), cluster_info, Hash::default(), &PathBuf::from("farf"), diff --git a/core/src/shred_fetch_stage.rs b/core/src/shred_fetch_stage.rs index bb9949a4c2971f..6bdd813fd48cd7 100644 --- a/core/src/shred_fetch_stage.rs +++ b/core/src/shred_fetch_stage.rs @@ -2,7 +2,7 @@ use crate::packet::{Packet, PacketsRecycler}; use crate::streamer::{self, PacketReceiver, PacketSender}; -use solana_ledger::blocktree::MAX_DATA_SHREDS_PER_SLOT; +use solana_ledger::blockstore::MAX_DATA_SHREDS_PER_SLOT; use solana_ledger::shred::{OFFSET_OF_SHRED_INDEX, SIZE_OF_SHRED_INDEX}; use solana_perf::cuda_runtime::PinnedVec; use solana_perf::packet::limited_deserialize; diff --git a/core/src/storage_stage.rs b/core/src/storage_stage.rs index 248e1b5af578f3..146e0110652aa8 100644 --- a/core/src/storage_stage.rs +++ b/core/src/storage_stage.rs @@ -10,7 +10,7 @@ use crate::{ }; use rand::{Rng, SeedableRng}; use rand_chacha::ChaChaRng; -use solana_ledger::{bank_forks::BankForks, blocktree::Blocktree}; +use solana_ledger::{bank_forks::BankForks, blockstore::Blockstore}; use solana_runtime::{bank::Bank, storage_utils::archiver_accounts}; use solana_sdk::{ account::Account, @@ -177,7 +177,7 @@ impl StorageStage { pub fn new( storage_state: &StorageState, bank_receiver: Receiver>>, - blocktree: Option>, + blockstore: Option>, keypair: &Arc, storage_keypair: &Arc, exit: &Arc, @@ -197,12 +197,12 @@ impl StorageStage { let mut current_key = 0; let mut storage_slots = StorageSlots::default(); loop { - if let Some(ref some_blocktree) = blocktree { + if let 
Some(ref some_blockstore) = blockstore { if let Err(e) = Self::process_entries( &storage_keypair, &storage_state_inner, &bank_receiver, - &some_blocktree, + &some_blockstore, &mut storage_slots, &mut current_key, slots_per_turn, @@ -368,7 +368,7 @@ impl StorageStage { fn process_turn( storage_keypair: &Arc, state: &Arc>, - blocktree: &Arc, + blockstore: &Arc, blockhash: Hash, slot: Slot, slots_per_segment: u64, @@ -431,7 +431,7 @@ impl StorageStage { let mut statew = state.write().unwrap(); match chacha_cbc_encrypt_file_many_keys( - blocktree, + blockstore, segment as u64, statew.slots_per_segment, &mut statew.storage_keys, @@ -502,7 +502,7 @@ impl StorageStage { storage_keypair: &Arc, storage_state: &Arc>, bank_receiver: &Receiver>>, - blocktree: &Arc, + blockstore: &Arc, storage_slots: &mut StorageSlots, current_key_idx: &mut usize, slots_per_turn: u64, @@ -541,7 +541,7 @@ impl StorageStage { let _ignored = Self::process_turn( &storage_keypair, &storage_state, - &blocktree, + &blockstore, bank.last_blockhash(), bank.slot(), bank.slots_per_segment(), diff --git a/core/src/tpu.rs b/core/src/tpu.rs index cfb29935fee2dc..62d1c9961c3fb3 100644 --- a/core/src/tpu.rs +++ b/core/src/tpu.rs @@ -12,7 +12,7 @@ use crate::{ sigverify_stage::{DisabledSigVerifier, SigVerifyStage}, }; use crossbeam_channel::unbounded; -use solana_ledger::{blocktree::Blocktree, blocktree_processor::TransactionStatusSender}; +use solana_ledger::{blockstore::Blockstore, blockstore_processor::TransactionStatusSender}; use std::{ net::UdpSocket, sync::{ @@ -42,7 +42,7 @@ impl Tpu { broadcast_sockets: Vec, sigverify_disabled: bool, transaction_status_sender: Option, - blocktree: &Arc, + blockstore: &Arc, broadcast_type: &BroadcastStageType, exit: &Arc, shred_version: u16, @@ -87,7 +87,7 @@ impl Tpu { cluster_info.clone(), entry_receiver, &exit, - blocktree, + blockstore, shred_version, ); diff --git a/core/src/transaction_status_service.rs b/core/src/transaction_status_service.rs index c9ea451bca580d..9386b90928f97f 100644 --- a/core/src/transaction_status_service.rs +++ b/core/src/transaction_status_service.rs @@ -1,6 +1,6 @@ use crossbeam_channel::{Receiver, RecvTimeoutError}; use solana_client::rpc_request::RpcTransactionStatus; -use solana_ledger::{blocktree::Blocktree, blocktree_processor::TransactionStatusBatch}; +use solana_ledger::{blockstore::Blockstore, blockstore_processor::TransactionStatusBatch}; use solana_runtime::bank::{Bank, HashAgeKind}; use std::{ sync::{ @@ -19,7 +19,7 @@ impl TransactionStatusService { #[allow(clippy::new_ret_no_self)] pub fn new( write_transaction_status_receiver: Receiver, - blocktree: Arc, + blockstore: Arc, exit: &Arc, ) -> Self { let exit = exit.clone(); @@ -31,7 +31,7 @@ impl TransactionStatusService { } if let Err(RecvTimeoutError::Disconnected) = Self::write_transaction_status_batch( &write_transaction_status_receiver, - &blocktree, + &blockstore, ) { break; } @@ -42,7 +42,7 @@ impl TransactionStatusService { fn write_transaction_status_batch( write_transaction_status_receiver: &Receiver, - blocktree: &Arc, + blockstore: &Arc, ) -> Result<(), RecvTimeoutError> { let TransactionStatusBatch { bank, @@ -68,7 +68,7 @@ impl TransactionStatusService { .get_fee_calculator(&fee_hash) .expect("FeeCalculator must exist"); let fee = fee_calculator.calculate_fee(transaction.message()); - blocktree + blockstore .write_transaction_status( (slot, transaction.signatures[0]), &RpcTransactionStatus { diff --git a/core/src/tvu.rs b/core/src/tvu.rs index d4da19680bc190..09c72318ab0fcf 100644 --- 
a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -21,8 +21,8 @@ use crossbeam_channel::unbounded; use solana_ledger::leader_schedule_cache::LeaderScheduleCache; use solana_ledger::{ bank_forks::BankForks, - blocktree::{Blocktree, CompletedSlotsReceiver}, - blocktree_processor::TransactionStatusSender, + blockstore::{Blockstore, CompletedSlotsReceiver}, + blockstore_processor::TransactionStatusSender, }; use solana_sdk::{ pubkey::Pubkey, @@ -63,7 +63,7 @@ impl Tvu { /// # Arguments /// * `cluster_info` - The cluster_info state. /// * `sockets` - fetch, repair, and retransmit sockets - /// * `blocktree` - the ledger itself + /// * `blockstore` - the ledger itself #[allow(clippy::new_ret_no_self, clippy::too_many_arguments)] pub fn new( vote_account: &Pubkey, @@ -72,7 +72,7 @@ impl Tvu { bank_forks: &Arc>, cluster_info: &Arc>, sockets: Sockets, - blocktree: Arc, + blockstore: Arc, storage_state: &StorageState, blockstream_unix_socket: Option<&PathBuf>, max_ledger_slots: Option, @@ -133,7 +133,7 @@ impl Tvu { let retransmit_stage = RetransmitStage::new( bank_forks.clone(), leader_schedule_cache, - blocktree.clone(), + blockstore.clone(), &cluster_info, Arc::new(retransmit_sockets), repair_socket, @@ -175,7 +175,7 @@ impl Tvu { let (replay_stage, root_bank_receiver) = ReplayStage::new( replay_stage_config, - blocktree.clone(), + blockstore.clone(), bank_forks.clone(), cluster_info.clone(), ledger_signal_receiver, @@ -185,7 +185,7 @@ impl Tvu { let blockstream_service = if let Some(blockstream_unix_socket) = blockstream_unix_socket { let blockstream_service = BlockstreamService::new( blockstream_slot_receiver, - blocktree.clone(), + blockstore.clone(), blockstream_unix_socket, &exit, ); @@ -197,7 +197,7 @@ impl Tvu { let ledger_cleanup_service = max_ledger_slots.map(|max_ledger_slots| { LedgerCleanupService::new( ledger_cleanup_slot_receiver, - blocktree.clone(), + blockstore.clone(), max_ledger_slots, &exit, ) @@ -206,7 +206,7 @@ impl Tvu { let storage_stage = StorageStage::new( storage_state, root_bank_receiver, - Some(blocktree), + Some(blockstore), &keypair, storage_keypair, &exit, @@ -272,14 +272,14 @@ pub mod tests { cluster_info1.insert_info(leader.info.clone()); let cref1 = Arc::new(RwLock::new(cluster_info1)); - let (blocktree_path, _) = create_new_tmp_ledger!(&genesis_config); - let (blocktree, l_receiver, completed_slots_receiver) = - Blocktree::open_with_signal(&blocktree_path) + let (blockstore_path, _) = create_new_tmp_ledger!(&genesis_config); + let (blockstore, l_receiver, completed_slots_receiver) = + Blockstore::open_with_signal(&blockstore_path) .expect("Expected to successfully open ledger"); - let blocktree = Arc::new(blocktree); + let blockstore = Arc::new(blockstore); let bank = bank_forks.working_bank(); let (exit, poh_recorder, poh_service, _entry_receiver) = - create_test_recorder(&bank, &blocktree, None); + create_test_recorder(&bank, &blockstore, None); let voting_keypair = Keypair::new(); let storage_keypair = Arc::new(Keypair::new()); let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank)); @@ -298,7 +298,7 @@ pub mod tests { forwards: target1.sockets.tvu_forwards, } }, - blocktree, + blockstore, &StorageState::default(), None, None, diff --git a/core/src/validator.rs b/core/src/validator.rs index 27a6f115863261..856eda52b8ed8d 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -23,8 +23,8 @@ use crossbeam_channel::unbounded; use solana_ledger::{ bank_forks::{BankForks, SnapshotConfig}, bank_forks_utils, - blocktree::{Blocktree, 
CompletedSlotsReceiver}, - blocktree_processor::{self, BankForksInfo}, + blockstore::{Blockstore, CompletedSlotsReceiver}, + blockstore_processor::{self, BankForksInfo}, create_new_tmp_ledger, leader_schedule::FixedSchedule, leader_schedule_cache::LeaderScheduleCache, @@ -156,12 +156,12 @@ impl Validator { genesis_hash, bank_forks, bank_forks_info, - blocktree, + blockstore, ledger_signal_receiver, completed_slots_receiver, leader_schedule_cache, poh_config, - ) = new_banks_from_blocktree( + ) = new_banks_from_blockstore( config.expected_genesis_hash, ledger_path, config.account_paths.clone(), @@ -197,7 +197,7 @@ impl Validator { bank.slots_per_segment(), ); - let blocktree = Arc::new(blocktree); + let blockstore = Arc::new(blockstore); let rpc_service = if node.info.rpc.port() == 0 { None @@ -207,7 +207,7 @@ impl Validator { config.rpc_config.clone(), bank_forks.clone(), block_commitment_cache.clone(), - blocktree.clone(), + blockstore.clone(), cluster_info.clone(), genesis_hash, ledger_path, @@ -237,7 +237,7 @@ impl Validator { Some(transaction_status_sender), Some(TransactionStatusService::new( transaction_status_receiver, - blocktree.clone(), + blockstore.clone(), &exit, )), ) @@ -265,11 +265,11 @@ impl Validator { bank.tick_height(), bank.last_blockhash(), bank.slot(), - leader_schedule_cache.next_leader_slot(&id, bank.slot(), &bank, Some(&blocktree)), + leader_schedule_cache.next_leader_slot(&id, bank.slot(), &bank, Some(&blockstore)), bank.ticks_per_slot(), &id, - &blocktree, - blocktree.new_shreds_signals.first().cloned(), + &blockstore, + blockstore.new_shreds_signals.first().cloned(), &leader_schedule_cache, &poh_config, ); @@ -282,7 +282,7 @@ impl Validator { let gossip_service = GossipService::new( &cluster_info, - Some(blocktree.clone()), + Some(blockstore.clone()), Some(bank_forks.clone()), node.sockets.gossip, &exit, @@ -347,7 +347,7 @@ impl Validator { let poh_service = PohService::new(poh_recorder.clone(), &poh_config, &exit); assert_eq!( - blocktree.new_shreds_signals.len(), + blockstore.new_shreds_signals.len(), 1, "New shred signal for the TVU should be the same as the clear bank signal." 
); @@ -359,7 +359,7 @@ impl Validator { &bank_forks, &cluster_info, sockets, - blocktree.clone(), + blockstore.clone(), &storage_state, config.blockstream_unix_socket.as_ref(), config.max_ledger_slots, @@ -389,7 +389,7 @@ impl Validator { node.sockets.broadcast, config.dev_sigverify_disabled, transaction_status_sender, - &blocktree, + &blockstore, &config.broadcast_stage_type, &exit, shred_version, @@ -470,9 +470,9 @@ impl Validator { } } -pub fn new_banks_from_blocktree( +pub fn new_banks_from_blockstore( expected_genesis_hash: Option, - blocktree_path: &Path, + blockstore_path: &Path, account_paths: Vec, snapshot_config: Option, poh_verify: bool, @@ -482,14 +482,14 @@ pub fn new_banks_from_blocktree( Hash, BankForks, Vec, - Blocktree, + Blockstore, Receiver, CompletedSlotsReceiver, LeaderScheduleCache, PohConfig, ) { - let genesis_config = GenesisConfig::load(blocktree_path).unwrap_or_else(|err| { - error!("Failed to load genesis from {:?}: {}", blocktree_path, err); + let genesis_config = GenesisConfig::load(blockstore_path).unwrap_or_else(|err| { + error!("Failed to load genesis from {:?}: {}", blockstore_path, err); process::exit(1); }); let genesis_hash = genesis_config.hash(); @@ -500,24 +500,24 @@ pub fn new_banks_from_blocktree( error!("genesis hash mismatch: expected {}", expected_genesis_hash); error!( "Delete the ledger directory to continue: {:?}", - blocktree_path + blockstore_path ); process::exit(1); } } - let (blocktree, ledger_signal_receiver, completed_slots_receiver) = - Blocktree::open_with_signal(blocktree_path).expect("Failed to open ledger database"); + let (blockstore, ledger_signal_receiver, completed_slots_receiver) = + Blockstore::open_with_signal(blockstore_path).expect("Failed to open ledger database"); - let process_options = blocktree_processor::ProcessOptions { + let process_options = blockstore_processor::ProcessOptions { poh_verify, dev_halt_at_slot, - ..blocktree_processor::ProcessOptions::default() + ..blockstore_processor::ProcessOptions::default() }; let (mut bank_forks, bank_forks_info, mut leader_schedule_cache) = bank_forks_utils::load( &genesis_config, - &blocktree, + &blockstore, account_paths, snapshot_config.as_ref(), process_options, @@ -535,7 +535,7 @@ pub fn new_banks_from_blocktree( genesis_hash, bank_forks, bank_forks_info, - blocktree, + blockstore, ledger_signal_receiver, completed_slots_receiver, leader_schedule_cache, diff --git a/core/src/window_service.rs b/core/src/window_service.rs index 756882fa887e07..7d563c87453980 100644 --- a/core/src/window_service.rs +++ b/core/src/window_service.rs @@ -1,5 +1,5 @@ //! `window_service` handles the data plane incoming shreds, storing them in -//! blocktree and retransmitting where required +//! blockstore and retransmitting where required //! 
use crate::cluster_info::ClusterInfo; use crate::packet::Packets; @@ -13,7 +13,7 @@ use rayon::iter::IntoParallelRefMutIterator; use rayon::iter::ParallelIterator; use rayon::ThreadPool; use solana_ledger::bank_forks::BankForks; -use solana_ledger::blocktree::{self, Blocktree, MAX_DATA_SHREDS_PER_SLOT}; +use solana_ledger::blockstore::{self, Blockstore, MAX_DATA_SHREDS_PER_SLOT}; use solana_ledger::leader_schedule_cache::LeaderScheduleCache; use solana_ledger::shred::Shred; use solana_metrics::{inc_new_counter_debug, inc_new_counter_error}; @@ -30,7 +30,7 @@ use std::time::{Duration, Instant}; fn verify_shred_slot(shred: &Shred, root: u64) -> bool { if shred.is_data() { // Only data shreds have parent information - blocktree::verify_shred_slots(shred.slot(), shred.parent(), root) + blockstore::verify_shred_slots(shred.slot(), shred.parent(), root) } else { // Filter out outdated coding shreds shred.slot() >= root @@ -75,7 +75,7 @@ pub fn should_retransmit_and_persist( fn run_insert( shred_receiver: &CrossbeamReceiver>, - blocktree: &Arc, + blockstore: &Arc, leader_schedule_cache: &Arc, ) -> Result<()> { let timer = Duration::from_millis(200); @@ -85,15 +85,15 @@ fn run_insert( shreds.append(&mut more_shreds) } - let blocktree_insert_metrics = - blocktree.insert_shreds(shreds, Some(leader_schedule_cache), false)?; - blocktree_insert_metrics.report_metrics("recv-window-insert-shreds"); + let blockstore_insert_metrics = + blockstore.insert_shreds(shreds, Some(leader_schedule_cache), false)?; + blockstore_insert_metrics.report_metrics("recv-window-insert-shreds"); Ok(()) } fn recv_window( - blocktree: &Arc, + blockstore: &Arc, insert_shred_sender: &CrossbeamSender>, my_pubkey: &Pubkey, verified_receiver: &CrossbeamReceiver>, @@ -117,7 +117,7 @@ where let now = Instant::now(); inc_new_counter_debug!("streamer-recv_window-recv", total_packets); - let last_root = blocktree.last_root(); + let last_root = blockstore.last_root(); let shreds: Vec<_> = thread_pool.install(|| { packets .par_iter_mut() @@ -138,7 +138,7 @@ where // get retransmitted. It'll allow peer nodes to see this shred // and trigger them to mark the slot as dead. 
if shred.index() >= (MAX_DATA_SHREDS_PER_SLOT - 1) as u32 { - let _ = blocktree.set_dead_slot(shred.slot()); + let _ = blockstore.set_dead_slot(shred.slot()); } packet.meta.slot = shred.slot(); packet.meta.seed = shred.seed(); @@ -205,7 +205,7 @@ pub struct WindowService { impl WindowService { #[allow(clippy::too_many_arguments)] pub fn new( - blocktree: Arc, + blockstore: Arc, cluster_info: Arc>, verified_receiver: CrossbeamReceiver>, retransmit: PacketSender, @@ -227,7 +227,7 @@ impl WindowService { }; let repair_service = RepairService::new( - blocktree.clone(), + blockstore.clone(), exit.clone(), repair_socket, cluster_info.clone(), @@ -238,7 +238,7 @@ impl WindowService { let t_insert = Self::start_window_insert_thread( exit, - &blocktree, + &blockstore, leader_schedule_cache, insert_receiver, ); @@ -246,7 +246,7 @@ impl WindowService { let t_window = Self::start_recv_window_thread( cluster_info.read().unwrap().id(), exit, - &blocktree, + &blockstore, insert_sender, verified_receiver, shred_filter, @@ -263,12 +263,12 @@ impl WindowService { fn start_window_insert_thread( exit: &Arc, - blocktree: &Arc, + blockstore: &Arc, leader_schedule_cache: &Arc, insert_receiver: CrossbeamReceiver>, ) -> JoinHandle<()> { let exit = exit.clone(); - let blocktree = blocktree.clone(); + let blockstore = blockstore.clone(); let leader_schedule_cache = leader_schedule_cache.clone(); let mut handle_timeout = || {}; let handle_error = || { @@ -281,7 +281,7 @@ impl WindowService { break; } - if let Err(e) = run_insert(&insert_receiver, &blocktree, &leader_schedule_cache) { + if let Err(e) = run_insert(&insert_receiver, &blockstore, &leader_schedule_cache) { if Self::should_exit_on_error(e, &mut handle_timeout, &handle_error) { break; } @@ -293,7 +293,7 @@ impl WindowService { fn start_recv_window_thread( id: Pubkey, exit: &Arc, - blocktree: &Arc, + blockstore: &Arc, insert_sender: CrossbeamSender>, verified_receiver: CrossbeamReceiver>, shred_filter: F, @@ -307,7 +307,7 @@ impl WindowService { + std::marker::Sync, { let exit = exit.clone(); - let blocktree = blocktree.clone(); + let blockstore = blockstore.clone(); Builder::new() .name("solana-window".to_string()) .spawn(move || { @@ -334,7 +334,7 @@ impl WindowService { } }; if let Err(e) = recv_window( - &blocktree, + &blockstore, &insert_sender, &id, &verified_receiver, @@ -401,7 +401,7 @@ mod test { use rand::thread_rng; use solana_ledger::shred::DataShredHeader; use solana_ledger::{ - blocktree::{make_many_slot_entries, Blocktree}, + blockstore::{make_many_slot_entries, Blockstore}, entry::{create_ticks, Entry}, get_tmp_ledger_path, shred::Shredder, @@ -434,23 +434,23 @@ mod test { #[test] fn test_process_shred() { - let blocktree_path = get_tmp_ledger_path!(); - let blocktree = Arc::new(Blocktree::open(&blocktree_path).unwrap()); + let blockstore_path = get_tmp_ledger_path!(); + let blockstore = Arc::new(Blockstore::open(&blockstore_path).unwrap()); let num_entries = 10; let original_entries = create_ticks(num_entries, 0, Hash::default()); let mut shreds = local_entries_to_shred(&original_entries, 0, 0, &Arc::new(Keypair::new())); shreds.reverse(); - blocktree + blockstore .insert_shreds(shreds, None, false) .expect("Expect successful processing of shred"); assert_eq!( - blocktree.get_slot_entries(0, 0, None).unwrap(), + blockstore.get_slot_entries(0, 0, None).unwrap(), original_entries ); - drop(blocktree); - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + drop(blockstore); + 
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] @@ -529,18 +529,18 @@ mod test { verified_receiver: CrossbeamReceiver>, exit: Arc, ) -> WindowService { - let blocktree_path = get_tmp_ledger_path!(); - let (blocktree, _, _) = Blocktree::open_with_signal(&blocktree_path) + let blockstore_path = get_tmp_ledger_path!(); + let (blockstore, _, _) = Blockstore::open_with_signal(&blockstore_path) .expect("Expected to be able to open database ledger"); - let blocktree = Arc::new(blocktree); + let blockstore = Arc::new(blockstore); let (retransmit_sender, _retransmit_receiver) = channel(); let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair( ContactInfo::new_localhost(&Pubkey::default(), 0), ))); let repair_sock = Arc::new(UdpSocket::bind(socketaddr_any!()).unwrap()); let window = WindowService::new( - blocktree, + blockstore, cluster_info, verified_receiver, retransmit_sender, diff --git a/core/tests/ledger_cleanup.rs b/core/tests/ledger_cleanup.rs index f9be487e7d22f3..ea1903d08c0188 100644 --- a/core/tests/ledger_cleanup.rs +++ b/core/tests/ledger_cleanup.rs @@ -3,7 +3,7 @@ #[cfg(test)] mod tests { use solana_core::ledger_cleanup_service::LedgerCleanupService; - use solana_ledger::blocktree::{make_many_slot_entries, Blocktree}; + use solana_ledger::blockstore::{make_many_slot_entries, Blockstore}; use solana_ledger::get_tmp_ledger_path; use solana_ledger::shred::Shred; use std::collections::VecDeque; @@ -33,7 +33,7 @@ mod tests { pub stop_size_bytes: u64, pub stop_size_iterations: u64, pub pre_generate_data: bool, - pub cleanup_blocktree: bool, + pub cleanup_blockstore: bool, pub emit_cpu_info: bool, pub assert_compaction: bool, } @@ -150,7 +150,7 @@ mod tests { let stop_size_bytes = read_env("STOP_SIZE_BYTES", DEFAULT_STOP_SIZE_BYTES); let stop_size_iterations = read_env("STOP_SIZE_ITERATIONS", DEFAULT_STOP_SIZE_ITERATIONS); let pre_generate_data = read_env("PRE_GENERATE_DATA", false); - let cleanup_blocktree = read_env("CLEANUP_BLOCKTREE", true); + let cleanup_blockstore = read_env("CLEANUP_BLOCKSTORE", true); let emit_cpu_info = read_env("EMIT_CPU_INFO", true); // set default to `true` once compaction is merged let assert_compaction = read_env("ASSERT_COMPACTION", false); @@ -163,7 +163,7 @@ mod tests { stop_size_bytes, stop_size_iterations, pre_generate_data, - cleanup_blocktree, + cleanup_blockstore, emit_cpu_info, assert_compaction, } @@ -181,11 +181,11 @@ mod tests { batch_size: u64, entries: u64, max_slots: i64, - blocktree: &Blocktree, + blockstore: &Blockstore, cpu: &CpuStatsInner, ) { let time_now = Instant::now(); - let storage_now = blocktree.storage_size().unwrap_or(0); + let storage_now = blockstore.storage_size().unwrap_or(0); let (cpu_user, cpu_system, cpu_idle) = (cpu.cpu_user, cpu.cpu_system, cpu.cpu_idle); println!( @@ -209,11 +209,11 @@ mod tests { #[test] fn test_ledger_cleanup_compaction() { - let blocktree_path = get_tmp_ledger_path!(); - let blocktree = Arc::new(Blocktree::open(&blocktree_path).unwrap()); + let blockstore_path = get_tmp_ledger_path!(); + let blockstore = Arc::new(Blockstore::open(&blockstore_path).unwrap()); let config = get_benchmark_config(); eprintln!("BENCHMARK CONFIG: {:?}", config); - eprintln!("LEDGER_PATH: {:?}", &blocktree_path); + eprintln!("LEDGER_PATH: {:?}", &blockstore_path); let benchmark_slots = config.benchmark_slots; let batch_size = config.batch_size; @@ -227,7 +227,7 @@ mod tests { let (sender, receiver) = channel(); let exit = 
Arc::new(AtomicBool::new(false)); let cleaner = - LedgerCleanupService::new(receiver, blocktree.clone(), max_ledger_slots, &exit); + LedgerCleanupService::new(receiver, blockstore.clone(), max_ledger_slots, &exit); let exit_cpu = Arc::new(AtomicBool::new(false)); let sys = CpuStatsUpdater::new(&exit_cpu); @@ -259,7 +259,7 @@ mod tests { 0, 0, 0, - &blocktree, + &blockstore, &sys.get_stats(), ); @@ -272,7 +272,7 @@ mod tests { make_many_slot_entries(x, batch_size, entries_per_slot).0 }; - blocktree.insert_shreds(shreds, None, false).unwrap(); + blockstore.insert_shreds(shreds, None, false).unwrap(); sender.send(x).unwrap(); emit_stats( @@ -283,7 +283,7 @@ mod tests { batch_size, batch_size, max_ledger_slots as i64, - &blocktree, + &blockstore, &sys.get_stats(), ); @@ -313,13 +313,13 @@ mod tests { 0, 0, max_ledger_slots as i64, - &blocktree, + &blockstore, &sys.get_stats(), ); // Poll on some compaction happening let start_poll = Instant::now(); - while blocktree.storage_size().unwrap_or(0) >= u1 { + while blockstore.storage_size().unwrap_or(0) >= u1 { if start_poll.elapsed().as_secs() > ROCKSDB_FLUSH_GRACE_PERIOD_SECS { break; } @@ -334,7 +334,7 @@ mod tests { 0, 0, max_ledger_slots as i64, - &blocktree, + &blockstore, &sys.get_stats(), ); @@ -350,9 +350,10 @@ mod tests { assert!(u2 < u1, "expected compaction! pre={},post={}", u1, u2); } - if config.cleanup_blocktree { - drop(blocktree); - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + if config.cleanup_blockstore { + drop(blockstore); + Blockstore::destroy(&blockstore_path) + .expect("Expected successful database destruction"); } } } diff --git a/core/tests/storage_stage.rs b/core/tests/storage_stage.rs index 7a6317cc6142ff..ca3ae9f662f85c 100644 --- a/core/tests/storage_stage.rs +++ b/core/tests/storage_stage.rs @@ -7,9 +7,9 @@ mod tests { use solana_core::storage_stage::{test_cluster_info, SLOTS_PER_TURN_TEST}; use solana_core::storage_stage::{StorageStage, StorageState}; use solana_ledger::bank_forks::BankForks; - use solana_ledger::blocktree_processor; + use solana_ledger::blockstore_processor; use solana_ledger::entry; - use solana_ledger::{blocktree::Blocktree, create_new_tmp_ledger}; + use solana_ledger::{blockstore::Blockstore, create_new_tmp_ledger}; use solana_runtime::bank::Bank; use solana_sdk::clock::DEFAULT_TICKS_PER_SLOT; use solana_sdk::hash::Hash; @@ -44,7 +44,7 @@ mod tests { .push(solana_storage_program::solana_storage_program!()); let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config); - let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap()); + let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap()); let bank = Bank::new(&genesis_config); let bank = Arc::new(bank); @@ -63,7 +63,7 @@ mod tests { let storage_stage = StorageStage::new( &storage_state, bank_receiver, - Some(blocktree.clone()), + Some(blockstore.clone()), &keypair, &storage_keypair, &exit.clone(), @@ -109,7 +109,7 @@ mod tests { let next_bank = Arc::new(Bank::new_from_parent(&bank, &keypair.pubkey(), 2)); //register ticks so the program reports a different segment - blocktree_processor::process_entries( + blockstore_processor::process_entries( &next_bank, &entry::create_ticks( DEFAULT_TICKS_PER_SLOT * next_bank.slots_per_segment() + 1, @@ -164,7 +164,7 @@ mod tests { let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(1000); let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config); - let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap()); + let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap()); let slot = 1; let bank = Arc::new(Bank::new(&genesis_config)); let bank_forks = Arc::new(RwLock::new(BankForks::new_from_banks( @@ -182,7 +182,7 @@ mod tests { let storage_stage = StorageStage::new( &storage_state, bank_receiver, - Some(blocktree.clone()), + Some(blockstore.clone()), &keypair, &storage_keypair, &exit.clone(), @@ -203,7 +203,7 @@ mod tests { let rooted_banks = (slot..slot + last_bank.slots_per_segment() + 1) .map(|i| { let bank = Arc::new(Bank::new_from_parent(&last_bank, &keypair.pubkey(), i)); - blocktree_processor::process_entries( + blockstore_processor::process_entries( &bank, &entry::create_ticks(64, 0, bank.last_blockhash()), true, diff --git a/genesis/src/main.rs b/genesis/src/main.rs index 353a88cde4c315..35ff42c86f6d0a 100644 --- a/genesis/src/main.rs +++ b/genesis/src/main.rs @@ -7,7 +7,7 @@ use solana_clap_utils::{ input_validators::{is_rfc3339_datetime, is_valid_percentage}, }; use solana_genesis::{genesis_accounts::add_genesis_accounts, Base64Account}; -use solana_ledger::{blocktree::create_new_ledger, poh::compute_hashes_per_tick}; +use solana_ledger::{blockstore::create_new_ledger, poh::compute_hashes_per_tick}; use solana_sdk::{ account::Account, clock, diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index 974bc2df477328..d4beb95bc27308 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -3,14 +3,14 @@ use clap::{ }; use histogram; use serde_json::json; -use solana_ledger::blocktree_db::Database; +use solana_ledger::blockstore_db::Database; use solana_ledger::{ bank_forks::{BankForks, SnapshotConfig}, bank_forks_utils, - blocktree::Blocktree, - blocktree_db, - blocktree_db::Column, - blocktree_processor, + blockstore::Blockstore, + blockstore_db, + blockstore_db::Column, + blockstore_processor, rooted_slot_iterator::RootedSlotIterator, }; use solana_sdk::{ @@ -34,9 +34,9 @@ enum LedgerOutputMethod { Json, } -fn output_slot(blocktree: &Blocktree, slot: Slot, method: &LedgerOutputMethod) { - println!("Slot Meta {:?}", blocktree.meta(slot)); - let entries = blocktree +fn output_slot(blockstore: &Blockstore, slot: Slot, method: &LedgerOutputMethod) { + println!("Slot Meta {:?}", blockstore.meta(slot)); + let entries = blockstore .get_slot_entries(slot, 0, None) .unwrap_or_else(|err| { eprintln!("Failed to load entries for slot {}: {:?}", slot, err); @@ -116,9 +116,9 @@ fn output_slot(blocktree: &Blocktree, slot: Slot, method: &LedgerOutputMethod) { } } -fn output_ledger(blocktree: Blocktree, starting_slot: Slot, method: LedgerOutputMethod) { +fn output_ledger(blockstore: Blockstore, starting_slot: Slot, method: LedgerOutputMethod) { let rooted_slot_iterator = - RootedSlotIterator::new(starting_slot, &blocktree).unwrap_or_else(|err| { + RootedSlotIterator::new(starting_slot, &blockstore).unwrap_or_else(|err| { eprintln!( "Failed to load entries starting from slot {}: {:?}", starting_slot, err @@ -139,7 +139,7 @@ fn output_ledger(blocktree: Blocktree, starting_slot: Slot, method: LedgerOutput } } - output_slot(&blocktree, slot, &method); + output_slot(&blockstore, slot, &method); } if method == LedgerOutputMethod::Json { @@ -174,7 +174,7 @@ fn render_dot(dot: String, output_file: &str, output_format: &str) -> io::Result #[allow(clippy::cognitive_complexity)] fn graph_forks( bank_forks: 
BankForks, - bank_forks_info: Vec, + bank_forks_info: Vec, include_all_votes: bool, ) -> String { // Search all forks and collect the last vote made by each validator @@ -394,7 +394,7 @@ fn graph_forks( dot.join("\n") } -fn analyze_column( +fn analyze_column( db: &Database, name: &str, key_size: usize, @@ -404,7 +404,7 @@ fn analyze_column( let mut val_tot: u64 = 0; let mut row_hist = histogram::Histogram::new(); let a = key_size as u64; - for (_x, y) in db.iter::(blocktree_db::IteratorMode::Start).unwrap() { + for (_x, y) in db.iter::(blockstore_db::IteratorMode::Start).unwrap() { let b = y.len() as u64; key_tot += a; val_hist.increment(b).unwrap(); @@ -464,7 +464,7 @@ fn analyze_column( } fn analyze_storage(database: &Database) -> Result<(), String> { - use blocktree_db::columns::*; + use blockstore_db::columns::*; analyze_column::(database, "SlotMeta", SlotMeta::key_size())?; analyze_column::(database, "Orphans", Orphans::key_size())?; analyze_column::(database, "DeadSlots", DeadSlots::key_size())?; @@ -492,9 +492,9 @@ fn open_genesis_config(ledger_path: &Path) -> GenesisConfig { }) } -fn open_blocktree(ledger_path: &Path) -> Blocktree { - match Blocktree::open(ledger_path) { - Ok(blocktree) => blocktree, +fn open_blockstore(ledger_path: &Path) -> Blockstore { + match Blockstore::open(ledger_path) { + Ok(blockstore) => blockstore, Err(err) => { eprintln!("Failed to open ledger at {:?}: {:?}", ledger_path, err); exit(1); @@ -669,7 +669,7 @@ fn main() { ("print", Some(args_matches)) => { let starting_slot = value_t_or_exit!(args_matches, "starting_slot", Slot); output_ledger( - open_blocktree(&ledger_path), + open_blockstore(&ledger_path), starting_slot, LedgerOutputMethod::Print, ); @@ -682,7 +682,7 @@ fn main() { for slot in slots { println!("Slot {}", slot); output_slot( - &open_blocktree(&ledger_path), + &open_blockstore(&ledger_path), slot, &LedgerOutputMethod::Print, ); @@ -691,7 +691,7 @@ fn main() { ("json", Some(args_matches)) => { let starting_slot = value_t_or_exit!(args_matches, "starting_slot", Slot); output_ledger( - open_blocktree(&ledger_path), + open_blockstore(&ledger_path), starting_slot, LedgerOutputMethod::Json, ); @@ -717,15 +717,15 @@ fn main() { vec![ledger_path.join("accounts")] }; - let process_options = blocktree_processor::ProcessOptions { + let process_options = blockstore_processor::ProcessOptions { poh_verify, dev_halt_at_slot, - ..blocktree_processor::ProcessOptions::default() + ..blockstore_processor::ProcessOptions::default() }; match bank_forks_utils::load( &open_genesis_config(&ledger_path), - &open_blocktree(&ledger_path), + &open_blockstore(&ledger_path), account_paths, snapshot_config.as_ref(), process_options, @@ -764,17 +764,17 @@ fn main() { } ("prune", Some(args_matches)) => { if let Some(prune_file_path) = args_matches.value_of("slot_list") { - let blocktree = open_blocktree(&ledger_path); + let blockstore = open_blockstore(&ledger_path); let prune_file = File::open(prune_file_path.to_string()).unwrap(); let slot_hashes: BTreeMap = serde_yaml::from_reader(prune_file).unwrap(); let iter = - RootedSlotIterator::new(0, &blocktree).expect("Failed to get rooted slot"); + RootedSlotIterator::new(0, &blockstore).expect("Failed to get rooted slot"); let potential_hashes: Vec<_> = iter .filter_map(|(slot, _meta)| { - let blockhash = blocktree + let blockhash = blockstore .get_slot_entries(slot, 0, None) .unwrap() .last() @@ -796,11 +796,11 @@ fn main() { .last() .expect("Failed to find a valid slot"); println!("Prune at slot {:?} hash {:?}", target_slot, 
target_hash); - blocktree.prune(*target_slot); + blockstore.prune(*target_slot); } } ("list-roots", Some(args_matches)) => { - let blocktree = open_blocktree(&ledger_path); + let blockstore = open_blockstore(&ledger_path); let max_height = if let Some(height) = args_matches.value_of("max_height") { usize::from_str(height).expect("Maximum height must be a number") } else { @@ -812,12 +812,12 @@ fn main() { usize::from_str(DEFAULT_ROOT_COUNT).unwrap() }; - let iter = RootedSlotIterator::new(0, &blocktree).expect("Failed to get rooted slot"); + let iter = RootedSlotIterator::new(0, &blockstore).expect("Failed to get rooted slot"); let slot_hash: Vec<_> = iter .filter_map(|(slot, _meta)| { if slot <= max_height as u64 { - let blockhash = blocktree + let blockhash = blockstore .get_slot_entries(slot, 0, None) .unwrap() .last() @@ -853,7 +853,7 @@ fn main() { }); } ("bounds", Some(args_matches)) => { - match open_blocktree(&ledger_path).slot_meta_iterator(0) { + match open_blockstore(&ledger_path).slot_meta_iterator(0) { Ok(metas) => { let all = args_matches.is_present("all"); diff --git a/ledger/src/bank_forks_utils.rs b/ledger/src/bank_forks_utils.rs index 4e916b5d0faf93..89abac68fed268 100644 --- a/ledger/src/bank_forks_utils.rs +++ b/ledger/src/bank_forks_utils.rs @@ -1,7 +1,7 @@ use crate::{ bank_forks::{BankForks, SnapshotConfig}, - blocktree::Blocktree, - blocktree_processor::{self, BankForksInfo, BlocktreeProcessorError, ProcessOptions}, + blockstore::Blockstore, + blockstore_processor::{self, BankForksInfo, BlockstoreProcessorError, ProcessOptions}, leader_schedule_cache::LeaderScheduleCache, snapshot_utils, }; @@ -11,11 +11,11 @@ use std::{fs, path::PathBuf, sync::Arc}; pub fn load( genesis_config: &GenesisConfig, - blocktree: &Blocktree, + blockstore: &Blockstore, account_paths: Vec, snapshot_config: Option<&SnapshotConfig>, process_options: ProcessOptions, -) -> Result<(BankForks, Vec, LeaderScheduleCache), BlocktreeProcessorError> { +) -> Result<(BankForks, Vec, LeaderScheduleCache), BlockstoreProcessorError> { if let Some(snapshot_config) = snapshot_config.as_ref() { info!( "Initializing snapshot path: {:?}", @@ -42,9 +42,9 @@ pub fn load( ) .expect("Load from snapshot failed"); - return blocktree_processor::process_blocktree_from_root( + return blockstore_processor::process_blockstore_from_root( genesis_config, - blocktree, + blockstore, Arc::new(deserialized_bank), &process_options, ); @@ -56,9 +56,9 @@ pub fn load( } info!("Processing ledger from genesis"); - blocktree_processor::process_blocktree( + blockstore_processor::process_blockstore( &genesis_config, - &blocktree, + &blockstore, account_paths, process_options, ) diff --git a/ledger/src/blocktree.rs b/ledger/src/blockstore.rs similarity index 86% rename from ledger/src/blocktree.rs rename to ledger/src/blockstore.rs index 3765c43c710d42..53a7963107f8e3 100644 --- a/ledger/src/blocktree.rs +++ b/ledger/src/blockstore.rs @@ -1,13 +1,13 @@ -//! The `blocktree` module provides functions for parallel verification of the +//! The `blockstore` module provides functions for parallel verification of the //! Proof of History ledger as well as iterative read, append write, and random //! access read to a persistent file-based ledger. 
-pub use crate::{blocktree_db::BlocktreeError, blocktree_meta::SlotMeta}; +pub use crate::{blockstore_db::BlockstoreError, blockstore_meta::SlotMeta}; use crate::{ - blocktree_db::{ + blockstore_db::{ columns as cf, Column, Database, IteratorDirection, IteratorMode, LedgerColumn, Result, WriteBatch, }, - blocktree_meta::*, + blockstore_meta::*, entry::{create_ticks, Entry}, erasure::ErasureConfig, leader_schedule_cache::LeaderScheduleCache, @@ -53,7 +53,7 @@ use std::{ time::Duration, }; -pub const BLOCKTREE_DIRECTORY: &str = "rocksdb"; +pub const BLOCKSTORE_DIRECTORY: &str = "rocksdb"; thread_local!(static PAR_THREAD_POOL: RefCell = RefCell::new(rayon::ThreadPoolBuilder::new() .num_threads(get_thread_count()) @@ -73,7 +73,7 @@ pub const MAX_DATA_SHREDS_PER_SLOT: usize = 32_768; pub type CompletedSlotsReceiver = Receiver>; // ledger window -pub struct Blocktree { +pub struct Blockstore { db: Arc, meta_cf: LedgerColumn, dead_slots_cf: LedgerColumn, @@ -104,7 +104,7 @@ pub struct SlotMetaWorkingSetEntry { did_insert_occur: bool, } -pub struct BlocktreeInsertionMetrics { +pub struct BlockstoreInsertionMetrics { pub num_shreds: usize, pub insert_lock_elapsed: u64, pub insert_shreds_elapsed: u64, @@ -128,7 +128,7 @@ impl SlotMetaWorkingSetEntry { } } -impl BlocktreeInsertionMetrics { +impl BlockstoreInsertionMetrics { pub fn report_metrics(&self, metric_name: &'static str) { datapoint_debug!( metric_name, @@ -158,21 +158,21 @@ impl BlocktreeInsertionMetrics { } } -impl Blocktree { +impl Blockstore { pub fn db(self) -> Arc { self.db } /// Opens a Ledger in directory, provides "infinite" window of shreds - pub fn open(ledger_path: &Path) -> Result { + pub fn open(ledger_path: &Path) -> Result { fs::create_dir_all(&ledger_path)?; - let blocktree_path = ledger_path.join(BLOCKTREE_DIRECTORY); + let blockstore_path = ledger_path.join(BLOCKSTORE_DIRECTORY); adjust_ulimit_nofile(); // Open the database let mut measure = Measure::start("open"); - let db = Database::open(&blocktree_path)?; + let db = Database::open(&blockstore_path)?; // Create the metadata column family let meta_cf = db.column(); @@ -203,8 +203,8 @@ impl Blocktree { let last_root = Arc::new(RwLock::new(max_root)); measure.stop(); - info!("{:?} {}", blocktree_path, measure); - Ok(Blocktree { + info!("{:?} {}", blockstore_path, measure); + Ok(Blockstore { db, meta_cf, dead_slots_cf, @@ -224,21 +224,21 @@ impl Blocktree { pub fn open_with_signal( ledger_path: &Path, ) -> Result<(Self, Receiver, CompletedSlotsReceiver)> { - let mut blocktree = Self::open(ledger_path)?; + let mut blockstore = Self::open(ledger_path)?; let (signal_sender, signal_receiver) = sync_channel(1); let (completed_slots_sender, completed_slots_receiver) = sync_channel(MAX_COMPLETED_SLOTS_IN_CHANNEL); - blocktree.new_shreds_signals = vec![signal_sender]; - blocktree.completed_slots_senders = vec![completed_slots_sender]; + blockstore.new_shreds_signals = vec![signal_sender]; + blockstore.completed_slots_senders = vec![completed_slots_sender]; - Ok((blocktree, signal_receiver, completed_slots_receiver)) + Ok((blockstore, signal_receiver, completed_slots_receiver)) } pub fn destroy(ledger_path: &Path) -> Result<()> { // Database::destroy() fails if the path doesn't exist fs::create_dir_all(ledger_path)?; - let blocktree_path = ledger_path.join(BLOCKTREE_DIRECTORY); - Database::destroy(&blocktree_path) + let blockstore_path = ledger_path.join(BLOCKSTORE_DIRECTORY); + Database::destroy(&blockstore_path) } pub fn meta(&self, slot: Slot) -> Result> { @@ -254,7 +254,7 @@ impl 
Blocktree { false } - /// Silently deletes all blocktree column families starting at the given slot until the `to` slot + /// Silently deletes all blockstore column families starting at the given slot until the `to` slot /// Dangerous; Use with care: /// Does not check for integrity and does not update slot metas that refer to deleted slots /// Modifies multiple column families simultaneously @@ -459,7 +459,7 @@ impl Blocktree { for (&(slot, set_index), erasure_meta) in erasure_metas.iter() { let submit_metrics = |attempted: bool, status: String, recovered: usize| { datapoint_debug!( - "blocktree-erasure", + "blockstore-erasure", ("slot", slot as i64, i64), ("start_index", set_index as i64, i64), ( @@ -508,7 +508,7 @@ impl Blocktree { .map(|s| { // Remove from the index so it doesn't get committed. We know // this is safe to do because everything in - // `prev_inserted_codes` does not yet exist in blocktree + // `prev_inserted_codes` does not yet exist in blockstore // (guaranteed by `check_cache_coding_shred`) index.coding_mut().set_present(i, false); s @@ -553,7 +553,7 @@ impl Blocktree { if prev_inserted_codes.remove(&(slot, i)).is_some() { // Remove from the index so it doesn't get committed. We know // this is safe to do because everything in - // `prev_inserted_codes` does not yet exist in blocktree + // `prev_inserted_codes` does not yet exist in blockstore // (guaranteed by `check_cache_coding_shred`) index.coding_mut().set_present(i, false); } @@ -574,9 +574,9 @@ impl Blocktree { shreds: Vec, leader_schedule: Option<&Arc>, is_trusted: bool, - ) -> Result { + ) -> Result { let mut total_start = Measure::start("Total elapsed"); - let mut start = Measure::start("Blocktree lock"); + let mut start = Measure::start("Blockstore lock"); let _lock = self.insert_shreds_lock.lock().unwrap(); start.stop(); let insert_lock_elapsed = start.as_us(); @@ -708,7 +708,7 @@ impl Blocktree { total_start.stop(); - Ok(BlocktreeInsertionMetrics { + Ok(BlockstoreInsertionMetrics { num_shreds, total_elapsed: total_start.as_us(), insert_lock_elapsed, @@ -764,7 +764,7 @@ impl Blocktree { // This gives the index of first coding shred in this FEC block // So, all coding shreds in a given FEC block will have the same set index if is_trusted - || Blocktree::should_insert_coding_shred(&shred, index_meta.coding(), &self.last_root) + || Blockstore::should_insert_coding_shred(&shred, index_meta.coding(), &self.last_root) { let set_index = u64::from(shred.common_header.fec_set_index); let erasure_config = ErasureConfig::new( @@ -833,7 +833,7 @@ impl Blocktree { let slot_meta = &mut slot_meta_entry.new_slot_meta.borrow_mut(); if is_trusted - || Blocktree::should_insert_data_shred( + || Blockstore::should_insert_data_shred( &shred, slot_meta, index_meta.data(), @@ -921,7 +921,7 @@ impl Blocktree { false }; - // Check that the data shred doesn't already exist in blocktree + // Check that the data shred doesn't already exist in blockstore if shred_index < slot_meta.consumed || data_index.is_present(shred_index) { return false; } @@ -931,7 +931,7 @@ impl Blocktree { let last_index = slot_meta.last_index; if shred_index >= last_index { datapoint_error!( - "blocktree_error", + "blockstore_error", ( "error", format!( @@ -947,7 +947,7 @@ impl Blocktree { // less than our current received if last_in_slot && shred_index < slot_meta.received { datapoint_error!( - "blocktree_error", + "blockstore_error", ( "error", format!( @@ -1367,7 +1367,7 @@ impl Blocktree { return Ok(block); } } - Err(BlocktreeError::SlotNotRooted) + 
Err(BlockstoreError::SlotNotRooted) } fn map_transactions_to_statuses<'a>( @@ -1448,7 +1448,7 @@ impl Blocktree { start_index: u64, ) -> Result<(Vec, usize, bool)> { if self.is_dead(slot) { - return Err(BlocktreeError::DeadSlot); + return Err(BlockstoreError::DeadSlot); } let slot_meta_cf = self.db.column::(); let slot_meta = slot_meta_cf.get(slot)?; @@ -1531,7 +1531,7 @@ impl Blocktree { .expect("Shred must exist if shred index was included in a range"), ) .map_err(|err| { - BlocktreeError::InvalidShredData(Box::new(bincode::ErrorKind::Custom( + BlockstoreError::InvalidShredData(Box::new(bincode::ErrorKind::Custom( format!( "Could not reconstruct shred from shred payload: {:?}", err @@ -1546,14 +1546,14 @@ impl Blocktree { assert!(data_shreds.last().unwrap().data_complete()); let deshred_payload = Shredder::deshred(&data_shreds).map_err(|_| { - BlocktreeError::InvalidShredData(Box::new(bincode::ErrorKind::Custom( + BlockstoreError::InvalidShredData(Box::new(bincode::ErrorKind::Custom( "Could not reconstruct data block from constituent shreds".to_string(), ))) })?; debug!("{:?} shreds in last FEC set", data_shreds.len(),); bincode::deserialize::>(&deshred_payload).map_err(|_| { - BlocktreeError::InvalidShredData(Box::new(bincode::ErrorKind::Custom( + BlockstoreError::InvalidShredData(Box::new(bincode::ErrorKind::Custom( "could not reconstruct entries".to_string(), ))) }) @@ -1646,7 +1646,7 @@ impl Blocktree { results } - /// Prune blocktree such that slots higher than `target_slot` are deleted and all references to + /// Prune blockstore such that slots higher than `target_slot` are deleted and all references to /// higher slots are removed pub fn prune(&self, target_slot: Slot) { let mut meta = self @@ -1683,7 +1683,7 @@ impl Blocktree { *self.last_root.read().unwrap() } - // find the first available slot in blocktree that has some data in it + // find the first available slot in blockstore that has some data in it pub fn lowest_slot(&self) -> Slot { for (slot, meta) in self .slot_meta_iterator(0) @@ -1693,7 +1693,7 @@ impl Blocktree { return slot; } } - // This means blocktree is empty, should never get here aside from right at boot. + // This means blockstore is empty, should never get here aside from right at boot. self.last_root() } @@ -1830,7 +1830,7 @@ fn send_signals( let res = signal.try_send(slots); if let Err(TrySendError::Full(_)) = res { datapoint_error!( - "blocktree_error", + "blockstore_error", ( "error", "Unable to send newly completed slot because channel is full".to_string(), @@ -2105,11 +2105,11 @@ fn slot_has_updates(slot_meta: &SlotMeta, slot_meta_backup: &Option) - // // Returns the blockhash that can be used to append entries with. pub fn create_new_ledger(ledger_path: &Path, genesis_config: &GenesisConfig) -> Result { - Blocktree::destroy(ledger_path)?; + Blockstore::destroy(ledger_path)?; genesis_config.write(&ledger_path)?; // Fill slot 0 with ticks that link back to the genesis_config to bootstrap the ledger. 
- let blocktree = Blocktree::open(ledger_path)?; + let blockstore = Blockstore::open(ledger_path)?; let ticks_per_slot = genesis_config.ticks_per_slot; let hashes_per_tick = genesis_config.poh_config.hashes_per_tick.unwrap_or(0); let entries = create_ticks(ticks_per_slot, hashes_per_tick, genesis_config.hash()); @@ -2121,10 +2121,10 @@ pub fn create_new_ledger(ledger_path: &Path, genesis_config: &GenesisConfig) -> let shreds = shredder.entries_to_shreds(&entries, true, 0).0; assert!(shreds.last().unwrap().last_in_slot()); - blocktree.insert_shreds(shreds, None, false)?; - blocktree.set_roots(&[0])?; - // Explicitly close the blocktree before we create the archived genesis file - drop(blocktree); + blockstore.insert_shreds(shreds, None, false)?; + blockstore.set_roots(&[0])?; + // Explicitly close the blockstore before we create the archived genesis file + drop(blockstore); let archive_path = ledger_path.join("genesis.tar.bz2"); let args = vec![ @@ -2145,7 +2145,7 @@ pub fn create_new_ledger(ledger_path: &Path, genesis_config: &GenesisConfig) -> error!("tar stdout: {}", from_utf8(&output.stdout).unwrap_or("?")); error!("tar stderr: {}", from_utf8(&output.stderr).unwrap_or("?")); - return Err(BlocktreeError::IO(IOError::new( + return Err(BlockstoreError::IO(IOError::new( ErrorKind::Other, format!( "Error trying to generate snapshot archive: {}", @@ -2167,7 +2167,7 @@ macro_rules! tmp_ledger_name { #[macro_export] macro_rules! get_tmp_ledger_path { () => { - $crate::blocktree::get_ledger_path_from_name($crate::tmp_ledger_name!()) + $crate::blockstore::get_ledger_path_from_name($crate::tmp_ledger_name!()) }; } @@ -2193,7 +2193,7 @@ pub fn get_ledger_path_from_name(name: &str) -> PathBuf { #[macro_export] macro_rules! create_new_tmp_ledger { ($genesis_config:expr) => { - $crate::blocktree::create_new_ledger_from_name($crate::tmp_ledger_name!(), $genesis_config) + $crate::blockstore::create_new_ledger_from_name($crate::tmp_ledger_name!(), $genesis_config) }; } @@ -2334,7 +2334,7 @@ fn adjust_ulimit_nofile() { pub mod tests { use super::*; use crate::{ - blocktree_processor::fill_blocktree_slot_with_ticks, + blockstore_processor::fill_blockstore_slot_with_ticks, entry::{next_entry, next_entry_mut}, genesis_utils::{create_genesis_config, GenesisConfigInfo}, leader_schedule::{FixedSchedule, LeaderSchedule}, @@ -2375,64 +2375,64 @@ pub mod tests { } // check that all columns are either empty or start at `min_slot` - fn test_all_empty_or_min(blocktree: &Blocktree, min_slot: Slot) { - let condition_met = blocktree + fn test_all_empty_or_min(blockstore: &Blockstore, min_slot: Slot) { + let condition_met = blockstore .db .iter::(IteratorMode::Start) .unwrap() .next() .map(|(slot, _)| slot >= min_slot) .unwrap_or(true) - & blocktree + & blockstore .db .iter::(IteratorMode::Start) .unwrap() .next() .map(|(slot, _)| slot >= min_slot) .unwrap_or(true) - & blocktree + & blockstore .db .iter::(IteratorMode::Start) .unwrap() .next() .map(|((slot, _), _)| slot >= min_slot) .unwrap_or(true) - & blocktree + & blockstore .db .iter::(IteratorMode::Start) .unwrap() .next() .map(|((slot, _), _)| slot >= min_slot) .unwrap_or(true) - & blocktree + & blockstore .db .iter::(IteratorMode::Start) .unwrap() .next() .map(|(slot, _)| slot >= min_slot) .unwrap_or(true) - & blocktree + & blockstore .db .iter::(IteratorMode::Start) .unwrap() .next() .map(|((slot, _), _)| slot >= min_slot) .unwrap_or(true) - & blocktree + & blockstore .db .iter::(IteratorMode::Start) .unwrap() .next() .map(|(slot, _)| slot >= min_slot) 
.unwrap_or(true) - & blocktree + & blockstore .db .iter::(IteratorMode::Start) .unwrap() .next() .map(|(slot, _)| slot >= min_slot) .unwrap_or(true) - & blocktree + & blockstore .db .iter::(IteratorMode::Start) .unwrap() @@ -2447,7 +2447,7 @@ pub mod tests { let mint_total = 1_000_000_000_000; let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(mint_total); let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config); - let ledger = Blocktree::open(&ledger_path).unwrap(); + let ledger = Blockstore::open(&ledger_path).unwrap(); let ticks = create_ticks(genesis_config.ticks_per_slot, 0, genesis_config.hash()); let entries = ledger.get_slot_entries(0, 0, None).unwrap(); @@ -2456,7 +2456,7 @@ pub mod tests { // Destroying database without closing it first is undefined behavior drop(ledger); - Blocktree::destroy(&ledger_path).expect("Expected successful database destruction"); + Blockstore::destroy(&ledger_path).expect("Expected successful database destruction"); } #[test] @@ -2468,7 +2468,7 @@ pub mod tests { let (mut shreds, _) = make_slot_entries(0, 0, num_entries); let ledger_path = get_tmp_ledger_path!(); - let ledger = Blocktree::open(&ledger_path).unwrap(); + let ledger = Blockstore::open(&ledger_path).unwrap(); // Insert last shred, test we can retrieve it let last_shred = shreds.pop().unwrap(); @@ -2487,7 +2487,7 @@ pub mod tests { assert_eq!(last_shred, deserialized_shred); // Destroying database without closing it first is undefined behavior drop(ledger); - Blocktree::destroy(&ledger_path).expect("Expected successful database destruction"); + Blockstore::destroy(&ledger_path).expect("Expected successful database destruction"); } #[test] @@ -2497,7 +2497,7 @@ pub mod tests { { let ticks_per_slot = 10; let num_slots = 10; - let ledger = Blocktree::open(&ledger_path).unwrap(); + let ledger = Blockstore::open(&ledger_path).unwrap(); let mut ticks = vec![]; //let mut shreds_per_slot = 0 as u64; let mut shreds_per_slot = vec![]; @@ -2586,13 +2586,13 @@ pub mod tests { ); */ } - Blocktree::destroy(&ledger_path).expect("Expected successful database destruction"); + Blockstore::destroy(&ledger_path).expect("Expected successful database destruction"); } #[test] fn test_put_get_simple() { let ledger_path = get_tmp_ledger_path!(); - let ledger = Blocktree::open(&ledger_path).unwrap(); + let ledger = Blockstore::open(&ledger_path).unwrap(); // Test meta column family let meta = SlotMeta::new(0, 1); @@ -2636,7 +2636,7 @@ pub mod tests { // Destroying database without closing it first is undefined behavior drop(ledger); - Blocktree::destroy(&ledger_path).expect("Expected successful database destruction"); + Blockstore::destroy(&ledger_path).expect("Expected successful database destruction"); } #[test] @@ -2647,7 +2647,7 @@ pub mod tests { let shred_bufs: Vec<_> = shreds.iter().map(|shred| shred.payload.clone()).collect(); let ledger_path = get_tmp_ledger_path!(); - let ledger = Blocktree::open(&ledger_path).unwrap(); + let ledger = Blockstore::open(&ledger_path).unwrap(); ledger.insert_shreds(shreds, None, false).unwrap(); let mut buf = [0; 4096]; @@ -2696,7 +2696,7 @@ pub mod tests { // Destroying database without closing it first is undefined behavior drop(ledger); - Blocktree::destroy(&ledger_path).expect("Expected successful database destruction"); + Blockstore::destroy(&ledger_path).expect("Expected successful database destruction"); } #[test] @@ -2709,7 +2709,7 @@ pub mod tests { let num_shreds = shreds.len() as u64; let ledger_path = get_tmp_ledger_path!(); - let 
ledger = Blocktree::open(&ledger_path).unwrap(); + let ledger = Blockstore::open(&ledger_path).unwrap(); // Insert last shred, we're missing the other shreds, so no consecutive // shreds starting from slot 0, index 0 should exist. @@ -2743,7 +2743,7 @@ pub mod tests { // Destroying database without closing it first is undefined behavior drop(ledger); - Blocktree::destroy(&ledger_path).expect("Expected successful database destruction"); + Blockstore::destroy(&ledger_path).expect("Expected successful database destruction"); } #[test] @@ -2754,7 +2754,7 @@ pub mod tests { let num_shreds = shreds.len() as u64; let ledger_path = get_tmp_ledger_path!(); - let ledger = Blocktree::open(&ledger_path).unwrap(); + let ledger = Blockstore::open(&ledger_path).unwrap(); // Insert shreds in reverse, check for consecutive returned shreds for i in (0..num_shreds).rev() { @@ -2779,7 +2779,7 @@ pub mod tests { // Destroying database without closing it first is undefined behavior drop(ledger); - Blocktree::destroy(&ledger_path).expect("Expected successful database destruction"); + Blockstore::destroy(&ledger_path).expect("Expected successful database destruction"); } #[test] @@ -2792,9 +2792,9 @@ pub mod tests { #[test] pub fn test_iteration_order() { let slot = 0; - let blocktree_path = get_tmp_ledger_path!(); + let blockstore_path = get_tmp_ledger_path!(); { - let blocktree = Blocktree::open(&blocktree_path).unwrap(); + let blockstore = Blockstore::open(&blockstore_path).unwrap(); // Write entries let num_entries = 8; @@ -2806,11 +2806,11 @@ pub mod tests { b.set_slot(0); } - blocktree + blockstore .write_shreds(&shreds) .expect("Expected successful write of shreds"); - let mut db_iterator = blocktree + let mut db_iterator = blockstore .db .cursor::() .expect("Expected to be able to open database iterator"); @@ -2825,18 +2825,18 @@ pub mod tests { db_iterator.next(); } } - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } */ #[test] pub fn test_get_slot_entries1() { - let blocktree_path = get_tmp_ledger_path!(); + let blockstore_path = get_tmp_ledger_path!(); { - let blocktree = Blocktree::open(&blocktree_path).unwrap(); + let blockstore = Blockstore::open(&blockstore_path).unwrap(); let entries = create_ticks(8, 0, Hash::default()); let shreds = entries_to_test_shreds(entries[0..4].to_vec(), 1, 0, false, 0); - blocktree + blockstore .insert_shreds(shreds, None, false) .expect("Expected successful write of shreds"); @@ -2844,16 +2844,16 @@ pub mod tests { for (i, b) in shreds1.iter_mut().enumerate() { b.set_index(8 + i as u32); } - blocktree + blockstore .insert_shreds(shreds1, None, false) .expect("Expected successful write of shreds"); assert_eq!( - blocktree.get_slot_entries(1, 0, None).unwrap()[2..4], + blockstore.get_slot_entries(1, 0, None).unwrap()[2..4], entries[2..4], ); } - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } // This test seems to be unnecessary with introduction of data shreds. 
There are no @@ -2861,9 +2861,9 @@ pub mod tests { #[test] #[ignore] pub fn test_get_slot_entries2() { - let blocktree_path = get_tmp_ledger_path!(); + let blockstore_path = get_tmp_ledger_path!(); { - let blocktree = Blocktree::open(&blocktree_path).unwrap(); + let blockstore = Blockstore::open(&blockstore_path).unwrap(); // Write entries let num_slots = 5 as u64; @@ -2878,26 +2878,26 @@ pub mod tests { b.set_slot(slot as u64); index += 1; } - blocktree + blockstore .insert_shreds(shreds, None, false) .expect("Expected successful write of shreds"); assert_eq!( - blocktree + blockstore .get_slot_entries(slot, u64::from(index - 1), None) .unwrap(), vec![last_entry], ); } } - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] pub fn test_get_slot_entries3() { // Test inserting/fetching shreds which contain multiple entries per shred - let blocktree_path = get_tmp_ledger_path!(); + let blockstore_path = get_tmp_ledger_path!(); { - let blocktree = Blocktree::open(&blocktree_path).unwrap(); + let blockstore = Blockstore::open(&blockstore_path).unwrap(); let num_slots = 5 as u64; let shreds_per_slot = 5 as u64; let entry_serialized_size = @@ -2911,20 +2911,20 @@ pub mod tests { let shreds = entries_to_test_shreds(entries.clone(), slot, slot.saturating_sub(1), false, 0); assert!(shreds.len() as u64 >= shreds_per_slot); - blocktree + blockstore .insert_shreds(shreds, None, false) .expect("Expected successful write of shreds"); - assert_eq!(blocktree.get_slot_entries(slot, 0, None).unwrap(), entries); + assert_eq!(blockstore.get_slot_entries(slot, 0, None).unwrap(), entries); } } - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] pub fn test_insert_data_shreds_consecutive() { - let blocktree_path = get_tmp_ledger_path!(); + let blockstore_path = get_tmp_ledger_path!(); { - let blocktree = Blocktree::open(&blocktree_path).unwrap(); + let blockstore = Blockstore::open(&blockstore_path).unwrap(); // Create enough entries to ensure there are at least two shreds created let min_entries = max_ticks_per_n_shreds(1) + 1; for i in 0..4 { @@ -2947,11 +2947,11 @@ pub mod tests { } } - blocktree.insert_shreds(odd_shreds, None, false).unwrap(); + blockstore.insert_shreds(odd_shreds, None, false).unwrap(); - assert_eq!(blocktree.get_slot_entries(slot, 0, None).unwrap(), vec![]); + assert_eq!(blockstore.get_slot_entries(slot, 0, None).unwrap(), vec![]); - let meta = blocktree.meta(slot).unwrap().unwrap(); + let meta = blockstore.meta(slot).unwrap().unwrap(); if num_shreds % 2 == 0 { assert_eq!(meta.received, num_shreds); } else { @@ -2965,14 +2965,14 @@ pub mod tests { assert_eq!(meta.last_index, std::u64::MAX); } - blocktree.insert_shreds(even_shreds, None, false).unwrap(); + blockstore.insert_shreds(even_shreds, None, false).unwrap(); assert_eq!( - blocktree.get_slot_entries(slot, 0, None).unwrap(), + blockstore.get_slot_entries(slot, 0, None).unwrap(), original_entries, ); - let meta = blocktree.meta(slot).unwrap().unwrap(); + let meta = blockstore.meta(slot).unwrap().unwrap(); assert_eq!(meta.received, num_shreds); assert_eq!(meta.consumed, num_shreds); assert_eq!(meta.parent_slot, parent_slot); @@ -2980,15 +2980,15 @@ pub mod tests { } } - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + 
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] pub fn test_insert_data_shreds_duplicate() { // Create RocksDb ledger - let blocktree_path = get_tmp_ledger_path!(); + let blockstore_path = get_tmp_ledger_path!(); { - let blocktree = Blocktree::open(&blocktree_path).unwrap(); + let blockstore = Blockstore::open(&blockstore_path).unwrap(); // Make duplicate entries and shreds let num_unique_entries = 10; @@ -2998,37 +2998,37 @@ pub mod tests { // Discard first shred original_shreds.remove(0); - blocktree + blockstore .insert_shreds(original_shreds, None, false) .unwrap(); - assert_eq!(blocktree.get_slot_entries(0, 0, None).unwrap(), vec![]); + assert_eq!(blockstore.get_slot_entries(0, 0, None).unwrap(), vec![]); let duplicate_shreds = entries_to_test_shreds(original_entries.clone(), 0, 0, true, 0); let num_shreds = duplicate_shreds.len() as u64; - blocktree + blockstore .insert_shreds(duplicate_shreds, None, false) .unwrap(); assert_eq!( - blocktree.get_slot_entries(0, 0, None).unwrap(), + blockstore.get_slot_entries(0, 0, None).unwrap(), original_entries ); - let meta = blocktree.meta(0).unwrap().unwrap(); + let meta = blockstore.meta(0).unwrap().unwrap(); assert_eq!(meta.consumed, num_shreds); assert_eq!(meta.received, num_shreds); assert_eq!(meta.parent_slot, 0); assert_eq!(meta.last_index, num_shreds - 1); } - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] pub fn test_new_shreds_signal() { // Initialize ledger let ledger_path = get_tmp_ledger_path!(); - let (ledger, recvr, _) = Blocktree::open_with_signal(&ledger_path).unwrap(); + let (ledger, recvr, _) = Blockstore::open_with_signal(&ledger_path).unwrap(); let ledger = Arc::new(ledger); let entries_per_slot = 50; @@ -3101,14 +3101,14 @@ pub mod tests { // Destroying database without closing it first is undefined behavior drop(ledger); - Blocktree::destroy(&ledger_path).expect("Expected successful database destruction"); + Blockstore::destroy(&ledger_path).expect("Expected successful database destruction"); } #[test] pub fn test_completed_shreds_signal() { // Initialize ledger let ledger_path = get_tmp_ledger_path!(); - let (ledger, _, recvr) = Blocktree::open_with_signal(&ledger_path).unwrap(); + let (ledger, _, recvr) = Blockstore::open_with_signal(&ledger_path).unwrap(); let ledger = Arc::new(ledger); let entries_per_slot = 10; @@ -3130,7 +3130,7 @@ pub mod tests { pub fn test_completed_shreds_signal_orphans() { // Initialize ledger let ledger_path = get_tmp_ledger_path!(); - let (ledger, _, recvr) = Blocktree::open_with_signal(&ledger_path).unwrap(); + let (ledger, _, recvr) = Blockstore::open_with_signal(&ledger_path).unwrap(); let ledger = Arc::new(ledger); let entries_per_slot = 10; @@ -3170,7 +3170,7 @@ pub mod tests { pub fn test_completed_shreds_signal_many() { // Initialize ledger let ledger_path = get_tmp_ledger_path!(); - let (ledger, _, recvr) = Blocktree::open_with_signal(&ledger_path).unwrap(); + let (ledger, _, recvr) = Blockstore::open_with_signal(&ledger_path).unwrap(); let ledger = Arc::new(ledger); let entries_per_slot = 10; @@ -3199,11 +3199,11 @@ pub mod tests { #[test] pub fn test_handle_chaining_basic() { - let blocktree_path = get_tmp_ledger_path!(); + let blockstore_path = get_tmp_ledger_path!(); { let entries_per_slot = 5; let num_slots = 3; - let blocktree = Blocktree::open(&blocktree_path).unwrap(); + let blockstore 
= Blockstore::open(&blockstore_path).unwrap(); // Construct the shreds let (mut shreds, _) = make_many_slot_entries(0, num_slots, entries_per_slot); @@ -3213,8 +3213,8 @@ pub mod tests { let shreds1 = shreds .drain(shreds_per_slot..2 * shreds_per_slot) .collect_vec(); - blocktree.insert_shreds(shreds1, None, false).unwrap(); - let s1 = blocktree.meta(1).unwrap().unwrap(); + blockstore.insert_shreds(shreds1, None, false).unwrap(); + let s1 = blockstore.meta(1).unwrap().unwrap(); assert!(s1.next_slots.is_empty()); // Slot 1 is not trunk because slot 0 hasn't been inserted yet assert!(!s1.is_connected); @@ -3225,8 +3225,8 @@ pub mod tests { let shreds2 = shreds .drain(shreds_per_slot..2 * shreds_per_slot) .collect_vec(); - blocktree.insert_shreds(shreds2, None, false).unwrap(); - let s2 = blocktree.meta(2).unwrap().unwrap(); + blockstore.insert_shreds(shreds2, None, false).unwrap(); + let s2 = blockstore.meta(2).unwrap().unwrap(); assert!(s2.next_slots.is_empty()); // Slot 2 is not trunk because slot 0 hasn't been inserted yet assert!(!s2.is_connected); @@ -3235,7 +3235,7 @@ pub mod tests { // Check the first slot again, it should chain to the second slot, // but still isn't part of the trunk - let s1 = blocktree.meta(1).unwrap().unwrap(); + let s1 = blockstore.meta(1).unwrap().unwrap(); assert_eq!(s1.next_slots, vec![2]); assert!(!s1.is_connected); assert_eq!(s1.parent_slot, 0); @@ -3243,9 +3243,9 @@ pub mod tests { // 3) Write to the zeroth slot, check that every slot // is now part of the trunk - blocktree.insert_shreds(shreds, None, false).unwrap(); + blockstore.insert_shreds(shreds, None, false).unwrap(); for i in 0..3 { - let s = blocktree.meta(i).unwrap().unwrap(); + let s = blockstore.meta(i).unwrap().unwrap(); // The last slot will not chain to any other slots if i != 2 { assert_eq!(s.next_slots, vec![i + 1]); @@ -3259,14 +3259,14 @@ pub mod tests { assert!(s.is_connected); } } - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] pub fn test_handle_chaining_missing_slots() { - let blocktree_path = get_tmp_ledger_path!(); + let blockstore_path = get_tmp_ledger_path!(); { - let blocktree = Blocktree::open(&blocktree_path).unwrap(); + let blockstore = Blockstore::open(&blockstore_path).unwrap(); let num_slots = 30; let entries_per_slot = 5; @@ -3293,7 +3293,7 @@ pub mod tests { } // Write the shreds for every other slot - blocktree.insert_shreds(slots, None, false).unwrap(); + blockstore.insert_shreds(slots, None, false).unwrap(); // Check metadata for i in 0..num_slots { @@ -3302,7 +3302,7 @@ pub mod tests { // However, if it's a slot we haven't inserted, aka one of the gaps, then one of the // slots we just inserted will chain to that gap, so next_slots for that orphan slot // won't be empty, but the parent slot is unknown so should equal std::u64::MAX. 
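For readers following the chaining assertions in this hunk: a gap ("orphan") slot is recognizable purely from the SlotMeta fields the test already inspects. The sketch below is illustrative only; it is not the crate's is_orphan helper and assumes nothing beyond the Blockstore::meta accessor and the parent_slot / next_slots fields used by the surrounding assertions.

    fn looks_like_orphan(blockstore: &Blockstore, slot: u64) -> bool {
        match blockstore.meta(slot).expect("meta read should succeed") {
            // A gap slot has a child chaining to it (next_slots is non-empty),
            // but its parent was never inserted, so parent_slot is still the
            // std::u64::MAX sentinel.
            Some(meta) => meta.parent_slot == std::u64::MAX && !meta.next_slots.is_empty(),
            None => false,
        }
    }
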
- let s = blocktree.meta(i as u64).unwrap().unwrap(); + let s = blockstore.meta(i as u64).unwrap().unwrap(); if i % 2 == 0 { assert_eq!(s.next_slots, vec![i as u64 + 1]); assert_eq!(s.parent_slot, std::u64::MAX); @@ -3319,12 +3319,14 @@ pub mod tests { } // Write the shreds for the other half of the slots that we didn't insert earlier - blocktree.insert_shreds(missing_slots, None, false).unwrap(); + blockstore + .insert_shreds(missing_slots, None, false) + .unwrap(); for i in 0..num_slots { // Check that all the slots chain correctly once the missing slots // have been filled - let s = blocktree.meta(i as u64).unwrap().unwrap(); + let s = blockstore.meta(i as u64).unwrap().unwrap(); if i != num_slots - 1 { assert_eq!(s.next_slots, vec![i as u64 + 1]); } else { @@ -3341,14 +3343,14 @@ pub mod tests { } } - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] pub fn test_forward_chaining_is_connected() { - let blocktree_path = get_tmp_ledger_path!(); + let blockstore_path = get_tmp_ledger_path!(); { - let blocktree = Blocktree::open(&blocktree_path).unwrap(); + let blockstore = Blockstore::open(&blockstore_path).unwrap(); let num_slots = 15; // Create enough entries to ensure there are at least two shreds created let entries_per_slot = max_ticks_per_n_shreds(1) + 1; @@ -3365,11 +3367,11 @@ pub mod tests { if slot % 3 == 0 { let shred0 = shreds_for_slot.remove(0); missing_shreds.push(shred0); - blocktree + blockstore .insert_shreds(shreds_for_slot, None, false) .unwrap(); } else { - blocktree + blockstore .insert_shreds(shreds_for_slot, None, false) .unwrap(); } @@ -3377,7 +3379,7 @@ pub mod tests { // Check metadata for i in 0..num_slots { - let s = blocktree.meta(i as u64).unwrap().unwrap(); + let s = blockstore.meta(i as u64).unwrap().unwrap(); // The last slot will not chain to any other slots if i as u64 != num_slots - 1 { assert_eq!(s.next_slots, vec![i as u64 + 1]); @@ -3406,10 +3408,10 @@ pub mod tests { for slot_index in 0..num_slots { if slot_index % 3 == 0 { let shred = missing_shreds.remove(0); - blocktree.insert_shreds(vec![shred], None, false).unwrap(); + blockstore.insert_shreds(vec![shred], None, false).unwrap(); for i in 0..num_slots { - let s = blocktree.meta(i as u64).unwrap().unwrap(); + let s = blockstore.meta(i as u64).unwrap().unwrap(); if i != num_slots - 1 { assert_eq!(s.next_slots, vec![i as u64 + 1]); } else { @@ -3432,14 +3434,14 @@ pub mod tests { } } } - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } /* #[test] pub fn test_chaining_tree() { - let blocktree_path = get_tmp_ledger_path!(); + let blockstore_path = get_tmp_ledger_path!(); { - let blocktree = Blocktree::open(&blocktree_path).unwrap(); + let blockstore = Blockstore::open(&blockstore_path).unwrap(); let num_tree_levels = 6; assert!(num_tree_levels > 1); let branching_factor: u64 = 4; @@ -3485,11 +3487,11 @@ pub mod tests { // Randomly pick whether to insert erasure or coding shreds first if rng.gen_bool(0.5) { - blocktree.write_shreds(slot_shreds).unwrap(); - blocktree.put_shared_coding_shreds(&coding_shreds).unwrap(); + blockstore.write_shreds(slot_shreds).unwrap(); + blockstore.put_shared_coding_shreds(&coding_shreds).unwrap(); } else { - blocktree.put_shared_coding_shreds(&coding_shreds).unwrap(); - 
blocktree.write_shreds(slot_shreds).unwrap(); + blockstore.put_shared_coding_shreds(&coding_shreds).unwrap(); + blockstore.write_shreds(slot_shreds).unwrap(); } } @@ -3497,7 +3499,7 @@ pub mod tests { let last_level = (branching_factor.pow(num_tree_levels - 1) - 1) / (branching_factor - 1); for slot in 0..num_slots { - let slot_meta = blocktree.meta(slot).unwrap().unwrap(); + let slot_meta = blockstore.meta(slot).unwrap().unwrap(); assert_eq!(slot_meta.consumed, entries_per_slot); assert_eq!(slot_meta.received, entries_per_slot); assert!(slot_meta.is_connected); @@ -3530,54 +3532,57 @@ pub mod tests { } // No orphan slots should exist - assert!(blocktree.orphans_cf.is_empty().unwrap()) + assert!(blockstore.orphans_cf.is_empty().unwrap()) } - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } */ #[test] pub fn test_get_slots_since() { - let blocktree_path = get_tmp_ledger_path!(); + let blockstore_path = get_tmp_ledger_path!(); { - let blocktree = Blocktree::open(&blocktree_path).unwrap(); + let blockstore = Blockstore::open(&blockstore_path).unwrap(); // Slot doesn't exist - assert!(blocktree.get_slots_since(&vec![0]).unwrap().is_empty()); + assert!(blockstore.get_slots_since(&vec![0]).unwrap().is_empty()); let mut meta0 = SlotMeta::new(0, 0); - blocktree.meta_cf.put(0, &meta0).unwrap(); + blockstore.meta_cf.put(0, &meta0).unwrap(); // Slot exists, chains to nothing let expected: HashMap> = HashMap::from_iter(vec![(0, vec![])].into_iter()); - assert_eq!(blocktree.get_slots_since(&vec![0]).unwrap(), expected); + assert_eq!(blockstore.get_slots_since(&vec![0]).unwrap(), expected); meta0.next_slots = vec![1, 2]; - blocktree.meta_cf.put(0, &meta0).unwrap(); + blockstore.meta_cf.put(0, &meta0).unwrap(); // Slot exists, chains to some other slots let expected: HashMap> = HashMap::from_iter(vec![(0, vec![1, 2])].into_iter()); - assert_eq!(blocktree.get_slots_since(&vec![0]).unwrap(), expected); - assert_eq!(blocktree.get_slots_since(&vec![0, 1]).unwrap(), expected); + assert_eq!(blockstore.get_slots_since(&vec![0]).unwrap(), expected); + assert_eq!(blockstore.get_slots_since(&vec![0, 1]).unwrap(), expected); let mut meta3 = SlotMeta::new(3, 1); meta3.next_slots = vec![10, 5]; - blocktree.meta_cf.put(3, &meta3).unwrap(); + blockstore.meta_cf.put(3, &meta3).unwrap(); let expected: HashMap> = HashMap::from_iter(vec![(0, vec![1, 2]), (3, vec![10, 5])].into_iter()); - assert_eq!(blocktree.get_slots_since(&vec![0, 1, 3]).unwrap(), expected); + assert_eq!( + blockstore.get_slots_since(&vec![0, 1, 3]).unwrap(), + expected + ); } - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] fn test_orphans() { - let blocktree_path = get_tmp_ledger_path!(); + let blockstore_path = get_tmp_ledger_path!(); { - let blocktree = Blocktree::open(&blocktree_path).unwrap(); + let blockstore = Blockstore::open(&blockstore_path).unwrap(); // Create shreds and entries let entries_per_slot = 1; @@ -3587,61 +3592,61 @@ pub mod tests { // Write slot 2, which chains to slot 1. 
We're missing slot 0, // so slot 1 is the orphan let shreds_for_slot = shreds.drain((shreds_per_slot * 2)..).collect_vec(); - blocktree + blockstore .insert_shreds(shreds_for_slot, None, false) .unwrap(); - let meta = blocktree + let meta = blockstore .meta(1) .expect("Expect database get to succeed") .unwrap(); assert!(is_orphan(&meta)); - assert_eq!(blocktree.get_orphans(None), vec![1]); + assert_eq!(blockstore.get_orphans(None), vec![1]); // Write slot 1 which chains to slot 0, so now slot 0 is the // orphan, and slot 1 is no longer the orphan. let shreds_for_slot = shreds.drain(shreds_per_slot..).collect_vec(); - blocktree + blockstore .insert_shreds(shreds_for_slot, None, false) .unwrap(); - let meta = blocktree + let meta = blockstore .meta(1) .expect("Expect database get to succeed") .unwrap(); assert!(!is_orphan(&meta)); - let meta = blocktree + let meta = blockstore .meta(0) .expect("Expect database get to succeed") .unwrap(); assert!(is_orphan(&meta)); - assert_eq!(blocktree.get_orphans(None), vec![0]); + assert_eq!(blockstore.get_orphans(None), vec![0]); // Write some slot that also chains to existing slots and orphan, // nothing should change let (shred4, _) = make_slot_entries(4, 0, 1); let (shred5, _) = make_slot_entries(5, 1, 1); - blocktree.insert_shreds(shred4, None, false).unwrap(); - blocktree.insert_shreds(shred5, None, false).unwrap(); - assert_eq!(blocktree.get_orphans(None), vec![0]); + blockstore.insert_shreds(shred4, None, false).unwrap(); + blockstore.insert_shreds(shred5, None, false).unwrap(); + assert_eq!(blockstore.get_orphans(None), vec![0]); // Write zeroth slot, no more orphans - blocktree.insert_shreds(shreds, None, false).unwrap(); + blockstore.insert_shreds(shreds, None, false).unwrap(); for i in 0..3 { - let meta = blocktree + let meta = blockstore .meta(i) .expect("Expect database get to succeed") .unwrap(); assert!(!is_orphan(&meta)); } // Orphans cf is empty - assert!(blocktree.orphans_cf.is_empty().unwrap()) + assert!(blockstore.orphans_cf.is_empty().unwrap()) } - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } fn test_insert_data_shreds_slots(name: &str, should_bulk_write: bool) { - let blocktree_path = get_ledger_path_from_name(name); + let blockstore_path = get_ledger_path_from_name(name); { - let blocktree = Blocktree::open(&blocktree_path).unwrap(); + let blockstore = Blockstore::open(&blockstore_path).unwrap(); // Create shreds and entries let num_entries = 20 as u64; @@ -3670,21 +3675,21 @@ pub mod tests { let num_shreds = shreds.len(); // Write shreds to the database if should_bulk_write { - blocktree.insert_shreds(shreds, None, false).unwrap(); + blockstore.insert_shreds(shreds, None, false).unwrap(); } else { for _ in 0..num_shreds { let shred = shreds.remove(0); - blocktree.insert_shreds(vec![shred], None, false).unwrap(); + blockstore.insert_shreds(vec![shred], None, false).unwrap(); } } for i in 0..num_entries - 1 { assert_eq!( - blocktree.get_slot_entries(i, 0, None).unwrap()[0], + blockstore.get_slot_entries(i, 0, None).unwrap()[0], entries[i as usize] ); - let meta = blocktree.meta(i).unwrap().unwrap(); + let meta = blockstore.meta(i).unwrap().unwrap(); assert_eq!(meta.received, 1); assert_eq!(meta.last_index, 0); if i != 0 { @@ -3696,14 +3701,14 @@ pub mod tests { } } } - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + 
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] fn test_find_missing_data_indexes() { let slot = 0; - let blocktree_path = get_tmp_ledger_path!(); - let blocktree = Blocktree::open(&blocktree_path).unwrap(); + let blockstore_path = get_tmp_ledger_path!(); + let blockstore = Blockstore::open(&blockstore_path).unwrap(); // Write entries let gap: u64 = 10; @@ -3718,7 +3723,7 @@ pub mod tests { s.set_index(i as u32 * gap as u32); s.set_slot(slot); } - blocktree.insert_shreds(shreds, None, false).unwrap(); + blockstore.insert_shreds(shreds, None, false).unwrap(); // Index of the first shred is 0 // Index of the second shred is "gap" @@ -3726,27 +3731,27 @@ pub mod tests { // range of [0, gap) let expected: Vec = (1..gap).collect(); assert_eq!( - blocktree.find_missing_data_indexes(slot, 0, 0, gap, gap as usize), + blockstore.find_missing_data_indexes(slot, 0, 0, gap, gap as usize), expected ); assert_eq!( - blocktree.find_missing_data_indexes(slot, 0, 1, gap, (gap - 1) as usize), + blockstore.find_missing_data_indexes(slot, 0, 1, gap, (gap - 1) as usize), expected, ); assert_eq!( - blocktree.find_missing_data_indexes(slot, 0, 0, gap - 1, (gap - 1) as usize), + blockstore.find_missing_data_indexes(slot, 0, 0, gap - 1, (gap - 1) as usize), &expected[..expected.len() - 1], ); assert_eq!( - blocktree.find_missing_data_indexes(slot, 0, gap - 2, gap, gap as usize), + blockstore.find_missing_data_indexes(slot, 0, gap - 2, gap, gap as usize), vec![gap - 2, gap - 1], ); assert_eq!( - blocktree.find_missing_data_indexes(slot, 0, gap - 2, gap, 1), + blockstore.find_missing_data_indexes(slot, 0, gap - 2, gap, 1), vec![gap - 2], ); assert_eq!( - blocktree.find_missing_data_indexes(slot, 0, 0, gap, 1), + blockstore.find_missing_data_indexes(slot, 0, 0, gap, 1), vec![1], ); @@ -3755,11 +3760,11 @@ pub mod tests { let mut expected: Vec = (1..gap).collect(); expected.push(gap + 1); assert_eq!( - blocktree.find_missing_data_indexes(slot, 0, 0, gap + 2, (gap + 2) as usize), + blockstore.find_missing_data_indexes(slot, 0, 0, gap + 2, (gap + 2) as usize), expected, ); assert_eq!( - blocktree.find_missing_data_indexes(slot, 0, 0, gap + 2, (gap - 1) as usize), + blockstore.find_missing_data_indexes(slot, 0, 0, gap + 2, (gap - 1) as usize), &expected[..expected.len() - 1], ); @@ -3773,7 +3778,7 @@ pub mod tests { }) .collect(); assert_eq!( - blocktree.find_missing_data_indexes( + blockstore.find_missing_data_indexes( slot, 0, j * gap, @@ -3785,15 +3790,15 @@ pub mod tests { } } - drop(blocktree); - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + drop(blockstore); + Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] fn test_find_missing_data_indexes_timeout() { let slot = 0; - let blocktree_path = get_tmp_ledger_path!(); - let blocktree = Blocktree::open(&blocktree_path).unwrap(); + let blockstore_path = get_tmp_ledger_path!(); + let blockstore = Blockstore::open(&blockstore_path).unwrap(); // Write entries let gap: u64 = 10; @@ -3812,36 +3817,48 @@ pub mod tests { ) }) .collect(); - blocktree.insert_shreds(shreds, None, false).unwrap(); + blockstore.insert_shreds(shreds, None, false).unwrap(); let empty: Vec = vec![]; assert_eq!( - blocktree.find_missing_data_indexes(slot, timestamp(), 0, 50, 1), + blockstore.find_missing_data_indexes(slot, timestamp(), 0, 50, 1), empty ); let expected: Vec<_> = (1..=9).collect(); assert_eq!( - blocktree.find_missing_data_indexes(slot, 
timestamp() - 400, 0, 50, 9), + blockstore.find_missing_data_indexes(slot, timestamp() - 400, 0, 50, 9), expected ); - drop(blocktree); - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + drop(blockstore); + Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] fn test_find_missing_data_indexes_sanity() { let slot = 0; - let blocktree_path = get_tmp_ledger_path!(); - let blocktree = Blocktree::open(&blocktree_path).unwrap(); + let blockstore_path = get_tmp_ledger_path!(); + let blockstore = Blockstore::open(&blockstore_path).unwrap(); // Early exit conditions let empty: Vec = vec![]; - assert_eq!(blocktree.find_missing_data_indexes(slot, 0, 0, 0, 1), empty); - assert_eq!(blocktree.find_missing_data_indexes(slot, 0, 5, 5, 1), empty); - assert_eq!(blocktree.find_missing_data_indexes(slot, 0, 4, 3, 1), empty); - assert_eq!(blocktree.find_missing_data_indexes(slot, 0, 1, 2, 0), empty); + assert_eq!( + blockstore.find_missing_data_indexes(slot, 0, 0, 0, 1), + empty + ); + assert_eq!( + blockstore.find_missing_data_indexes(slot, 0, 5, 5, 1), + empty + ); + assert_eq!( + blockstore.find_missing_data_indexes(slot, 0, 4, 3, 1), + empty + ); + assert_eq!( + blockstore.find_missing_data_indexes(slot, 0, 1, 2, 0), + empty + ); let entries = create_ticks(100, 0, Hash::default()); let mut shreds = entries_to_test_shreds(entries, slot, 0, true, 0); @@ -3855,7 +3872,7 @@ pub mod tests { shreds[1].set_index(OTHER as u32); // Insert one shred at index = first_index - blocktree.insert_shreds(shreds, None, false).unwrap(); + blockstore.insert_shreds(shreds, None, false).unwrap(); const STARTS: u64 = OTHER * 2; const END: u64 = OTHER * 3; @@ -3864,7 +3881,7 @@ pub mod tests { // given the input range of [i, first_index], the missing indexes should be // [i, first_index - 1] for start in 0..STARTS { - let result = blocktree.find_missing_data_indexes( + let result = blockstore.find_missing_data_indexes( slot, 0, start, // start END, //end MAX, //max @@ -3873,15 +3890,15 @@ pub mod tests { assert_eq!(result, expected); } - drop(blocktree); - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + drop(blockstore); + Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] pub fn test_no_missing_shred_indexes() { let slot = 0; - let blocktree_path = get_tmp_ledger_path!(); - let blocktree = Blocktree::open(&blocktree_path).unwrap(); + let blockstore_path = get_tmp_ledger_path!(); + let blockstore = Blockstore::open(&blockstore_path).unwrap(); // Write entries let num_entries = 10; @@ -3889,42 +3906,42 @@ pub mod tests { let shreds = entries_to_test_shreds(entries, slot, 0, true, 0); let num_shreds = shreds.len(); - blocktree.insert_shreds(shreds, None, false).unwrap(); + blockstore.insert_shreds(shreds, None, false).unwrap(); let empty: Vec = vec![]; for i in 0..num_shreds as u64 { for j in 0..i { assert_eq!( - blocktree.find_missing_data_indexes(slot, 0, j, i, (i - j) as usize), + blockstore.find_missing_data_indexes(slot, 0, j, i, (i - j) as usize), empty ); } } - drop(blocktree); - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + drop(blockstore); + Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] pub fn test_should_insert_data_shred() { let (mut shreds, _) = make_slot_entries(0, 0, 200); - let blocktree_path = get_tmp_ledger_path!(); + let blockstore_path = 
get_tmp_ledger_path!(); { - let blocktree = Blocktree::open(&blocktree_path).unwrap(); - let index_cf = blocktree.db.column::(); + let blockstore = Blockstore::open(&blockstore_path).unwrap(); + let index_cf = blockstore.db.column::(); let last_root = RwLock::new(0); // Insert the first 5 shreds, we don't have a "is_last" shred yet - blocktree + blockstore .insert_shreds(shreds[0..5].to_vec(), None, false) .unwrap(); // Trying to insert a shred less than `slot_meta.consumed` should fail - let slot_meta = blocktree.meta(0).unwrap().unwrap(); + let slot_meta = blockstore.meta(0).unwrap().unwrap(); let index = index_cf.get(0).unwrap().unwrap(); assert_eq!(slot_meta.consumed, 5); assert_eq!( - Blocktree::should_insert_data_shred( + Blockstore::should_insert_data_shred( &shreds[1], &slot_meta, index.data(), @@ -3935,13 +3952,13 @@ pub mod tests { // Trying to insert the same shred again should fail // skip over shred 5 so the `slot_meta.consumed` doesn't increment - blocktree + blockstore .insert_shreds(shreds[6..7].to_vec(), None, false) .unwrap(); - let slot_meta = blocktree.meta(0).unwrap().unwrap(); + let slot_meta = blockstore.meta(0).unwrap().unwrap(); let index = index_cf.get(0).unwrap().unwrap(); assert_eq!( - Blocktree::should_insert_data_shred( + Blockstore::should_insert_data_shred( &shreds[6], &slot_meta, index.data(), @@ -3952,10 +3969,10 @@ pub mod tests { // Trying to insert another "is_last" shred with index < the received index should fail // skip over shred 7 - blocktree + blockstore .insert_shreds(shreds[8..9].to_vec(), None, false) .unwrap(); - let slot_meta = blocktree.meta(0).unwrap().unwrap(); + let slot_meta = blockstore.meta(0).unwrap().unwrap(); let index = index_cf.get(0).unwrap().unwrap(); assert_eq!(slot_meta.received, 9); let shred7 = { @@ -3967,14 +3984,14 @@ pub mod tests { } }; assert_eq!( - Blocktree::should_insert_data_shred(&shred7, &slot_meta, index.data(), &last_root), + Blockstore::should_insert_data_shred(&shred7, &slot_meta, index.data(), &last_root), false ); // Insert all pending shreds let mut shred8 = shreds[8].clone(); - blocktree.insert_shreds(shreds, None, false).unwrap(); - let slot_meta = blocktree.meta(0).unwrap().unwrap(); + blockstore.insert_shreds(shreds, None, false).unwrap(); + let slot_meta = blockstore.meta(0).unwrap().unwrap(); let index = index_cf.get(0).unwrap().unwrap(); // Trying to insert a shred with index > the "is_last" shred should fail @@ -3984,19 +4001,19 @@ pub mod tests { panic!("Shred in unexpected format") } assert_eq!( - Blocktree::should_insert_data_shred(&shred7, &slot_meta, index.data(), &last_root), + Blockstore::should_insert_data_shred(&shred7, &slot_meta, index.data(), &last_root), false ); } - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] pub fn test_should_insert_coding_shred() { - let blocktree_path = get_tmp_ledger_path!(); + let blockstore_path = get_tmp_ledger_path!(); { - let blocktree = Blocktree::open(&blocktree_path).unwrap(); - let index_cf = blocktree.db.column::(); + let blockstore = Blockstore::open(&blockstore_path).unwrap(); + let index_cf = blockstore.db.column::(); let last_root = RwLock::new(0); let slot = 1; @@ -4009,21 +4026,21 @@ pub mod tests { ); // Insert a good coding shred - assert!(Blocktree::should_insert_coding_shred( + assert!(Blockstore::should_insert_coding_shred( &coding_shred, Index::new(slot).coding(), &last_root )); // Insertion should 
succeed - blocktree + blockstore .insert_shreds(vec![coding_shred.clone()], None, false) .unwrap(); // Trying to insert the same shred again should fail { let index = index_cf.get(shred.slot).unwrap().unwrap(); - assert!(!Blocktree::should_insert_coding_shred( + assert!(!Blockstore::should_insert_coding_shred( &coding_shred, index.coding(), &last_root @@ -4040,7 +4057,7 @@ pub mod tests { coding.clone(), ); let index = index_cf.get(shred.slot).unwrap().unwrap(); - assert!(Blocktree::should_insert_coding_shred( + assert!(Blockstore::should_insert_coding_shred( &coding_shred, index.coding(), &last_root @@ -4058,7 +4075,7 @@ pub mod tests { coding_shred.set_index(index as u32); let index = index_cf.get(coding_shred.slot()).unwrap().unwrap(); - assert!(!Blocktree::should_insert_coding_shred( + assert!(!Blockstore::should_insert_coding_shred( &coding_shred, index.coding(), &last_root @@ -4074,7 +4091,7 @@ pub mod tests { ); coding_shred.coding_header.num_coding_shreds = 0; let index = index_cf.get(coding_shred.slot()).unwrap().unwrap(); - assert!(!Blocktree::should_insert_coding_shred( + assert!(!Blockstore::should_insert_coding_shred( &coding_shred, index.coding(), &last_root @@ -4090,7 +4107,7 @@ pub mod tests { ); coding_shred.coding_header.num_coding_shreds = coding_shred.coding_header.position; let index = index_cf.get(coding_shred.slot()).unwrap().unwrap(); - assert!(!Blocktree::should_insert_coding_shred( + assert!(!Blockstore::should_insert_coding_shred( &coding_shred, index.coding(), &last_root @@ -4110,7 +4127,7 @@ pub mod tests { coding_shred.common_header.index = std::u32::MAX - 1; coding_shred.coding_header.position = 0; let index = index_cf.get(coding_shred.slot()).unwrap().unwrap(); - assert!(!Blocktree::should_insert_coding_shred( + assert!(!Blockstore::should_insert_coding_shred( &coding_shred, index.coding(), &last_root @@ -4118,14 +4135,14 @@ pub mod tests { // Decreasing the number of num_coding_shreds will put it within the allowed limit coding_shred.coding_header.num_coding_shreds = 2; - assert!(Blocktree::should_insert_coding_shred( + assert!(Blockstore::should_insert_coding_shred( &coding_shred, index.coding(), &last_root )); // Insertion should succeed - blocktree + blockstore .insert_shreds(vec![coding_shred], None, false) .unwrap(); } @@ -4139,7 +4156,7 @@ pub mod tests { ); let index = index_cf.get(coding_shred.slot()).unwrap().unwrap(); coding_shred.set_slot(*last_root.read().unwrap()); - assert!(!Blocktree::should_insert_coding_shred( + assert!(!Blockstore::should_insert_coding_shred( &coding_shred, index.coding(), &last_root @@ -4147,18 +4164,18 @@ pub mod tests { } } - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] pub fn test_insert_multiple_is_last() { let (shreds, _) = make_slot_entries(0, 0, 20); let num_shreds = shreds.len() as u64; - let blocktree_path = get_tmp_ledger_path!(); - let blocktree = Blocktree::open(&blocktree_path).unwrap(); + let blockstore_path = get_tmp_ledger_path!(); + let blockstore = Blockstore::open(&blockstore_path).unwrap(); - blocktree.insert_shreds(shreds, None, false).unwrap(); - let slot_meta = blocktree.meta(0).unwrap().unwrap(); + blockstore.insert_shreds(shreds, None, false).unwrap(); + let slot_meta = blockstore.meta(0).unwrap().unwrap(); assert_eq!(slot_meta.consumed, num_shreds); assert_eq!(slot_meta.received, num_shreds); @@ -4166,82 +4183,82 @@ pub mod tests { 
assert!(slot_meta.is_full()); let (shreds, _) = make_slot_entries(0, 0, 22); - blocktree.insert_shreds(shreds, None, false).unwrap(); - let slot_meta = blocktree.meta(0).unwrap().unwrap(); + blockstore.insert_shreds(shreds, None, false).unwrap(); + let slot_meta = blockstore.meta(0).unwrap().unwrap(); assert_eq!(slot_meta.consumed, num_shreds); assert_eq!(slot_meta.received, num_shreds); assert_eq!(slot_meta.last_index, num_shreds - 1); assert!(slot_meta.is_full()); - drop(blocktree); - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + drop(blockstore); + Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] fn test_slot_data_iterator() { // Construct the shreds - let blocktree_path = get_tmp_ledger_path!(); - let blocktree = Blocktree::open(&blocktree_path).unwrap(); + let blockstore_path = get_tmp_ledger_path!(); + let blockstore = Blockstore::open(&blockstore_path).unwrap(); let shreds_per_slot = 10; let slots = vec![2, 4, 8, 12]; let all_shreds = make_chaining_slot_entries(&slots, shreds_per_slot); let slot_8_shreds = all_shreds[2].0.clone(); for (slot_shreds, _) in all_shreds { - blocktree.insert_shreds(slot_shreds, None, false).unwrap(); + blockstore.insert_shreds(slot_shreds, None, false).unwrap(); } // Slot doesnt exist, iterator should be empty - let shred_iter = blocktree.slot_data_iterator(5).unwrap(); + let shred_iter = blockstore.slot_data_iterator(5).unwrap(); let result: Vec<_> = shred_iter.collect(); assert_eq!(result, vec![]); // Test that the iterator for slot 8 contains what was inserted earlier - let shred_iter = blocktree.slot_data_iterator(8).unwrap(); + let shred_iter = blockstore.slot_data_iterator(8).unwrap(); let result: Vec = shred_iter .filter_map(|(_, bytes)| Shred::new_from_serialized_shred(bytes.to_vec()).ok()) .collect(); assert_eq!(result.len(), slot_8_shreds.len()); assert_eq!(result, slot_8_shreds); - drop(blocktree); - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + drop(blockstore); + Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] fn test_set_roots() { - let blocktree_path = get_tmp_ledger_path!(); - let blocktree = Blocktree::open(&blocktree_path).unwrap(); + let blockstore_path = get_tmp_ledger_path!(); + let blockstore = Blockstore::open(&blockstore_path).unwrap(); let chained_slots = vec![0, 2, 4, 7, 12, 15]; - assert_eq!(blocktree.last_root(), 0); + assert_eq!(blockstore.last_root(), 0); - blocktree.set_roots(&chained_slots).unwrap(); + blockstore.set_roots(&chained_slots).unwrap(); - assert_eq!(blocktree.last_root(), 15); + assert_eq!(blockstore.last_root(), 15); for i in chained_slots { - assert!(blocktree.is_root(i)); + assert!(blockstore.is_root(i)); } - drop(blocktree); - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + drop(blockstore); + Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] fn test_prune() { - let blocktree_path = get_tmp_ledger_path!(); - let blocktree = Blocktree::open(&blocktree_path).unwrap(); + let blockstore_path = get_tmp_ledger_path!(); + let blockstore = Blockstore::open(&blockstore_path).unwrap(); let (shreds, _) = make_many_slot_entries(0, 50, 6); let shreds_per_slot = shreds.len() as u64 / 50; - blocktree.insert_shreds(shreds, None, false).unwrap(); - blocktree + blockstore.insert_shreds(shreds, None, false).unwrap(); + blockstore 
.slot_meta_iterator(0) .unwrap() .for_each(|(_, meta)| assert_eq!(meta.last_index, shreds_per_slot - 1)); - blocktree.prune(5); + blockstore.prune(5); - blocktree + blockstore .slot_meta_iterator(0) .unwrap() .for_each(|(slot, meta)| { @@ -4249,7 +4266,7 @@ pub mod tests { assert_eq!(meta.last_index, shreds_per_slot - 1) }); - let data_iter = blocktree + let data_iter = blockstore .data_shred_cf .iter(IteratorMode::From((0, 0), IteratorDirection::Forward)) .unwrap(); @@ -4259,76 +4276,79 @@ pub mod tests { } } - drop(blocktree); - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + drop(blockstore); + Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] fn test_purge_slots() { - let blocktree_path = get_tmp_ledger_path!(); - let blocktree = Blocktree::open(&blocktree_path).unwrap(); + let blockstore_path = get_tmp_ledger_path!(); + let blockstore = Blockstore::open(&blockstore_path).unwrap(); let (shreds, _) = make_many_slot_entries(0, 50, 5); - blocktree.insert_shreds(shreds, None, false).unwrap(); + blockstore.insert_shreds(shreds, None, false).unwrap(); - blocktree.purge_slots(0, Some(5)); + blockstore.purge_slots(0, Some(5)); - test_all_empty_or_min(&blocktree, 6); + test_all_empty_or_min(&blockstore, 6); - blocktree.purge_slots(0, None); + blockstore.purge_slots(0, None); - // min slot shouldn't matter, blocktree should be empty - test_all_empty_or_min(&blocktree, 100); - test_all_empty_or_min(&blocktree, 0); + // min slot shouldn't matter, blockstore should be empty + test_all_empty_or_min(&blockstore, 100); + test_all_empty_or_min(&blockstore, 0); - blocktree.slot_meta_iterator(0).unwrap().for_each(|(_, _)| { - assert!(false); - }); + blockstore + .slot_meta_iterator(0) + .unwrap() + .for_each(|(_, _)| { + assert!(false); + }); - drop(blocktree); - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + drop(blockstore); + Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] fn test_purge_huge() { - let blocktree_path = get_tmp_ledger_path!(); - let blocktree = Blocktree::open(&blocktree_path).unwrap(); + let blockstore_path = get_tmp_ledger_path!(); + let blockstore = Blockstore::open(&blockstore_path).unwrap(); let (shreds, _) = make_many_slot_entries(0, 5000, 10); - blocktree.insert_shreds(shreds, None, false).unwrap(); + blockstore.insert_shreds(shreds, None, false).unwrap(); - blocktree.purge_slots(0, Some(4999)); + blockstore.purge_slots(0, Some(4999)); - test_all_empty_or_min(&blocktree, 5000); + test_all_empty_or_min(&blockstore, 5000); - drop(blocktree); - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + drop(blockstore); + Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[should_panic] #[test] fn test_prune_out_of_bounds() { - let blocktree_path = get_tmp_ledger_path!(); - let blocktree = Blocktree::open(&blocktree_path).unwrap(); + let blockstore_path = get_tmp_ledger_path!(); + let blockstore = Blockstore::open(&blockstore_path).unwrap(); // slot 5 does not exist, prune should panic - blocktree.prune(5); + blockstore.prune(5); - drop(blocktree); - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + drop(blockstore); + Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] fn test_iter_bounds() { - let blocktree_path = get_tmp_ledger_path!(); - 
let blocktree = Blocktree::open(&blocktree_path).unwrap(); + let blockstore_path = get_tmp_ledger_path!(); + let blockstore = Blockstore::open(&blockstore_path).unwrap(); // slot 5 does not exist, iter should be ok and should be a noop - blocktree + blockstore .slot_meta_iterator(5) .unwrap() .for_each(|_| assert!(false)); - drop(blocktree); - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + drop(blockstore); + Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] @@ -4339,7 +4359,7 @@ pub mod tests { let start_index = 0; let consumed = 1; assert_eq!( - Blocktree::get_completed_data_ranges( + Blockstore::get_completed_data_ranges( start_index, &completed_data_end_indexes[..], consumed @@ -4350,7 +4370,7 @@ pub mod tests { let start_index = 0; let consumed = 3; assert_eq!( - Blocktree::get_completed_data_ranges( + Blockstore::get_completed_data_ranges( start_index, &completed_data_end_indexes[..], consumed @@ -4380,7 +4400,7 @@ pub mod tests { ); assert_eq!( - Blocktree::get_completed_data_ranges( + Blockstore::get_completed_data_ranges( start_index, &completed_data_end_indexes[..], consumed @@ -4393,19 +4413,19 @@ pub mod tests { #[test] fn test_get_slot_entries_with_shred_count_corruption() { - let blocktree_path = get_tmp_ledger_path!(); + let blockstore_path = get_tmp_ledger_path!(); { - let blocktree = Blocktree::open(&blocktree_path).unwrap(); + let blockstore = Blockstore::open(&blockstore_path).unwrap(); let num_ticks = 8; let entries = create_ticks(num_ticks, 0, Hash::default()); let slot = 1; let shreds = entries_to_test_shreds(entries, slot, 0, false, 0); let next_shred_index = shreds.len(); - blocktree + blockstore .insert_shreds(shreds, None, false) .expect("Expected successful write of shreds"); assert_eq!( - blocktree.get_slot_entries(slot, 0, None).unwrap().len() as u64, + blockstore.get_slot_entries(slot, 0, None).unwrap().len() as u64, num_ticks ); @@ -4424,12 +4444,12 @@ pub mod tests { // With the corruption, nothing should be returned, even though an // earlier data block was valid - blocktree + blockstore .insert_shreds(shreds, None, false) .expect("Expected successful write of shreds"); - assert!(blocktree.get_slot_entries(slot, 0, None).is_err()); + assert!(blockstore.get_slot_entries(slot, 0, None).is_err()); } - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] @@ -4437,12 +4457,12 @@ pub mod tests { // This tests correctness of the SlotMeta in various cases in which a shred // that gets filtered out by checks let (shreds0, _) = make_slot_entries(0, 0, 200); - let blocktree_path = get_tmp_ledger_path!(); + let blockstore_path = get_tmp_ledger_path!(); { - let blocktree = Blocktree::open(&blocktree_path).unwrap(); + let blockstore = Blockstore::open(&blockstore_path).unwrap(); // Insert the first 5 shreds, we don't have a "is_last" shred yet - blocktree + blockstore .insert_shreds(shreds0[0..5].to_vec(), None, false) .unwrap(); @@ -4453,37 +4473,37 @@ pub mod tests { let (mut shreds3, _) = make_slot_entries(3, 0, 200); shreds2.push(shreds0[1].clone()); shreds3.insert(0, shreds0[1].clone()); - blocktree.insert_shreds(shreds2, None, false).unwrap(); - let slot_meta = blocktree.meta(0).unwrap().unwrap(); + blockstore.insert_shreds(shreds2, None, false).unwrap(); + let slot_meta = blockstore.meta(0).unwrap().unwrap(); 
assert_eq!(slot_meta.next_slots, vec![2]); - blocktree.insert_shreds(shreds3, None, false).unwrap(); - let slot_meta = blocktree.meta(0).unwrap().unwrap(); + blockstore.insert_shreds(shreds3, None, false).unwrap(); + let slot_meta = blockstore.meta(0).unwrap().unwrap(); assert_eq!(slot_meta.next_slots, vec![2, 3]); } - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] fn test_trusted_insert_shreds() { // Make shred for slot 1 let (shreds1, _) = make_slot_entries(1, 0, 1); - let blocktree_path = get_tmp_ledger_path!(); + let blockstore_path = get_tmp_ledger_path!(); let last_root = 100; { - let blocktree = Blocktree::open(&blocktree_path).unwrap(); - blocktree.set_roots(&[last_root]).unwrap(); + let blockstore = Blockstore::open(&blockstore_path).unwrap(); + blockstore.set_roots(&[last_root]).unwrap(); // Insert will fail, slot < root - blocktree + blockstore .insert_shreds(shreds1.clone()[..].to_vec(), None, false) .unwrap(); - assert!(blocktree.get_data_shred(1, 0).unwrap().is_none()); + assert!(blockstore.get_data_shred(1, 0).unwrap().is_none()); // Insert through trusted path will succeed - blocktree + blockstore .insert_shreds(shreds1[..].to_vec(), None, true) .unwrap(); - assert!(blocktree.get_data_shred(1, 0).unwrap().is_some()); + assert!(blockstore.get_data_shred(1, 0).unwrap().is_some()); } } @@ -4491,14 +4511,14 @@ pub mod tests { fn test_get_timestamp_slots() { let timestamp_sample_range = 5; let ticks_per_slot = 5; - // Smaller interval than TIMESTAMP_SLOT_INTERVAL for convenience of building blocktree + // Smaller interval than TIMESTAMP_SLOT_INTERVAL for convenience of building blockstore let timestamp_interval = 7; /* - Build a blocktree with < TIMESTAMP_SLOT_RANGE roots + Build a blockstore with < TIMESTAMP_SLOT_RANGE roots */ - let blocktree_path = get_tmp_ledger_path!(); - let blocktree = Blocktree::open(&blocktree_path).unwrap(); - blocktree.set_roots(&[0]).unwrap(); + let blockstore_path = get_tmp_ledger_path!(); + let blockstore = Blockstore::open(&blockstore_path).unwrap(); + blockstore.set_roots(&[0]).unwrap(); let mut last_entry_hash = Hash::default(); for slot in 0..=3 { let parent = { @@ -4508,36 +4528,36 @@ pub mod tests { slot - 1 } }; - last_entry_hash = fill_blocktree_slot_with_ticks( - &blocktree, + last_entry_hash = fill_blockstore_slot_with_ticks( + &blockstore, ticks_per_slot, slot, parent, last_entry_hash, ); } - blocktree.set_roots(&[1, 2, 3]).unwrap(); + blockstore.set_roots(&[1, 2, 3]).unwrap(); assert_eq!( - blocktree.get_timestamp_slots(2, timestamp_interval, timestamp_sample_range), + blockstore.get_timestamp_slots(2, timestamp_interval, timestamp_sample_range), vec![1, 2] ); assert_eq!( - blocktree.get_timestamp_slots(3, timestamp_interval, timestamp_sample_range), + blockstore.get_timestamp_slots(3, timestamp_interval, timestamp_sample_range), vec![1, 2, 3] ); - drop(blocktree); - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + drop(blockstore); + Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); /* - Build a blocktree in the ledger with the following rooted slots: + Build a blockstore in the ledger with the following rooted slots: [0, 1, 2, 3, 4, 5, 7, 8, 9, 10, 11, 14, 15, 16, 17] */ - let blocktree_path = get_tmp_ledger_path!(); - let blocktree = Blocktree::open(&blocktree_path).unwrap(); - blocktree.set_roots(&[0]).unwrap(); 
+ let blockstore_path = get_tmp_ledger_path!(); + let blockstore = Blockstore::open(&blockstore_path).unwrap(); + blockstore.set_roots(&[0]).unwrap(); let desired_roots = vec![1, 2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 16, 17, 18, 19]; let mut last_entry_hash = Hash::default(); for (i, slot) in desired_roots.iter().enumerate() { @@ -4548,34 +4568,34 @@ pub mod tests { desired_roots[i - 1] } }; - last_entry_hash = fill_blocktree_slot_with_ticks( - &blocktree, + last_entry_hash = fill_blockstore_slot_with_ticks( + &blockstore, ticks_per_slot, *slot, parent, last_entry_hash, ); } - blocktree.set_roots(&desired_roots).unwrap(); + blockstore.set_roots(&desired_roots).unwrap(); assert_eq!( - blocktree.get_timestamp_slots(2, timestamp_interval, timestamp_sample_range), + blockstore.get_timestamp_slots(2, timestamp_interval, timestamp_sample_range), vec![1, 2] ); assert_eq!( - blocktree.get_timestamp_slots(8, timestamp_interval, timestamp_sample_range), + blockstore.get_timestamp_slots(8, timestamp_interval, timestamp_sample_range), vec![1, 2, 3, 4, 5] ); assert_eq!( - blocktree.get_timestamp_slots(13, timestamp_interval, timestamp_sample_range), + blockstore.get_timestamp_slots(13, timestamp_interval, timestamp_sample_range), vec![8, 9, 10, 11, 12] ); assert_eq!( - blocktree.get_timestamp_slots(18, timestamp_interval, timestamp_sample_range), + blockstore.get_timestamp_slots(18, timestamp_interval, timestamp_sample_range), vec![8, 9, 10, 11, 12] ); assert_eq!( - blocktree.get_timestamp_slots(19, timestamp_interval, timestamp_sample_range), + blockstore.get_timestamp_slots(19, timestamp_interval, timestamp_sample_range), vec![14, 16, 17, 18, 19] ); } @@ -4588,7 +4608,7 @@ pub mod tests { let shreds = entries_to_test_shreds(entries.clone(), slot, slot - 1, true, 0); let more_shreds = entries_to_test_shreds(entries.clone(), slot + 1, slot, true, 0); let ledger_path = get_tmp_ledger_path!(); - let ledger = Blocktree::open(&ledger_path).unwrap(); + let ledger = Blockstore::open(&ledger_path).unwrap(); ledger.insert_shreds(shreds, None, false).unwrap(); ledger.insert_shreds(more_shreds, None, false).unwrap(); ledger.set_roots(&[slot - 1, slot, slot + 1]).unwrap(); @@ -4650,7 +4670,7 @@ pub mod tests { // Even if marked as root, a slot that is empty of entries should return an error let confirmed_block_err = ledger.get_confirmed_block(slot - 1, None).unwrap_err(); - assert_matches!(confirmed_block_err, BlocktreeError::SlotNotRooted); + assert_matches!(confirmed_block_err, BlockstoreError::SlotNotRooted); let confirmed_block = ledger.get_confirmed_block(slot, None).unwrap(); assert_eq!(confirmed_block.transactions.len(), 100); @@ -4695,10 +4715,10 @@ pub mod tests { assert_eq!(confirmed_block, expected_block); let not_root = ledger.get_confirmed_block(slot + 2, None).unwrap_err(); - assert_matches!(not_root, BlocktreeError::SlotNotRooted); + assert_matches!(not_root, BlockstoreError::SlotNotRooted); drop(ledger); - Blocktree::destroy(&ledger_path).expect("Expected successful database destruction"); + Blockstore::destroy(&ledger_path).expect("Expected successful database destruction"); } #[test] @@ -4733,26 +4753,26 @@ pub mod tests { } let shreds = entries_to_test_shreds(vote_entries.clone(), 1, 0, true, 0); let ledger_path = get_tmp_ledger_path!(); - let blocktree = Blocktree::open(&ledger_path).unwrap(); - blocktree.insert_shreds(shreds, None, false).unwrap(); + let blockstore = Blockstore::open(&ledger_path).unwrap(); + blockstore.insert_shreds(shreds, None, false).unwrap(); // Populate slot 2 with 
ticks only - fill_blocktree_slot_with_ticks(&blocktree, 6, 2, 1, Hash::default()); - blocktree.set_roots(&[0, 1, 2]).unwrap(); + fill_blockstore_slot_with_ticks(&blockstore, 6, 2, 1, Hash::default()); + blockstore.set_roots(&[0, 1, 2]).unwrap(); assert_eq!( - blocktree.get_block_timestamps(1).unwrap(), + blockstore.get_block_timestamps(1).unwrap(), expected_timestamps ); - assert_eq!(blocktree.get_block_timestamps(2).unwrap(), vec![]); + assert_eq!(blockstore.get_block_timestamps(2).unwrap(), vec![]); // Build epoch vote_accounts HashMap to test stake-weighted block time - blocktree.set_roots(&[3, 8]).unwrap(); + blockstore.set_roots(&[3, 8]).unwrap(); let mut stakes = HashMap::new(); for (i, keypair) in vote_keypairs.iter().enumerate() { stakes.insert(keypair.pubkey(), (1 + i as u64, Account::default())); } let slot_duration = Duration::from_millis(400); - let block_time_slot_3 = blocktree.get_block_time(3, slot_duration.clone(), &stakes); + let block_time_slot_3 = blockstore.get_block_time(3, slot_duration.clone(), &stakes); let mut total_stake = 0; let mut expected_time: u64 = (0..6) @@ -4768,7 +4788,7 @@ pub mod tests { expected_time /= total_stake; assert_eq!(block_time_slot_3.unwrap() as u64, expected_time); assert_eq!( - blocktree + blockstore .get_block_time(8, slot_duration.clone(), &stakes) .unwrap() as u64, expected_time + 2 // At 400ms block duration, 5 slots == 2sec @@ -4777,10 +4797,10 @@ pub mod tests { #[test] fn test_persist_transaction_status() { - let blocktree_path = get_tmp_ledger_path!(); + let blockstore_path = get_tmp_ledger_path!(); { - let blocktree = Blocktree::open(&blocktree_path).unwrap(); - let transaction_status_cf = blocktree.db.column::(); + let blockstore = Blockstore::open(&blockstore_path).unwrap(); + let transaction_status_cf = blockstore.db.column::(); let pre_balances_vec = vec![1, 2, 3]; let post_balances_vec = vec![3, 2, 1]; @@ -4851,7 +4871,7 @@ pub mod tests { assert_eq!(pre_balances, pre_balances_vec); assert_eq!(post_balances, post_balances_vec); } - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] @@ -4872,10 +4892,10 @@ pub mod tests { #[test] fn test_map_transactions_to_statuses() { - let blocktree_path = get_tmp_ledger_path!(); + let blockstore_path = get_tmp_ledger_path!(); { - let blocktree = Blocktree::open(&blocktree_path).unwrap(); - let transaction_status_cf = blocktree.db.column::(); + let blockstore = Blockstore::open(&blockstore_path).unwrap(); + let transaction_status_cf = blockstore.db.column::(); let slot = 0; let mut transactions: Vec = vec![]; @@ -4911,7 +4931,7 @@ pub mod tests { vec![CompiledInstruction::new(1, &(), vec![0])], )); - let map = blocktree.map_transactions_to_statuses( + let map = blockstore.map_transactions_to_statuses( slot, RpcTransactionEncoding::Json, transactions.into_iter(), @@ -4922,24 +4942,24 @@ pub mod tests { } assert_eq!(map[4].1.as_ref(), None); } - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] fn test_lowest_slot() { - let blocktree_path = get_tmp_ledger_path!(); + let blockstore_path = get_tmp_ledger_path!(); { - let blocktree = Blocktree::open(&blocktree_path).unwrap(); + let blockstore = Blockstore::open(&blockstore_path).unwrap(); for i in 0..10 { let slot = i; let (shreds, _) = make_slot_entries(slot, 0, 1); - 
blocktree.insert_shreds(shreds, None, false).unwrap(); + blockstore.insert_shreds(shreds, None, false).unwrap(); } - assert_eq!(blocktree.lowest_slot(), 1); - blocktree.run_purge(0, 5).unwrap(); - assert_eq!(blocktree.lowest_slot(), 6); + assert_eq!(blockstore.lowest_slot(), 1); + blockstore.run_purge(0, 5).unwrap(); + assert_eq!(blockstore.lowest_slot(), 6); } - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] @@ -4947,10 +4967,10 @@ pub mod tests { let slot = 1; let (data_shreds, coding_shreds, leader_schedule_cache) = setup_erasure_shreds(slot, 0, 100, 1.0); - let blocktree_path = get_tmp_ledger_path!(); + let blockstore_path = get_tmp_ledger_path!(); { - let blocktree = Blocktree::open(&blocktree_path).unwrap(); - blocktree + let blockstore = Blockstore::open(&blockstore_path).unwrap(); + blockstore .insert_shreds(coding_shreds, Some(&leader_schedule_cache), false) .unwrap(); let shred_bufs: Vec<_> = data_shreds @@ -4961,7 +4981,7 @@ pub mod tests { // Check all the data shreds were recovered for (s, buf) in data_shreds.iter().zip(shred_bufs) { assert_eq!( - blocktree + blockstore .get_data_shred(s.slot(), s.index() as u64) .unwrap() .unwrap(), @@ -4969,9 +4989,9 @@ pub mod tests { ); } - verify_index_integrity(&blocktree, slot); + verify_index_integrity(&blockstore, slot); } - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] @@ -4982,38 +5002,38 @@ pub mod tests { setup_erasure_shreds(slot, 0, num_entries, 1.0); assert!(data_shreds.len() > 3); assert!(coding_shreds.len() > 3); - let blocktree_path = get_tmp_ledger_path!(); + let blockstore_path = get_tmp_ledger_path!(); { - let blocktree = Blocktree::open(&blocktree_path).unwrap(); + let blockstore = Blockstore::open(&blockstore_path).unwrap(); // Test inserting all the shreds let all_shreds: Vec<_> = data_shreds .iter() .cloned() .chain(coding_shreds.iter().cloned()) .collect(); - blocktree + blockstore .insert_shreds(all_shreds, Some(&leader_schedule_cache), false) .unwrap(); - verify_index_integrity(&blocktree, slot); - blocktree.purge_slots(0, Some(slot)); + verify_index_integrity(&blockstore, slot); + blockstore.purge_slots(0, Some(slot)); // Test inserting just the codes, enough for recovery - blocktree + blockstore .insert_shreds(coding_shreds.clone(), Some(&leader_schedule_cache), false) .unwrap(); - verify_index_integrity(&blocktree, slot); - blocktree.purge_slots(0, Some(slot)); + verify_index_integrity(&blockstore, slot); + blockstore.purge_slots(0, Some(slot)); // Test inserting some codes, but not enough for recovery - blocktree + blockstore .insert_shreds( coding_shreds[..coding_shreds.len() - 1].to_vec(), Some(&leader_schedule_cache), false, ) .unwrap(); - verify_index_integrity(&blocktree, slot); - blocktree.purge_slots(0, Some(slot)); + verify_index_integrity(&blockstore, slot); + blockstore.purge_slots(0, Some(slot)); // Test inserting just the codes, and some data, enough for recovery let shreds: Vec<_> = data_shreds[..data_shreds.len() - 1] @@ -5021,11 +5041,11 @@ pub mod tests { .cloned() .chain(coding_shreds[..coding_shreds.len() - 1].iter().cloned()) .collect(); - blocktree + blockstore .insert_shreds(shreds, Some(&leader_schedule_cache), false) .unwrap(); - verify_index_integrity(&blocktree, slot); - blocktree.purge_slots(0, 
Some(slot)); + verify_index_integrity(&blockstore, slot); + blockstore.purge_slots(0, Some(slot)); // Test inserting some codes, and some data, but enough for recovery let shreds: Vec<_> = data_shreds[..data_shreds.len() / 2 - 1] @@ -5033,11 +5053,11 @@ pub mod tests { .cloned() .chain(coding_shreds[..coding_shreds.len() / 2 - 1].iter().cloned()) .collect(); - blocktree + blockstore .insert_shreds(shreds, Some(&leader_schedule_cache), false) .unwrap(); - verify_index_integrity(&blocktree, slot); - blocktree.purge_slots(0, Some(slot)); + verify_index_integrity(&blockstore, slot); + blockstore.purge_slots(0, Some(slot)); // Test inserting all shreds in 2 rounds, make sure nothing is lost let shreds1: Vec<_> = data_shreds[..data_shreds.len() / 2 - 1] @@ -5050,14 +5070,14 @@ pub mod tests { .cloned() .chain(coding_shreds[coding_shreds.len() / 2 - 1..].iter().cloned()) .collect(); - blocktree + blockstore .insert_shreds(shreds1, Some(&leader_schedule_cache), false) .unwrap(); - blocktree + blockstore .insert_shreds(shreds2, Some(&leader_schedule_cache), false) .unwrap(); - verify_index_integrity(&blocktree, slot); - blocktree.purge_slots(0, Some(slot)); + verify_index_integrity(&blockstore, slot); + blockstore.purge_slots(0, Some(slot)); // Test not all, but enough data and coding shreds in 2 rounds to trigger recovery, // make sure nothing is lost @@ -5075,14 +5095,14 @@ pub mod tests { .cloned(), ) .collect(); - blocktree + blockstore .insert_shreds(shreds1, Some(&leader_schedule_cache), false) .unwrap(); - blocktree + blockstore .insert_shreds(shreds2, Some(&leader_schedule_cache), false) .unwrap(); - verify_index_integrity(&blocktree, slot); - blocktree.purge_slots(0, Some(slot)); + verify_index_integrity(&blockstore, slot); + blockstore.purge_slots(0, Some(slot)); // Test insert shreds in 2 rounds, but not enough to trigger // recovery, make sure nothing is lost @@ -5100,16 +5120,16 @@ pub mod tests { .cloned(), ) .collect(); - blocktree + blockstore .insert_shreds(shreds1, Some(&leader_schedule_cache), false) .unwrap(); - blocktree + blockstore .insert_shreds(shreds2, Some(&leader_schedule_cache), false) .unwrap(); - verify_index_integrity(&blocktree, slot); - blocktree.purge_slots(0, Some(slot)); + verify_index_integrity(&blockstore, slot); + blockstore.purge_slots(0, Some(slot)); } - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } fn setup_erasure_shreds( @@ -5145,15 +5165,15 @@ pub mod tests { (data_shreds, coding_shreds, Arc::new(leader_schedule_cache)) } - fn verify_index_integrity(blocktree: &Blocktree, slot: u64) { - let index = blocktree.get_index(slot).unwrap().unwrap(); + fn verify_index_integrity(blockstore: &Blockstore, slot: u64) { + let index = blockstore.get_index(slot).unwrap().unwrap(); // Test the set of data shreds in the index and in the data column // family are the same - let data_iter = blocktree.slot_data_iterator(slot).unwrap(); + let data_iter = blockstore.slot_data_iterator(slot).unwrap(); let mut num_data = 0; for ((slot, index), _) in data_iter { num_data += 1; - assert!(blocktree.get_data_shred(slot, index).unwrap().is_some()); + assert!(blockstore.get_data_shred(slot, index).unwrap().is_some()); } // Test the data index doesn't have anything extra @@ -5162,11 +5182,11 @@ pub mod tests { // Test the set of coding shreds in the index and in the coding column // family are the same - let coding_iter = 
blocktree.slot_coding_iterator(slot).unwrap(); + let coding_iter = blockstore.slot_coding_iterator(slot).unwrap(); let mut num_coding = 0; for ((slot, index), _) in coding_iter { num_coding += 1; - assert!(blocktree.get_coding_shred(slot, index).unwrap().is_some()); + assert!(blockstore.get_coding_shred(slot, index).unwrap().is_some()); } // Test the data index doesn't have anything extra diff --git a/ledger/src/blocktree_db.rs b/ledger/src/blockstore_db.rs similarity index 98% rename from ledger/src/blocktree_db.rs rename to ledger/src/blockstore_db.rs index c4dbde38dbc956..a801cb4083ec1a 100644 --- a/ledger/src/blocktree_db.rs +++ b/ledger/src/blockstore_db.rs @@ -1,4 +1,4 @@ -use crate::blocktree_meta; +use crate::blockstore_meta; use bincode::{deserialize, serialize}; use byteorder::{BigEndian, ByteOrder}; use fs_extra; @@ -36,7 +36,7 @@ const CODE_SHRED_CF: &str = "code_shred"; const TRANSACTION_STATUS_CF: &str = "transaction_status"; #[derive(Error, Debug)] -pub enum BlocktreeError { +pub enum BlockstoreError { ShredForIndexExists, InvalidShredData(Box), RocksDb(#[from] rocksdb::Error), @@ -46,11 +46,11 @@ pub enum BlocktreeError { Serialize(#[from] Box), FsExtraError(#[from] fs_extra::error::Error), } -pub(crate) type Result = std::result::Result; +pub(crate) type Result = std::result::Result; -impl std::fmt::Display for BlocktreeError { +impl std::fmt::Display for BlockstoreError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "blocktree error") + write!(f, "blockstore error") } } @@ -344,7 +344,7 @@ impl Column for columns::Index { } impl TypedColumn for columns::Index { - type Type = blocktree_meta::Index; + type Type = blockstore_meta::Index; } impl Column for columns::DeadSlots { @@ -452,7 +452,7 @@ impl Column for columns::SlotMeta { } impl TypedColumn for columns::SlotMeta { - type Type = blocktree_meta::SlotMeta; + type Type = blockstore_meta::SlotMeta; } impl Column for columns::ErasureMeta { @@ -483,7 +483,7 @@ impl Column for columns::ErasureMeta { } impl TypedColumn for columns::ErasureMeta { - type Type = blocktree_meta::ErasureMeta; + type Type = blockstore_meta::ErasureMeta; } #[derive(Debug, Clone)] diff --git a/ledger/src/blocktree_meta.rs b/ledger/src/blockstore_meta.rs similarity index 99% rename from ledger/src/blocktree_meta.rs rename to ledger/src/blockstore_meta.rs index 00c93d248e6532..cfa542313fdf5c 100644 --- a/ledger/src/blocktree_meta.rs +++ b/ledger/src/blockstore_meta.rs @@ -131,7 +131,7 @@ impl SlotMeta { // Should never happen if self.consumed > self.last_index + 1 { datapoint!( - "blocktree_error", + "blockstore_error", ( "error", format!( diff --git a/ledger/src/blocktree_processor.rs b/ledger/src/blockstore_processor.rs similarity index 88% rename from ledger/src/blocktree_processor.rs rename to ledger/src/blockstore_processor.rs index 677358f0efb692..028b9b2708ba24 100644 --- a/ledger/src/blocktree_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -1,8 +1,8 @@ use crate::{ bank_forks::BankForks, block_error::BlockError, - blocktree::Blocktree, - blocktree_meta::SlotMeta, + blockstore::Blockstore, + blockstore_meta::SlotMeta, entry::{create_ticks, Entry, EntrySlice}, leader_schedule_cache::LeaderScheduleCache, }; @@ -235,7 +235,7 @@ pub struct BankForksInfo { } #[derive(Error, Debug, PartialEq)] -pub enum BlocktreeProcessorError { +pub enum BlockstoreProcessorError { #[error("failed to load entries")] FailedToLoadEntries, @@ -252,7 +252,7 @@ pub enum BlocktreeProcessorError { NoValidForksFound, } -/// 
Callback for accessing bank state while processing the blocktree +/// Callback for accessing bank state while processing the blockstore pub type ProcessCallback = Arc () + Sync + Send>; #[derive(Default, Clone)] @@ -264,12 +264,13 @@ pub struct ProcessOptions { pub override_num_threads: Option, } -pub fn process_blocktree( +pub fn process_blockstore( genesis_config: &GenesisConfig, - blocktree: &Blocktree, + blockstore: &Blockstore, account_paths: Vec, opts: ProcessOptions, -) -> result::Result<(BankForks, Vec, LeaderScheduleCache), BlocktreeProcessorError> { +) -> result::Result<(BankForks, Vec, LeaderScheduleCache), BlockstoreProcessorError> +{ if let Some(num_threads) = opts.override_num_threads { PAR_THREAD_POOL.with(|pool| { *pool.borrow_mut() = rayon::ThreadPoolBuilder::new() @@ -282,17 +283,18 @@ pub fn process_blocktree( // Setup bank for slot 0 let bank0 = Arc::new(Bank::new_with_paths(&genesis_config, account_paths)); info!("processing ledger for slot 0..."); - process_bank_0(&bank0, blocktree, &opts)?; - process_blocktree_from_root(genesis_config, blocktree, bank0, &opts) + process_bank_0(&bank0, blockstore, &opts)?; + process_blockstore_from_root(genesis_config, blockstore, bank0, &opts) } -// Process blocktree from a known root bank -pub fn process_blocktree_from_root( +// Process blockstore from a known root bank +pub fn process_blockstore_from_root( genesis_config: &GenesisConfig, - blocktree: &Blocktree, + blockstore: &Blockstore, bank: Arc, opts: &ProcessOptions, -) -> result::Result<(BankForks, Vec, LeaderScheduleCache), BlocktreeProcessorError> { +) -> result::Result<(BankForks, Vec, LeaderScheduleCache), BlockstoreProcessorError> +{ info!("processing ledger from root slot {}...", bank.slot()); let allocated = thread_mem_usage::Allocatedp::default(); let initial_allocation = allocated.get(); @@ -307,13 +309,13 @@ pub fn process_blocktree_from_root( genesis_config.operating_mode, )); - blocktree + blockstore .set_roots(&[start_slot]) .expect("Couldn't set root slot on startup"); - let meta = blocktree.meta(start_slot).unwrap(); + let meta = blockstore.meta(start_slot).unwrap(); - // Iterate and replay slots from blocktree starting from `start_slot` + // Iterate and replay slots from blockstore starting from `start_slot` let (bank_forks, bank_forks_info, leader_schedule_cache) = { if let Some(meta) = meta { let epoch_schedule = bank.epoch_schedule(); @@ -324,7 +326,7 @@ pub fn process_blocktree_from_root( let fork_info = process_pending_slots( &bank, &meta, - blocktree, + blockstore, &mut leader_schedule_cache, &mut rooted_path, opts, @@ -332,13 +334,13 @@ pub fn process_blocktree_from_root( let (banks, bank_forks_info): (Vec<_>, Vec<_>) = fork_info.into_iter().map(|(_, v)| v).unzip(); if banks.is_empty() { - return Err(BlocktreeProcessorError::NoValidForksFound); + return Err(BlockstoreProcessorError::NoValidForksFound); } let bank_forks = BankForks::new_from_banks(&banks, rooted_path); (bank_forks, bank_forks_info, leader_schedule_cache) } else { // If there's no meta for the input `start_slot`, then we started from a snapshot - // and there's no point in processing the rest of blocktree and implies blocktree + // and there's no point in processing the rest of blockstore and implies blockstore // should be empty past this point. 
let bfi = BankForksInfo { bank_slot: start_slot, @@ -369,7 +371,7 @@ fn verify_and_process_slot_entries( entries: &[Entry], last_entry_hash: Hash, opts: &ProcessOptions, -) -> result::Result { +) -> result::Result { assert!(!entries.is_empty()); if opts.poh_verify { @@ -409,7 +411,7 @@ fn verify_and_process_slot_entries( bank.slot(), err ); - BlocktreeProcessorError::InvalidTransaction + BlockstoreProcessorError::InvalidTransaction })?; Ok(entries.last().unwrap().hash) @@ -418,15 +420,15 @@ fn verify_and_process_slot_entries( // Special handling required for processing the entries in slot 0 fn process_bank_0( bank0: &Arc, - blocktree: &Blocktree, + blockstore: &Blockstore, opts: &ProcessOptions, -) -> result::Result<(), BlocktreeProcessorError> { +) -> result::Result<(), BlockstoreProcessorError> { assert_eq!(bank0.slot(), 0); // Fetch all entries for this slot - let entries = blocktree.get_slot_entries(0, 0, None).map_err(|err| { + let entries = blockstore.get_slot_entries(0, 0, None).map_err(|err| { warn!("Failed to load entries for slot 0, err: {:?}", err); - BlocktreeProcessorError::FailedToLoadEntries + BlockstoreProcessorError::FailedToLoadEntries })?; verify_and_process_slot_entries(bank0, &entries, bank0.last_blockhash(), opts) @@ -442,11 +444,11 @@ fn process_bank_0( fn process_next_slots( bank: &Arc, meta: &SlotMeta, - blocktree: &Blocktree, + blockstore: &Blockstore, leader_schedule_cache: &LeaderScheduleCache, pending_slots: &mut Vec<(SlotMeta, Arc, Hash)>, fork_info: &mut HashMap, BankForksInfo)>, -) -> result::Result<(), BlocktreeProcessorError> { +) -> result::Result<(), BlockstoreProcessorError> { if let Some(parent) = bank.parent() { fork_info.remove(&parent.slot()); } @@ -461,15 +463,15 @@ fn process_next_slots( // This is a fork point if there are multiple children, create a new child bank for each fork for next_slot in &meta.next_slots { - let next_meta = blocktree + let next_meta = blockstore .meta(*next_slot) .map_err(|err| { warn!("Failed to load meta for slot {}: {:?}", next_slot, err); - BlocktreeProcessorError::FailedToLoadMeta + BlockstoreProcessorError::FailedToLoadMeta })? 
.unwrap(); - // Only process full slots in blocktree_processor, replay_stage + // Only process full slots in blockstore_processor, replay_stage // handles any partials if next_meta.is_full() { let allocated = thread_mem_usage::Allocatedp::default(); @@ -497,16 +499,16 @@ fn process_next_slots( Ok(()) } -// Iterate through blocktree processing slots starting from the root slot pointed to by the +// Iterate through blockstore processing slots starting from the root slot pointed to by the // given `meta` fn process_pending_slots( root_bank: &Arc, root_meta: &SlotMeta, - blocktree: &Blocktree, + blockstore: &Blockstore, leader_schedule_cache: &mut LeaderScheduleCache, rooted_path: &mut Vec, opts: &ProcessOptions, -) -> result::Result, BankForksInfo)>, BlocktreeProcessorError> { +) -> result::Result, BankForksInfo)>, BlockstoreProcessorError> { let mut fork_info = HashMap::new(); let mut last_status_report = Instant::now(); let mut pending_slots = vec![]; @@ -514,7 +516,7 @@ fn process_pending_slots( process_next_slots( root_bank, root_meta, - blocktree, + blockstore, leader_schedule_cache, &mut pending_slots, &mut fork_info, @@ -535,11 +537,11 @@ fn process_pending_slots( let allocated = thread_mem_usage::Allocatedp::default(); let initial_allocation = allocated.get(); - if process_single_slot(blocktree, &bank, &last_entry_hash, opts).is_err() { + if process_single_slot(blockstore, &bank, &last_entry_hash, opts).is_err() { continue; } - if blocktree.is_root(slot) { + if blockstore.is_root(slot) { let parents = bank.parents().into_iter().map(|b| b.slot()).rev().skip(1); let parents: Vec<_> = parents.collect(); rooted_path.extend(parents); @@ -565,7 +567,7 @@ fn process_pending_slots( process_next_slots( &bank, &meta, - blocktree, + blockstore, leader_schedule_cache, &mut pending_slots, &mut fork_info, @@ -578,17 +580,17 @@ fn process_pending_slots( // Processes and replays the contents of a single slot, returns Error // if failed to play the slot fn process_single_slot( - blocktree: &Blocktree, + blockstore: &Blockstore, bank: &Arc, last_entry_hash: &Hash, opts: &ProcessOptions, -) -> result::Result<(), BlocktreeProcessorError> { +) -> result::Result<(), BlockstoreProcessorError> { let slot = bank.slot(); // Fetch all entries for this slot - let entries = blocktree.get_slot_entries(slot, 0, None).map_err(|err| { + let entries = blockstore.get_slot_entries(slot, 0, None).map_err(|err| { warn!("Failed to load entries for slot {}: {:?}", slot, err); - BlocktreeProcessorError::FailedToLoadEntries + BlockstoreProcessorError::FailedToLoadEntries })?; // If this errors with a fatal error, should mark the slot as dead so @@ -634,8 +636,8 @@ pub fn send_transaction_status_batch( } // used for tests only -pub fn fill_blocktree_slot_with_ticks( - blocktree: &Blocktree, +pub fn fill_blockstore_slot_with_ticks( + blockstore: &Blockstore, ticks_per_slot: u64, slot: u64, parent_slot: u64, @@ -647,7 +649,7 @@ pub fn fill_blocktree_slot_with_ticks( let entries = create_ticks(num_slots * ticks_per_slot, 0, last_entry_hash); let last_entry_hash = entries.last().unwrap().hash; - blocktree + blockstore .write_entries( slot, 0, @@ -688,7 +690,7 @@ pub mod tests { use std::sync::RwLock; #[test] - fn test_process_blocktree_with_missing_hashes() { + fn test_process_blockstore_with_missing_hashes() { solana_logger::setup(); let hashes_per_tick = 2; @@ -699,14 +701,14 @@ pub mod tests { let ticks_per_slot = genesis_config.ticks_per_slot; let (ledger_path, blockhash) = create_new_tmp_ledger!(&genesis_config); - let 
blocktree = - Blocktree::open(&ledger_path).expect("Expected to successfully open database ledger"); + let blockstore = + Blockstore::open(&ledger_path).expect("Expected to successfully open database ledger"); let parent_slot = 0; let slot = 1; let entries = create_ticks(ticks_per_slot, hashes_per_tick - 1, blockhash); assert_matches!( - blocktree.write_entries( + blockstore.write_entries( slot, 0, 0, @@ -720,9 +722,9 @@ pub mod tests { Ok(_) ); - let (_bank_forks, bank_forks_info, _) = process_blocktree( + let (_bank_forks, bank_forks_info, _) = process_blockstore( &genesis_config, - &blocktree, + &blockstore, Vec::new(), ProcessOptions { poh_verify: true, @@ -734,7 +736,7 @@ pub mod tests { } #[test] - fn test_process_blocktree_with_invalid_slot_tick_count() { + fn test_process_blockstore_with_invalid_slot_tick_count() { solana_logger::setup(); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000); @@ -742,14 +744,14 @@ pub mod tests { // Create a new ledger with slot 0 full of ticks let (ledger_path, blockhash) = create_new_tmp_ledger!(&genesis_config); - let blocktree = Blocktree::open(&ledger_path).unwrap(); + let blockstore = Blockstore::open(&ledger_path).unwrap(); // Write slot 1 with one tick missing let parent_slot = 0; let slot = 1; let entries = create_ticks(ticks_per_slot - 1, 0, blockhash); assert_matches!( - blocktree.write_entries( + blockstore.write_entries( slot, 0, 0, @@ -764,9 +766,9 @@ pub mod tests { ); // Should return slot 0, the last slot on the fork that is valid - let (_bank_forks, bank_forks_info, _) = process_blocktree( + let (_bank_forks, bank_forks_info, _) = process_blockstore( &genesis_config, - &blocktree, + &blockstore, Vec::new(), ProcessOptions { poh_verify: true, @@ -778,11 +780,11 @@ pub mod tests { // Write slot 2 fully let _last_slot2_entry_hash = - fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 2, 0, blockhash); + fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 2, 0, blockhash); - let (_bank_forks, bank_forks_info, _) = process_blocktree( + let (_bank_forks, bank_forks_info, _) = process_blockstore( &genesis_config, - &blocktree, + &blockstore, Vec::new(), ProcessOptions { poh_verify: true, @@ -791,12 +793,12 @@ pub mod tests { ) .unwrap(); - // One valid fork, one bad fork. process_blocktree() should only return the valid fork + // One valid fork, one bad fork. process_blockstore() should only return the valid fork assert_eq!(bank_forks_info, vec![BankForksInfo { bank_slot: 2 }]); } #[test] - fn test_process_blocktree_with_slot_with_trailing_entry() { + fn test_process_blockstore_with_slot_with_trailing_entry() { solana_logger::setup(); let GenesisConfigInfo { @@ -807,7 +809,7 @@ pub mod tests { let ticks_per_slot = genesis_config.ticks_per_slot; let (ledger_path, blockhash) = create_new_tmp_ledger!(&genesis_config); - let blocktree = Blocktree::open(&ledger_path).unwrap(); + let blockstore = Blockstore::open(&ledger_path).unwrap(); let mut entries = create_ticks(ticks_per_slot, 0, blockhash); let trailing_entry = { @@ -817,12 +819,12 @@ pub mod tests { }; entries.push(trailing_entry); - // Tricks blocktree into writing the trailing entry by lying that there is one more tick + // Tricks blockstore into writing the trailing entry by lying that there is one more tick // per slot. 
let parent_slot = 0; let slot = 1; assert_matches!( - blocktree.write_entries( + blockstore.write_entries( slot, 0, 0, @@ -841,19 +843,19 @@ pub mod tests { ..ProcessOptions::default() }; let (_bank_forks, bank_forks_info, _) = - process_blocktree(&genesis_config, &blocktree, Vec::new(), opts).unwrap(); + process_blockstore(&genesis_config, &blockstore, Vec::new(), opts).unwrap(); assert_eq!(bank_forks_info, vec![BankForksInfo { bank_slot: 0 }]); } #[test] - fn test_process_blocktree_with_incomplete_slot() { + fn test_process_blockstore_with_incomplete_slot() { solana_logger::setup(); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000); let ticks_per_slot = genesis_config.ticks_per_slot; /* - Build a blocktree in the ledger with the following fork structure: + Build a blockstore in the ledger with the following fork structure: slot 0 (all ticks) | @@ -868,8 +870,8 @@ pub mod tests { let (ledger_path, mut blockhash) = create_new_tmp_ledger!(&genesis_config); debug!("ledger_path: {:?}", ledger_path); - let blocktree = - Blocktree::open(&ledger_path).expect("Expected to successfully open database ledger"); + let blockstore = + Blockstore::open(&ledger_path).expect("Expected to successfully open database ledger"); // Write slot 1 // slot 1, points at slot 0. Missing one tick @@ -883,7 +885,7 @@ pub mod tests { entries.pop(); assert_matches!( - blocktree.write_entries( + blockstore.write_entries( slot, 0, 0, @@ -899,14 +901,14 @@ pub mod tests { } // slot 2, points at slot 1 - fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 2, 1, blockhash); + fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 2, 1, blockhash); let opts = ProcessOptions { poh_verify: true, ..ProcessOptions::default() }; let (mut _bank_forks, bank_forks_info, _) = - process_blocktree(&genesis_config, &blocktree, Vec::new(), opts.clone()).unwrap(); + process_blockstore(&genesis_config, &blockstore, Vec::new(), opts.clone()).unwrap(); assert_eq!(bank_forks_info.len(), 1); assert_eq!( @@ -928,10 +930,10 @@ pub mod tests { poh_verify: true, ..ProcessOptions::default() }; - fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 3, 0, blockhash); + fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 3, 0, blockhash); // Slot 0 should not show up in the ending bank_forks_info let (mut _bank_forks, bank_forks_info, _) = - process_blocktree(&genesis_config, &blocktree, Vec::new(), opts).unwrap(); + process_blockstore(&genesis_config, &blockstore, Vec::new(), opts).unwrap(); assert_eq!(bank_forks_info.len(), 1); assert_eq!( @@ -943,7 +945,7 @@ pub mod tests { } #[test] - fn test_process_blocktree_with_two_forks_and_squash() { + fn test_process_blockstore_with_two_forks_and_squash() { solana_logger::setup(); let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(10_000); @@ -955,7 +957,7 @@ pub mod tests { let mut last_entry_hash = blockhash; /* - Build a blocktree in the ledger with the following fork structure: + Build a blockstore in the ledger with the following fork structure: slot 0 | @@ -968,32 +970,42 @@ pub mod tests { slot 4 <-- set_root(true) */ - let blocktree = - Blocktree::open(&ledger_path).expect("Expected to successfully open database ledger"); + let blockstore = + Blockstore::open(&ledger_path).expect("Expected to successfully open database ledger"); // Fork 1, ending at slot 3 let last_slot1_entry_hash = - fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 1, 0, last_entry_hash); - last_entry_hash = - fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 2, 1, last_slot1_entry_hash); + fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 1, 0, last_entry_hash); + last_entry_hash = fill_blockstore_slot_with_ticks( + &blockstore, + ticks_per_slot, + 2, + 1, + last_slot1_entry_hash, + ); let last_fork1_entry_hash = - fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 3, 2, last_entry_hash); + fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 3, 2, last_entry_hash); // Fork 2, ending at slot 4 - let last_fork2_entry_hash = - fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 4, 1, last_slot1_entry_hash); + let last_fork2_entry_hash = fill_blockstore_slot_with_ticks( + &blockstore, + ticks_per_slot, + 4, + 1, + last_slot1_entry_hash, + ); info!("last_fork1_entry.hash: {:?}", last_fork1_entry_hash); info!("last_fork2_entry.hash: {:?}", last_fork2_entry_hash); - blocktree.set_roots(&[0, 1, 4]).unwrap(); + blockstore.set_roots(&[0, 1, 4]).unwrap(); let opts = ProcessOptions { poh_verify: true, ..ProcessOptions::default() }; let (bank_forks, bank_forks_info, _) = - process_blocktree(&genesis_config, &blocktree, Vec::new(), opts).unwrap(); + process_blockstore(&genesis_config, &blockstore, Vec::new(), opts).unwrap(); assert_eq!(bank_forks_info.len(), 1); // One fork, other one is ignored b/c not a descendant of the root @@ -1017,7 +1029,7 @@ pub mod tests { } #[test] - fn test_process_blocktree_with_two_forks() { + fn test_process_blockstore_with_two_forks() { solana_logger::setup(); let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(10_000); @@ -1029,7 +1041,7 @@ pub mod tests { let mut last_entry_hash = blockhash; /* - Build a blocktree in the ledger with the following fork structure: + Build a blockstore in the ledger with the following fork structure: slot 0 | @@ -1042,32 +1054,42 @@ pub mod tests { slot 4 */ - let blocktree = - Blocktree::open(&ledger_path).expect("Expected to successfully open database ledger"); + let blockstore = + Blockstore::open(&ledger_path).expect("Expected to successfully open database ledger"); // Fork 1, ending at slot 3 let last_slot1_entry_hash = - fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 1, 0, last_entry_hash); - last_entry_hash = - fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 2, 1, last_slot1_entry_hash); + fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 1, 0, last_entry_hash); + last_entry_hash = fill_blockstore_slot_with_ticks( + &blockstore, + ticks_per_slot, + 2, + 1, + last_slot1_entry_hash, + ); let last_fork1_entry_hash = - fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 3, 2, last_entry_hash); + fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 3, 2, last_entry_hash); // Fork 2, ending at slot 4 - let last_fork2_entry_hash = - fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 4, 1, last_slot1_entry_hash); + let last_fork2_entry_hash = fill_blockstore_slot_with_ticks( + &blockstore, + ticks_per_slot, + 4, + 1, + last_slot1_entry_hash, + ); info!("last_fork1_entry.hash: {:?}", last_fork1_entry_hash); info!("last_fork2_entry.hash: {:?}", last_fork2_entry_hash); - blocktree.set_roots(&[0, 1]).unwrap(); + blockstore.set_roots(&[0, 1]).unwrap(); let opts = ProcessOptions { poh_verify: true, ..ProcessOptions::default() }; let (bank_forks, mut bank_forks_info, _) = - process_blocktree(&genesis_config, &blocktree, Vec::new(), opts).unwrap(); + process_blockstore(&genesis_config, &blockstore, Vec::new(), opts).unwrap(); bank_forks_info.sort_by(|a, b| a.bank_slot.cmp(&b.bank_slot)); assert_eq!(bank_forks_info.len(), 2); // There are two forks @@ -1107,7 +1129,7 @@ pub mod tests { } #[test] - fn test_process_blocktree_with_dead_slot() { + fn test_process_blockstore_with_dead_slot() { solana_logger::setup(); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000); @@ -1125,16 +1147,16 @@ pub mod tests { \ slot 3 */ - let blocktree = Blocktree::open(&ledger_path).unwrap(); + let blockstore = Blockstore::open(&ledger_path).unwrap(); let slot1_blockhash = - fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 1, 0, blockhash); - fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 2, 1, slot1_blockhash); - blocktree.set_dead_slot(2).unwrap(); - fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 3, 1, slot1_blockhash); + fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 1, 0, blockhash); + fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 2, 1, slot1_blockhash); + blockstore.set_dead_slot(2).unwrap(); + fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 3, 1, slot1_blockhash); - let (bank_forks, bank_forks_info, _) = process_blocktree( + let (bank_forks, bank_forks_info, _) = process_blockstore( &genesis_config, - &blocktree, + &blockstore, Vec::new(), ProcessOptions::default(), ) @@ -1154,7 +1176,7 @@ pub mod tests { } #[test] - fn test_process_blocktree_with_dead_child() { + fn test_process_blockstore_with_dead_child() { solana_logger::setup(); let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(10_000); @@ -1172,18 +1194,18 @@ pub mod tests { / \ slot 4 (dead) slot 3 */ - let blocktree = Blocktree::open(&ledger_path).unwrap(); + let blockstore = Blockstore::open(&ledger_path).unwrap(); let slot1_blockhash = - fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 1, 0, blockhash); + fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 1, 0, blockhash); let slot2_blockhash = - fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 2, 1, slot1_blockhash); - fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 4, 2, slot2_blockhash); - blocktree.set_dead_slot(4).unwrap(); - fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 3, 1, slot1_blockhash); + fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 2, 1, slot1_blockhash); + fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 4, 2, slot2_blockhash); + blockstore.set_dead_slot(4).unwrap(); + fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 3, 1, slot1_blockhash); - let (bank_forks, mut bank_forks_info, _) = process_blocktree( + let (bank_forks, mut bank_forks_info, _) = process_blockstore( &genesis_config, - &blocktree, + &blockstore, Vec::new(), ProcessOptions::default(), ) @@ -1229,14 +1251,14 @@ pub mod tests { / \ slot 1 (dead) slot 2 (dead) */ - let blocktree = Blocktree::open(&ledger_path).unwrap(); - fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 1, 0, blockhash); - fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 2, 0, blockhash); - blocktree.set_dead_slot(1).unwrap(); - blocktree.set_dead_slot(2).unwrap(); - let (bank_forks, bank_forks_info, _) = process_blocktree( + let blockstore = Blockstore::open(&ledger_path).unwrap(); + fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 1, 0, blockhash); + fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 2, 0, blockhash); + blockstore.set_dead_slot(1).unwrap(); + blockstore.set_dead_slot(2).unwrap(); + let (bank_forks, bank_forks_info, _) = process_blockstore( &genesis_config, - &blocktree, + &blockstore, Vec::new(), ProcessOptions::default(), ) @@ -1249,7 +1271,7 @@ pub mod tests { } #[test] - fn test_process_blocktree_epoch_boundary_root() { + fn test_process_blockstore_epoch_boundary_root() { solana_logger::setup(); let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(10_000); @@ -1259,8 +1281,8 @@ pub mod tests { let (ledger_path, blockhash) = create_new_tmp_ledger!(&genesis_config); let mut last_entry_hash = blockhash; - let blocktree = - Blocktree::open(&ledger_path).expect("Expected to successfully open database ledger"); + let blockstore = + Blockstore::open(&ledger_path).expect("Expected to successfully open database ledger"); // Let last_slot be the number of slots in the first two epochs let epoch_schedule = get_epoch_schedule(&genesis_config, Vec::new()); @@ -1268,8 +1290,8 @@ pub mod tests { // Create a single chain of slots with all indexes in the range [0, last_slot + 1] for i in 1..=last_slot + 1 { - last_entry_hash = fill_blocktree_slot_with_ticks( - &blocktree, + last_entry_hash = fill_blockstore_slot_with_ticks( + &blockstore, ticks_per_slot, i, i - 1, @@ -1279,10 +1301,10 @@ pub mod tests { // Set a root on the last slot of the last confirmed epoch let rooted_slots: Vec<_> = (0..=last_slot).collect(); - blocktree.set_roots(&rooted_slots).unwrap(); + blockstore.set_roots(&rooted_slots).unwrap(); // Set a root on the next slot of the confrimed epoch - blocktree.set_roots(&[last_slot + 1]).unwrap(); + blockstore.set_roots(&[last_slot + 1]).unwrap(); // Check that we can properly restart the ledger / leader scheduler doesn't fail let opts = ProcessOptions { @@ -1290,7 +1312,7 @@ pub mod tests { ..ProcessOptions::default() }; let (bank_forks, bank_forks_info, _) = - process_blocktree(&genesis_config, &blocktree, Vec::new(), opts).unwrap(); + process_blockstore(&genesis_config, &blockstore, Vec::new(), opts).unwrap(); assert_eq!(bank_forks_info.len(), 1); // There is one fork assert_eq!( @@ -1418,9 +1440,9 @@ pub mod tests { )); let last_blockhash = entries.last().unwrap().hash; - let blocktree = - Blocktree::open(&ledger_path).expect("Expected to successfully open database ledger"); - blocktree + let blockstore = + Blockstore::open(&ledger_path).expect("Expected to successfully open database ledger"); + blockstore .write_entries( 1, 0, @@ -1438,7 +1460,7 @@ pub mod tests { ..ProcessOptions::default() }; let (bank_forks, bank_forks_info, _) = - process_blocktree(&genesis_config, &blocktree, Vec::new(), opts).unwrap(); + process_blockstore(&genesis_config, &blockstore, Vec::new(), opts).unwrap(); assert_eq!(bank_forks_info.len(), 1); assert_eq!(bank_forks.root(), 0); @@ -1461,13 +1483,13 @@ pub mod tests { genesis_config.ticks_per_slot = 1; let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config); - let blocktree = Blocktree::open(&ledger_path).unwrap(); + let blockstore = Blockstore::open(&ledger_path).unwrap(); let opts = ProcessOptions { poh_verify: true, ..ProcessOptions::default() }; let (bank_forks, bank_forks_info, _) = - process_blocktree(&genesis_config, &blocktree, Vec::new(), opts).unwrap(); + process_blockstore(&genesis_config, &blockstore, Vec::new(), opts).unwrap(); assert_eq!(bank_forks_info.len(), 1); assert_eq!(bank_forks_info[0], BankForksInfo { bank_slot: 0 }); @@ -1480,12 +1502,12 @@ pub mod tests { let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(123); let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config); - let blocktree = Blocktree::open(&ledger_path).unwrap(); + let blockstore = Blockstore::open(&ledger_path).unwrap(); let opts = ProcessOptions { override_num_threads: Some(1), ..ProcessOptions::default() }; - process_blocktree(&genesis_config, &blocktree, Vec::new(), opts).unwrap(); + process_blockstore(&genesis_config, &blockstore, Vec::new(), opts).unwrap(); PAR_THREAD_POOL.with(|pool| { assert_eq!(pool.borrow().current_num_threads(), 1); }); @@ -1496,13 +1518,13 @@ pub mod tests { let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(123); let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config); - let blocktree = Blocktree::open(&ledger_path).unwrap(); + let blockstore = Blockstore::open(&ledger_path).unwrap(); let opts = ProcessOptions { full_leader_cache: true, ..ProcessOptions::default() }; let (_bank_forks, _bank_forks_info, cached_leader_schedule) = - process_blocktree(&genesis_config, &blocktree, Vec::new(), opts).unwrap(); + process_blockstore(&genesis_config, &blockstore, Vec::new(), opts).unwrap(); assert_eq!(cached_leader_schedule.max_schedules(), std::usize::MAX); } @@ -1514,8 +1536,8 @@ pub mod tests { .. } = create_genesis_config(100); let (ledger_path, last_entry_hash) = create_new_tmp_ledger!(&genesis_config); - let blocktree = - Blocktree::open(&ledger_path).expect("Expected to successfully open database ledger"); + let blockstore = + Blockstore::open(&ledger_path).expect("Expected to successfully open database ledger"); let blockhash = genesis_config.hash(); let keypairs = [Keypair::new(), Keypair::new(), Keypair::new()]; @@ -1531,7 +1553,7 @@ pub mod tests { 0, last_entry_hash, )); - blocktree + blockstore .write_entries( 1, 0, @@ -1562,7 +1584,7 @@ pub mod tests { entry_callback: Some(entry_callback), ..ProcessOptions::default() }; - process_blocktree(&genesis_config, &blocktree, Vec::new(), opts).unwrap(); + process_blockstore(&genesis_config, &blockstore, Vec::new(), opts).unwrap(); assert_eq!(*callback_counter.write().unwrap(), 2); } @@ -2183,7 +2205,7 @@ pub mod tests { } #[test] - fn test_process_blocktree_from_root() { + fn test_process_blockstore_from_root() { let GenesisConfigInfo { mut genesis_config, .. 
} = create_genesis_config(123); @@ -2191,10 +2213,10 @@ pub mod tests { let ticks_per_slot = 1; genesis_config.ticks_per_slot = ticks_per_slot; let (ledger_path, blockhash) = create_new_tmp_ledger!(&genesis_config); - let blocktree = Blocktree::open(&ledger_path).unwrap(); + let blockstore = Blockstore::open(&ledger_path).unwrap(); /* - Build a blocktree in the ledger with the following fork structure: + Build a blockstore in the ledger with the following fork structure: slot 0 (all ticks) | @@ -2214,9 +2236,9 @@ pub mod tests { let mut last_hash = blockhash; for i in 0..6 { last_hash = - fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, i + 1, i, last_hash); + fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, i + 1, i, last_hash); } - blocktree.set_roots(&[3, 5]).unwrap(); + blockstore.set_roots(&[3, 5]).unwrap(); // Set up bank1 let bank0 = Arc::new(Bank::new(&genesis_config)); @@ -2224,16 +2246,16 @@ pub mod tests { poh_verify: true, ..ProcessOptions::default() }; - process_bank_0(&bank0, &blocktree, &opts).unwrap(); + process_bank_0(&bank0, &blockstore, &opts).unwrap(); let bank1 = Arc::new(Bank::new_from_parent(&bank0, &Pubkey::default(), 1)); - let slot1_entries = blocktree.get_slot_entries(1, 0, None).unwrap(); + let slot1_entries = blockstore.get_slot_entries(1, 0, None).unwrap(); verify_and_process_slot_entries(&bank1, &slot1_entries, bank0.last_blockhash(), &opts) .unwrap(); bank1.squash(); - // Test process_blocktree_from_root() from slot 1 onwards + // Test process_blockstore_from_root() from slot 1 onwards let (bank_forks, bank_forks_info, _) = - process_blocktree_from_root(&genesis_config, &blocktree, bank1, &opts).unwrap(); + process_blockstore_from_root(&genesis_config, &blockstore, bank1, &opts).unwrap(); assert_eq!(bank_forks_info.len(), 1); // One fork assert_eq!( diff --git a/ledger/src/erasure.rs b/ledger/src/erasure.rs index e7d6d30fa1cbfd..a1322724c3daaa 100644 --- a/ledger/src/erasure.rs +++ b/ledger/src/erasure.rs @@ -135,7 +135,7 @@ pub mod test { use solana_sdk::clock::Slot; /// Specifies the contents of a 16-data-shred and 4-coding-shred erasure set - /// Exists to be passed to `generate_blocktree_with_coding` + /// Exists to be passed to `generate_blockstore_with_coding` #[derive(Debug, Copy, Clone)] pub struct ErasureSpec { /// Which 16-shred erasure set this represents @@ -145,7 +145,7 @@ pub mod test { } /// Specifies the contents of a slot - /// Exists to be passed to `generate_blocktree_with_coding` + /// Exists to be passed to `generate_blockstore_with_coding` #[derive(Debug, Clone)] pub struct SlotSpec { pub slot: Slot, diff --git a/ledger/src/leader_schedule_cache.rs b/ledger/src/leader_schedule_cache.rs index e5b6a05677967b..981f5babed3a49 100644 --- a/ledger/src/leader_schedule_cache.rs +++ b/ledger/src/leader_schedule_cache.rs @@ -1,5 +1,5 @@ use crate::{ - blocktree::Blocktree, + blockstore::Blockstore, leader_schedule::{FixedSchedule, LeaderSchedule}, leader_schedule_utils, }; @@ -105,7 +105,7 @@ impl LeaderScheduleCache { pubkey: &Pubkey, mut current_slot: Slot, bank: &Bank, - blocktree: Option<&Blocktree>, + blockstore: Option<&Blockstore>, ) -> Option<(Slot, Slot)> { let (mut epoch, mut start_index) = bank.get_epoch_and_slot_index(current_slot + 1); let mut first_slot = None; @@ -132,8 +132,8 @@ impl LeaderScheduleCache { for i in start_index..bank.get_slots_in_epoch(epoch) { current_slot += 1; if *pubkey == leader_schedule[i] { - if let Some(blocktree) = blocktree { - if let Some(meta) = blocktree.meta(current_slot).unwrap() 
{ + if let Some(blockstore) = blockstore { + if let Some(meta) = blockstore.meta(current_slot).unwrap() { // We have already sent a shred for this slot, so skip it if meta.received > 0 { continue; @@ -255,7 +255,7 @@ impl LeaderScheduleCache { mod tests { use super::*; use crate::{ - blocktree::make_slot_entries, + blockstore::make_slot_entries, genesis_utils::{ create_genesis_config, create_genesis_config_with_leader, GenesisConfigInfo, BOOTSTRAP_LEADER_LAMPORTS, @@ -424,7 +424,7 @@ mod tests { } #[test] - fn test_next_leader_slot_blocktree() { + fn test_next_leader_slot_blockstore() { let pubkey = Pubkey::new_rand(); let mut genesis_config = create_genesis_config_with_leader( BOOTSTRAP_LEADER_LAMPORTS, @@ -438,8 +438,9 @@ mod tests { let cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank)); let ledger_path = get_tmp_ledger_path!(); { - let blocktree = Arc::new( - Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"), + let blockstore = Arc::new( + Blockstore::open(&ledger_path) + .expect("Expected to be able to open database ledger"), ); assert_eq!( @@ -449,7 +450,7 @@ mod tests { // Check that the next leader slot after 0 is slot 1 assert_eq!( cache - .next_leader_slot(&pubkey, 0, &bank, Some(&blocktree)) + .next_leader_slot(&pubkey, 0, &bank, Some(&blockstore)) .unwrap() .0, 1 @@ -458,10 +459,10 @@ mod tests { // Write a shred into slot 2 that chains to slot 1, // but slot 1 is empty so should not be skipped let (shreds, _) = make_slot_entries(2, 1, 1); - blocktree.insert_shreds(shreds, None, false).unwrap(); + blockstore.insert_shreds(shreds, None, false).unwrap(); assert_eq!( cache - .next_leader_slot(&pubkey, 0, &bank, Some(&blocktree)) + .next_leader_slot(&pubkey, 0, &bank, Some(&blockstore)) .unwrap() .0, 1 @@ -471,10 +472,10 @@ mod tests { let (shreds, _) = make_slot_entries(1, 0, 1); // Check that slot 1 and 2 are skipped - blocktree.insert_shreds(shreds, None, false).unwrap(); + blockstore.insert_shreds(shreds, None, false).unwrap(); assert_eq!( cache - .next_leader_slot(&pubkey, 0, &bank, Some(&blocktree)) + .next_leader_slot(&pubkey, 0, &bank, Some(&blockstore)) .unwrap() .0, 3 @@ -486,7 +487,7 @@ mod tests { &pubkey, 2 * genesis_config.epoch_schedule.slots_per_epoch - 1, // no schedule generated for epoch 2 &bank, - Some(&blocktree) + Some(&blockstore) ), None ); @@ -496,12 +497,12 @@ mod tests { &Pubkey::new_rand(), // not in leader_schedule 0, &bank, - Some(&blocktree) + Some(&blockstore) ), None ); } - Blocktree::destroy(&ledger_path).unwrap(); + Blockstore::destroy(&ledger_path).unwrap(); } #[test] diff --git a/ledger/src/lib.rs b/ledger/src/lib.rs index 731d156338d493..c7d2f719044f4b 100644 --- a/ledger/src/lib.rs +++ b/ledger/src/lib.rs @@ -2,10 +2,10 @@ pub mod bank_forks; pub mod bank_forks_utils; pub mod block_error; #[macro_use] -pub mod blocktree; -pub mod blocktree_db; -mod blocktree_meta; -pub mod blocktree_processor; +pub mod blockstore; +pub mod blockstore_db; +mod blockstore_meta; +pub mod blockstore_processor; pub mod entry; pub mod erasure; pub mod genesis_utils; diff --git a/ledger/src/rooted_slot_iterator.rs b/ledger/src/rooted_slot_iterator.rs index fbe15d5e423fd1..3b95eece55b380 100644 --- a/ledger/src/rooted_slot_iterator.rs +++ b/ledger/src/rooted_slot_iterator.rs @@ -1,24 +1,24 @@ -use crate::blocktree_db::Result; -use crate::{blocktree::*, blocktree_meta::SlotMeta}; +use crate::blockstore_db::Result; +use crate::{blockstore::*, blockstore_meta::SlotMeta}; use log::*; use solana_sdk::clock::Slot; pub struct 
RootedSlotIterator<'a> { next_slots: Vec, prev_root: Slot, - blocktree: &'a Blocktree, + blockstore: &'a Blockstore, } impl<'a> RootedSlotIterator<'a> { - pub fn new(start_slot: Slot, blocktree: &'a Blocktree) -> Result { - if blocktree.is_root(start_slot) { + pub fn new(start_slot: Slot, blockstore: &'a Blockstore) -> Result { + if blockstore.is_root(start_slot) { Ok(Self { next_slots: vec![start_slot], prev_root: start_slot, - blocktree, + blockstore, }) } else { - Err(BlocktreeError::SlotNotRooted) + Err(BlockstoreError::SlotNotRooted) } } } @@ -31,11 +31,11 @@ impl<'a> Iterator for RootedSlotIterator<'a> { let (rooted_slot, slot_skipped) = self .next_slots .iter() - .find(|x| self.blocktree.is_root(**x)) + .find(|x| self.blockstore.is_root(**x)) .map(|x| (Some(*x), false)) .unwrap_or_else(|| { let mut iter = self - .blocktree + .blockstore .rooted_slot_iterator( // First iteration the root always exists as guaranteed by the constructor, // so this unwrap_or_else cases won't be hit. Every subsequent iteration @@ -49,7 +49,7 @@ impl<'a> Iterator for RootedSlotIterator<'a> { let slot_meta = rooted_slot .map(|r| { - self.blocktree + self.blockstore .meta(r) .expect("Database failure, couldnt fetch SlotMeta") }) @@ -77,17 +77,17 @@ impl<'a> Iterator for RootedSlotIterator<'a> { #[cfg(test)] mod tests { use super::*; - use crate::blocktree_processor::fill_blocktree_slot_with_ticks; + use crate::blockstore_processor::fill_blockstore_slot_with_ticks; use solana_sdk::hash::Hash; #[test] fn test_rooted_slot_iterator() { - let blocktree_path = get_tmp_ledger_path!(); - let blocktree = Blocktree::open(&blocktree_path).unwrap(); - blocktree.set_roots(&[0]).unwrap(); + let blockstore_path = get_tmp_ledger_path!(); + let blockstore = Blockstore::open(&blockstore_path).unwrap(); + blockstore.set_roots(&[0]).unwrap(); let ticks_per_slot = 5; /* - Build a blocktree in the ledger with the following fork structure: + Build a blockstore in the ledger with the following fork structure: slot 0 | @@ -113,8 +113,8 @@ mod tests { slot - 1 } }; - let last_entry_hash = fill_blocktree_slot_with_ticks( - &blocktree, + let last_entry_hash = fill_blockstore_slot_with_ticks( + &blockstore, ticks_per_slot, slot, parent, @@ -128,16 +128,16 @@ mod tests { // Fork 2, ending at slot 4 let _ = - fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 4, fork_point, fork_hash); + fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 4, fork_point, fork_hash); // Set a root - blocktree.set_roots(&[1, 2, 3]).unwrap(); + blockstore.set_roots(&[1, 2, 3]).unwrap(); // Trying to get an iterator on a different fork will error - assert!(RootedSlotIterator::new(4, &blocktree).is_err()); + assert!(RootedSlotIterator::new(4, &blockstore).is_err()); // Trying to get an iterator on any slot on the root fork should succeed - let result: Vec<_> = RootedSlotIterator::new(3, &blocktree) + let result: Vec<_> = RootedSlotIterator::new(3, &blockstore) .unwrap() .into_iter() .map(|(slot, _)| slot) @@ -145,7 +145,7 @@ mod tests { let expected = vec![3]; assert_eq!(result, expected); - let result: Vec<_> = RootedSlotIterator::new(0, &blocktree) + let result: Vec<_> = RootedSlotIterator::new(0, &blockstore) .unwrap() .into_iter() .map(|(slot, _)| slot) @@ -153,17 +153,17 @@ mod tests { let expected = vec![0, 1, 2, 3]; assert_eq!(result, expected); - drop(blocktree); - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + drop(blockstore); + Blockstore::destroy(&blockstore_path).expect("Expected 
successful database destruction"); } #[test] fn test_skipping_rooted_slot_iterator() { - let blocktree_path = get_tmp_ledger_path!(); - let blocktree = Blocktree::open(&blocktree_path).unwrap(); + let blockstore_path = get_tmp_ledger_path!(); + let blockstore = Blockstore::open(&blockstore_path).unwrap(); let ticks_per_slot = 5; /* - Build a blocktree in the ledger with the following fork structure: + Build a blockstore in the ledger with the following fork structure: slot 0 | slot 1 @@ -188,8 +188,8 @@ mod tests { slot - 1 } }; - fill_blocktree_slot_with_ticks( - &blocktree, + fill_blockstore_slot_with_ticks( + &blockstore, ticks_per_slot, slot, parent, @@ -198,14 +198,14 @@ mod tests { } // Set roots - blocktree.set_roots(&[0, 1, 2, 3]).unwrap(); + blockstore.set_roots(&[0, 1, 2, 3]).unwrap(); // Create one post-skip slot at 10, simulating starting from a snapshot // at 10 - blocktree.set_roots(&[10]).unwrap(); + blockstore.set_roots(&[10]).unwrap(); // Try to get an iterator from before the skip. The post-skip slot // should not return a SlotMeta - let result: Vec<_> = RootedSlotIterator::new(3, &blocktree) + let result: Vec<_> = RootedSlotIterator::new(3, &blockstore) .unwrap() .into_iter() .map(|(slot, meta)| (slot, meta.is_some())) @@ -214,12 +214,12 @@ mod tests { assert_eq!(result, expected); // Create one more post-skip slot at 11 with parent equal to 10 - fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 11, 10, Hash::default()); + fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 11, 10, Hash::default()); // Set roots - blocktree.set_roots(&[11]).unwrap(); + blockstore.set_roots(&[11]).unwrap(); - let result: Vec<_> = RootedSlotIterator::new(0, &blocktree) + let result: Vec<_> = RootedSlotIterator::new(0, &blockstore) .unwrap() .into_iter() .map(|(slot, meta)| (slot, meta.is_some())) @@ -234,7 +234,7 @@ mod tests { ]; assert_eq!(result, expected); - drop(blocktree); - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + drop(blockstore); + Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } } diff --git a/ledger/tests/blocktree.rs b/ledger/tests/blockstore.rs similarity index 60% rename from ledger/tests/blocktree.rs rename to ledger/tests/blockstore.rs index acc39c548f4256..04588a027e3100 100644 --- a/ledger/tests/blocktree.rs +++ b/ledger/tests/blockstore.rs @@ -1,6 +1,6 @@ use solana_ledger::entry; use solana_ledger::{ - blocktree::{self, Blocktree}, + blockstore::{self, Blockstore}, get_tmp_ledger_path, }; use solana_sdk::hash::Hash; @@ -9,8 +9,8 @@ use std::thread::Builder; #[test] fn test_multiple_threads_insert_shred() { - let blocktree_path = get_tmp_ledger_path!(); - let blocktree = Arc::new(Blocktree::open(&blocktree_path).unwrap()); + let blockstore_path = get_tmp_ledger_path!(); + let blockstore = Arc::new(Blockstore::open(&blockstore_path).unwrap()); for _ in 0..100 { let num_threads = 10; @@ -20,12 +20,12 @@ fn test_multiple_threads_insert_shred() { let threads: Vec<_> = (0..num_threads) .map(|i| { let entries = entry::create_ticks(1, 0, Hash::default()); - let shreds = blocktree::entries_to_test_shreds(entries, i + 1, 0, false, 0); - let blocktree_ = blocktree.clone(); + let shreds = blockstore::entries_to_test_shreds(entries, i + 1, 0, false, 0); + let blockstore_ = blockstore.clone(); Builder::new() - .name("blocktree-writer".to_string()) + .name("blockstore-writer".to_string()) .spawn(move || { - blocktree_.insert_shreds(shreds, None, false).unwrap(); + 
blockstore_.insert_shreds(shreds, None, false).unwrap(); }) .unwrap() }) @@ -36,16 +36,16 @@ fn test_multiple_threads_insert_shred() { } // Check slot 0 has the correct children - let mut meta0 = blocktree.meta(0).unwrap().unwrap(); + let mut meta0 = blockstore.meta(0).unwrap().unwrap(); meta0.next_slots.sort(); let expected_next_slots: Vec<_> = (1..num_threads + 1).collect(); assert_eq!(meta0.next_slots, expected_next_slots); // Delete slots for next iteration - blocktree.purge_slots(0, None); + blockstore.purge_slots(0, None); } // Cleanup - drop(blocktree); - Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); + drop(blockstore); + Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } diff --git a/local-cluster/src/cluster_tests.rs b/local-cluster/src/cluster_tests.rs index d301f76ef8825e..82eaa1bf68c422 100644 --- a/local-cluster/src/cluster_tests.rs +++ b/local-cluster/src/cluster_tests.rs @@ -10,7 +10,7 @@ use solana_core::{ gossip_service::discover_cluster, }; use solana_ledger::{ - blocktree::Blocktree, + blockstore::Blockstore, entry::{Entry, EntrySlice}, }; use solana_sdk::{ @@ -140,7 +140,7 @@ pub fn validator_exit(entry_point_info: &ContactInfo, nodes: usize) { } pub fn verify_ledger_ticks(ledger_path: &Path, ticks_per_slot: usize) { - let ledger = Blocktree::open(ledger_path).unwrap(); + let ledger = Blockstore::open(ledger_path).unwrap(); let zeroth_slot = ledger.get_slot_entries(0, 0, None).unwrap(); let last_id = zeroth_slot.last().unwrap().hash; let next_slots = ledger.get_slots_since(&[0]).unwrap().remove(&0).unwrap(); @@ -301,19 +301,23 @@ fn poll_all_nodes_for_signature( Ok(()) } -fn get_and_verify_slot_entries(blocktree: &Blocktree, slot: Slot, last_entry: &Hash) -> Vec { - let entries = blocktree.get_slot_entries(slot, 0, None).unwrap(); +fn get_and_verify_slot_entries( + blockstore: &Blockstore, + slot: Slot, + last_entry: &Hash, +) -> Vec { + let entries = blockstore.get_slot_entries(slot, 0, None).unwrap(); assert_eq!(entries.verify(last_entry), true); entries } fn verify_slot_ticks( - blocktree: &Blocktree, + blockstore: &Blockstore, slot: Slot, last_entry: &Hash, expected_num_ticks: Option, ) -> Hash { - let entries = get_and_verify_slot_entries(blocktree, slot, last_entry); + let entries = get_and_verify_slot_entries(blockstore, slot, last_entry); let num_ticks: usize = entries.iter().map(|entry| entry.is_tick() as usize).sum(); if let Some(expected_num_ticks) = expected_num_ticks { assert_eq!(num_ticks, expected_num_ticks); diff --git a/local-cluster/tests/archiver.rs b/local-cluster/tests/archiver.rs index 3ad5469a3a9d10..51874b744777ac 100644 --- a/local-cluster/tests/archiver.rs +++ b/local-cluster/tests/archiver.rs @@ -9,7 +9,7 @@ use solana_core::{ storage_stage::SLOTS_PER_TURN_TEST, validator::ValidatorConfig, }; -use solana_ledger::{blocktree::Blocktree, create_new_tmp_ledger, get_tmp_ledger_path}; +use solana_ledger::{blockstore::Blockstore, create_new_tmp_ledger, get_tmp_ledger_path}; use solana_local_cluster::local_cluster::{ClusterConfig, LocalCluster}; use solana_sdk::{ commitment_config::CommitmentConfig, @@ -62,9 +62,14 @@ fn run_archiver_startup_basic(num_nodes: usize, num_archivers: usize) { cluster_nodes[0].clone(), ))); let path = get_tmp_ledger_path!(); - let blocktree = Arc::new(Blocktree::open(&path).unwrap()); - Archiver::download_from_archiver(&cluster_info, &archiver_info, &blocktree, slots_per_segment) - .unwrap(); + let blockstore = 
Arc::new(Blockstore::open(&path).unwrap()); + Archiver::download_from_archiver( + &cluster_info, + &archiver_info, + &blockstore, + slots_per_segment, + ) + .unwrap(); } #[test] @@ -113,8 +118,8 @@ fn test_archiver_startup_leader_hang() { assert!(archiver_res.is_err()); } - let _ignored = Blocktree::destroy(&leader_ledger_path); - let _ignored = Blocktree::destroy(&archiver_ledger_path); + let _ignored = Blockstore::destroy(&leader_ledger_path); + let _ignored = Blockstore::destroy(&archiver_ledger_path); let _ignored = remove_dir_all(&leader_ledger_path); let _ignored = remove_dir_all(&archiver_ledger_path); } diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs index 52e8fa66e5c803..0e0cb957244de0 100644 --- a/local-cluster/tests/local_cluster.rs +++ b/local-cluster/tests/local_cluster.rs @@ -10,7 +10,7 @@ use solana_core::{ validator::ValidatorConfig, }; use solana_ledger::{ - bank_forks::SnapshotConfig, blocktree::Blocktree, leader_schedule::FixedSchedule, + bank_forks::SnapshotConfig, blockstore::Blockstore, leader_schedule::FixedSchedule, leader_schedule::LeaderSchedule, snapshot_utils, }; use solana_local_cluster::{ @@ -67,12 +67,12 @@ fn test_ledger_cleanup_service() { //check everyone's ledgers and make sure only ~100 slots are stored for (_, info) in &cluster.validators { let mut slots = 0; - let blocktree = Blocktree::open(&info.info.ledger_path).unwrap(); - blocktree + let blockstore = Blockstore::open(&info.info.ledger_path).unwrap(); + blockstore .slot_meta_iterator(0) .unwrap() .for_each(|_| slots += 1); - // with 3 nodes upto 3 slots can be in progress and not complete so max slots in blocktree should be upto 103 + // with 3 nodes upto 3 slots can be in progress and not complete so max slots in blockstore should be upto 103 assert!(slots <= 103, "got {}", slots); } } @@ -674,7 +674,7 @@ fn test_snapshot_restart_tower() { #[test] #[serial] -fn test_snapshots_blocktree_floor() { +fn test_snapshots_blockstore_floor() { // First set up the cluster with 1 snapshotting leader let snapshot_interval_slots = 10; let num_account_paths = 4; @@ -747,10 +747,10 @@ fn test_snapshots_blocktree_floor() { // Check the validator ledger doesn't contain any slots < slot_floor cluster.close_preserve_ledgers(); let validator_ledger_path = &cluster.validators[&validator_id]; - let blocktree = Blocktree::open(&validator_ledger_path.info.ledger_path).unwrap(); + let blockstore = Blockstore::open(&validator_ledger_path.info.ledger_path).unwrap(); - // Skip the zeroth slot in blocktree that the ledger is initialized with - let (first_slot, _) = blocktree.slot_meta_iterator(1).unwrap().next().unwrap(); + // Skip the zeroth slot in blockstore that the ledger is initialized with + let (first_slot, _) = blockstore.slot_meta_iterator(1).unwrap().next().unwrap(); assert_eq!(first_slot, slot_floor); } @@ -932,7 +932,7 @@ fn test_no_voting() { cluster.close_preserve_ledgers(); let leader_pubkey = cluster.entry_point_info.id; let ledger_path = cluster.validators[&leader_pubkey].info.ledger_path.clone(); - let ledger = Blocktree::open(&ledger_path).unwrap(); + let ledger = Blockstore::open(&ledger_path).unwrap(); for i in 0..2 * VOTE_THRESHOLD_DEPTH { let meta = ledger.meta(i as u64).unwrap().unwrap(); let parent = meta.parent_slot; diff --git a/metrics/scripts/grafana-provisioning/dashboards/testnet-monitor.json b/metrics/scripts/grafana-provisioning/dashboards/testnet-monitor.json index 7c62def3fceb58..edda3cc5679480 100644 --- 
a/metrics/scripts/grafana-provisioning/dashboards/testnet-monitor.json +++ b/metrics/scripts/grafana-provisioning/dashboards/testnet-monitor.json @@ -4062,7 +4062,7 @@ "hide": false, "orderByTime": "ASC", "policy": "default", - "query": "SELECT host_id,error FROM \"$testnet\".\"autogen\".\"blocktree_error\" WHERE $timeFilter ORDER BY time DESC ", + "query": "SELECT host_id,error FROM \"$testnet\".\"autogen\".\"blockstore_error\" WHERE $timeFilter ORDER BY time DESC ", "rawQuery": true, "refId": "A", "resultFormat": "table", @@ -4083,7 +4083,7 @@ "tags": [] } ], - "title": "Unexpected Blocktree Errors", + "title": "Unexpected Blockstore Errors", "transform": "table", "type": "table" }, @@ -6693,7 +6693,7 @@ "measurement": "cluster_info-vote-count", "orderByTime": "ASC", "policy": "autogen", - "query": "SELECT sum(\"recovered\") AS \"recovered\" FROM \"$testnet\".\"autogen\".\"blocktree-erasure\" WHERE host_id::tag =~ /$hostid/ AND $timeFilter GROUP BY time($__interval) FILL(0)", + "query": "SELECT sum(\"recovered\") AS \"recovered\" FROM \"$testnet\".\"autogen\".\"blockstore-erasure\" WHERE host_id::tag =~ /$hostid/ AND $timeFilter GROUP BY time($__interval) FILL(0)", "rawQuery": true, "refId": "B", "resultFormat": "time_series",