merge bitcoin#24909: Move and rename pindexBestHeader, fHavePruned
kwvg authored and knst committed Aug 12, 2024
1 parent eb6e7b6 commit 8386cb4
Showing 14 changed files with 121 additions and 115 deletions.
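
The change retires two globals in favour of class members: ::pindexBestHeader becomes ChainstateManager::m_best_header, and ::fHavePruned becomes BlockManager::m_have_pruned (with the free IsBlockPruned() helper turned into a BlockManager method). Below is a minimal declaration sketch of the assumed post-move layout, reconstructed from the call sites in the hunks that follow rather than copied from the actual headers; locking annotations and unrelated members are omitted.

// Sketch only -- not part of the diff below.
class CBlockIndex;

class BlockManager
{
public:
    //! True if any block (or undo) files have ever been pruned from this datadir
    //! (replaces the ::fHavePruned global).
    bool m_have_pruned = false;

    //! Whether the given block's data has been pruned from disk
    //! (replaces the free IsBlockPruned() function); cs_main must be held.
    bool IsBlockPruned(const CBlockIndex* pblockindex);
};

class ChainstateManager
{
public:
    BlockManager m_blockman;

    //! Most-work header seen so far (replaces the ::pindexBestHeader global).
    CBlockIndex* m_best_header{nullptr};
};

Callers consequently reach this state via chainman.m_best_header and chainman.m_blockman.m_have_pruned, as the hunks below show.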
4 changes: 2 additions & 2 deletions src/bench/rpc_blockchain.cpp
@@ -45,7 +45,7 @@ static void BlockToJsonVerbose(benchmark::Bench& bench)
TestBlockAndIndex data;
const LLMQContext& llmq_ctx = *data.testing_setup->m_node.llmq_ctx;
bench.run([&] {
auto univalue = blockToJSON(data.block, &data.blockindex, &data.blockindex, *llmq_ctx.clhandler, *llmq_ctx.isman, /*verbose*/ true);
auto univalue = blockToJSON(data.testing_setup->m_node.chainman->m_blockman, data.block, &data.blockindex, &data.blockindex, *llmq_ctx.clhandler, *llmq_ctx.isman, /*verbose*/ true);
ankerl::nanobench::doNotOptimizeAway(univalue);
});
}
@@ -56,7 +56,7 @@ static void BlockToJsonVerboseWrite(benchmark::Bench& bench)
{
TestBlockAndIndex data;
const LLMQContext& llmq_ctx = *data.testing_setup->m_node.llmq_ctx;
auto univalue = blockToJSON(data.block, &data.blockindex, &data.blockindex, *llmq_ctx.clhandler, *llmq_ctx.isman, /*verbose*/ true);
auto univalue = blockToJSON(data.testing_setup->m_node.chainman->m_blockman, data.block, &data.blockindex, &data.blockindex, *llmq_ctx.clhandler, *llmq_ctx.isman, /*verbose*/ true);
bench.run([&] {
auto str = univalue.write();
ankerl::nanobench::doNotOptimizeAway(str);
2 changes: 1 addition & 1 deletion src/dsnotificationinterface.cpp
@@ -79,7 +79,7 @@ void CDSNotificationInterface::UpdatedBlockTip(const CBlockIndex *pindexNew, con
if (pindexNew == pindexFork) // blocks were disconnected without any new ones
return;

m_mn_sync.UpdatedBlockTip(pindexNew, fInitialDownload);
m_mn_sync.UpdatedBlockTip(m_chainman.m_best_header, pindexNew, fInitialDownload);

// Update global DIP0001 activation status
fDIP0001ActiveAtTip = pindexNew->nHeight >= Params().GetConsensus().DIP0001Height;
12 changes: 6 additions & 6 deletions src/init.cpp
@@ -1890,7 +1890,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)

if (ShutdownRequested()) break;

// LoadBlockIndex will load fHavePruned if we've ever removed a
// LoadBlockIndex will load m_have_pruned if we've ever removed a
// block file from disk.
// Note that it also sets fReindex based on the disk flag!
// From here on out fReindex and fReset mean something different!
@@ -1936,7 +1936,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)

// Check for changed -prune state. What we are concerned about is a user who has pruned blocks
// in the past, but is now trying to run unpruned.
if (fHavePruned && !fPruneMode) {
if (chainman.m_blockman.m_have_pruned && !fPruneMode) {
strLoadError = _("You need to rebuild the database using -reindex to go back to unpruned mode. This will redownload the entire blockchain");
break;
}
@@ -2022,7 +2022,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
for (CChainState* chainstate : chainman.GetAll()) {
if (!is_coinsview_empty(chainstate)) {
uiInterface.InitMessage(_("Verifying blocks…").translated);
if (fHavePruned && args.GetArg("-checkblocks", DEFAULT_CHECKBLOCKS) > MIN_BLOCKS_TO_KEEP) {
if (chainman.m_blockman.m_have_pruned && args.GetArg("-checkblocks", DEFAULT_CHECKBLOCKS) > MIN_BLOCKS_TO_KEEP) {
LogPrintf("Prune: pruned datadir may not have more than %d blocks; only checking available blocks\n",
MIN_BLOCKS_TO_KEEP);
}
@@ -2328,9 +2328,9 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
tip_info->block_hash = chainman.ActiveChain().Tip() ? chainman.ActiveChain().Tip()->GetBlockHash() : Params().GenesisBlock().GetHash();
tip_info->verification_progress = GuessVerificationProgress(Params().TxData(), chainman.ActiveChain().Tip());
}
if (tip_info && ::pindexBestHeader) {
tip_info->header_height = ::pindexBestHeader->nHeight;
tip_info->header_time = ::pindexBestHeader->GetBlockTime();
if (tip_info && chainman.m_best_header) {
tip_info->header_height = chainman.m_best_header->nHeight;
tip_info->header_time = chainman.m_best_header->GetBlockTime();
}
}
LogPrintf("nBestHeight = %d\n", chain_active_height);
3 changes: 1 addition & 2 deletions src/masternode/sync.cpp
@@ -329,7 +329,7 @@ void CMasternodeSync::NotifyHeaderTip(const CBlockIndex *pindexNew, bool fInitia
}
}

void CMasternodeSync::UpdatedBlockTip(const CBlockIndex *pindexNew, bool fInitialDownload)
void CMasternodeSync::UpdatedBlockTip(const CBlockIndex *pindexTip, const CBlockIndex *pindexNew, bool fInitialDownload)
{
LogPrint(BCLog::MNSYNC, "CMasternodeSync::UpdatedBlockTip -- pindexNew->nHeight: %d fInitialDownload=%d\n", pindexNew->nHeight, fInitialDownload);
nTimeLastUpdateBlockTip = GetTime<std::chrono::seconds>().count();
@@ -353,7 +353,6 @@ void CMasternodeSync::UpdatedBlockTip(const CBlockIndex *pindexNew, bool fInitia
}

// Note: since we sync headers first, it should be ok to use this
CBlockIndex* pindexTip = WITH_LOCK(cs_main, return pindexBestHeader);
if (pindexTip == nullptr) return;
bool fReachedBestHeaderNew = pindexNew->GetBlockHash() == pindexTip->GetBlockHash();

2 changes: 1 addition & 1 deletion src/masternode/sync.h
@@ -75,7 +75,7 @@ class CMasternodeSync

void AcceptedBlockHeader(const CBlockIndex *pindexNew);
void NotifyHeaderTip(const CBlockIndex *pindexNew, bool fInitialDownload);
void UpdatedBlockTip(const CBlockIndex *pindexNew, bool fInitialDownload);
void UpdatedBlockTip(const CBlockIndex *pindexTip, const CBlockIndex *pindexNew, bool fInitialDownload);

void DoMaintenance();
};
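
With this signature change, CMasternodeSync no longer looks up the best header itself; whoever fires the notification passes it in. A self-contained sketch of the assumed calling convention (types pared down to what the example needs; the real caller is the dsnotificationinterface.cpp hunk above):

// Sketch only -- illustrates the new parameter, not the actual classes.
class CBlockIndex;

class CMasternodeSync
{
public:
    // The header-chain tip is now an explicit argument...
    void UpdatedBlockTip(const CBlockIndex* pindexTip, const CBlockIndex* pindexNew, bool fInitialDownload);
};

void NotifyTipChanged(CMasternodeSync& mn_sync, const CBlockIndex* best_header,
                      const CBlockIndex* pindexNew, bool fInitialDownload)
{
    // ...so the sync code no longer needs WITH_LOCK(cs_main, return pindexBestHeader)
    // internally; the caller hands it chainman.m_best_header directly.
    mn_sync.UpdatedBlockTip(best_header, pindexNew, fInitialDownload);
}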
47 changes: 24 additions & 23 deletions src/net_processing.cpp
@@ -1856,9 +1856,9 @@ bool PeerManagerImpl::BlockRequestAllowed(const CBlockIndex* pindex)
{
AssertLockHeld(cs_main);
if (m_chainman.ActiveChain().Contains(pindex)) return true;
return pindex->IsValid(BLOCK_VALID_SCRIPTS) && (pindexBestHeader != nullptr) &&
(pindexBestHeader->GetBlockTime() - pindex->GetBlockTime() < STALE_RELAY_AGE_LIMIT) &&
(GetBlockProofEquivalentTime(*pindexBestHeader, *pindex, *pindexBestHeader, m_chainparams.GetConsensus()) < STALE_RELAY_AGE_LIMIT);
return pindex->IsValid(BLOCK_VALID_SCRIPTS) && (m_chainman.m_best_header != nullptr) &&
(m_chainman.m_best_header->GetBlockTime() - pindex->GetBlockTime() < STALE_RELAY_AGE_LIMIT) &&
(GetBlockProofEquivalentTime(*m_chainman.m_best_header, *pindex, *m_chainman.m_best_header, m_chainparams.GetConsensus()) < STALE_RELAY_AGE_LIMIT);
}

std::optional<std::string> PeerManagerImpl::FetchBlock(NodeId peer_id, const CBlockIndex& block_index)
@@ -2434,7 +2434,7 @@ void PeerManagerImpl::ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv&
const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
// disconnect node in case we have reached the outbound limit for serving historical blocks
if (m_connman.OutboundTargetReached(true) &&
(((pindexBestHeader != nullptr) && (pindexBestHeader->GetBlockTime() - pindex->GetBlockTime() > HISTORICAL_BLOCK_AGE)) || inv.IsMsgFilteredBlk()) &&
(((m_chainman.m_best_header != nullptr) && (m_chainman.m_best_header->GetBlockTime() - pindex->GetBlockTime() > HISTORICAL_BLOCK_AGE)) || inv.IsMsgFilteredBlk()) &&
!pfrom.HasPermission(NetPermissionFlags::Download) // nodes with the download permission may exceed target
) {
LogPrint(BCLog::NET, "historical block serving limit reached, disconnect peer=%d\n", pfrom.GetId());
@@ -2824,13 +2824,13 @@ void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, const Peer& peer,
if (!m_chainman.m_blockman.LookupBlockIndex(headers[0].hashPrevBlock) && nCount < MAX_BLOCKS_TO_ANNOUNCE) {
nodestate->nUnconnectingHeaders++;
std::string msg_type = (pfrom.nServices & NODE_HEADERS_COMPRESSED) ? NetMsgType::GETHEADERS2 : NetMsgType::GETHEADERS;
m_connman.PushMessage(&pfrom, msgMaker.Make(msg_type, m_chainman.ActiveChain().GetLocator(pindexBestHeader), uint256()));
m_connman.PushMessage(&pfrom, msgMaker.Make(msg_type, m_chainman.ActiveChain().GetLocator(m_chainman.m_best_header), uint256()));
LogPrint(BCLog::NET, "received header %s: missing prev block %s, sending %s (%d) to end (peer=%d, nUnconnectingHeaders=%d)\n",
headers[0].GetHash().ToString(),
headers[0].hashPrevBlock.ToString(),
msg_type,
pindexBestHeader->nHeight,
pfrom.GetId(), nodestate->nUnconnectingHeaders);
headers[0].GetHash().ToString(),
headers[0].hashPrevBlock.ToString(),
msg_type,
m_chainman.m_best_header->nHeight,
pfrom.GetId(), nodestate->nUnconnectingHeaders);
// Set hashLastUnknownBlock for this peer, so that if we
// eventually get the headers - even from a different peer -
// we can use this peer to download.
@@ -2887,7 +2887,7 @@ void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, const Peer& peer,

if (nCount == MAX_HEADERS_RESULTS) {
// Headers message had its maximum size; the peer may have more headers.
// TODO: optimize: if pindexLast is an ancestor of m_chainman.ActiveChain().Tip or pindexBestHeader, continue
// TODO: optimize: if pindexLast is an ancestor of m_chainman.ActiveChain().Tip or m_chainman.m_best_header, continue
// from there instead.
std::string msg_type = (pfrom.nServices & NODE_HEADERS_COMPRESSED) ? NetMsgType::GETHEADERS2 : NetMsgType::GETHEADERS;
LogPrint(BCLog::NET, "more %s (%d) to end to peer=%d (startheight:%d)\n",
@@ -3819,15 +3819,15 @@ void PeerManagerImpl::ProcessMessage(
// Download if this is a nice peer, or we have no nice peers and this one might do.
bool fFetch = state->fPreferredDownload || (nPreferredDownload == 0 && !pfrom.IsAddrFetchConn());
// Only actively request headers from a single peer, unless we're close to end of initial download.
if ((nSyncStarted == 0 && fFetch) || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - nMaxTipAge) {
if ((nSyncStarted == 0 && fFetch) || m_chainman.m_best_header->GetBlockTime() > GetAdjustedTime() - nMaxTipAge) {
// Make sure to mark this peer as the one we are currently syncing with etc.
state->fSyncStarted = true;
state->m_headers_sync_timeout = current_time + HEADERS_DOWNLOAD_TIMEOUT_BASE +
(
// Convert HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER to microseconds before scaling
// to maintain precision
std::chrono::microseconds{HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER} *
(GetAdjustedTime() - pindexBestHeader->GetBlockTime()) / m_chainparams.GetConsensus().nPowTargetSpacing
(GetAdjustedTime() - m_chainman.m_best_header->GetBlockTime()) / m_chainparams.GetConsensus().nPowTargetSpacing
);
nSyncStarted++;
// Headers-first is the primary method of announcement on
@@ -3872,8 +3872,8 @@ void PeerManagerImpl::ProcessMessage(
}
if (best_block != nullptr) {
std::string msg_type = (pfrom.nServices & NODE_HEADERS_COMPRESSED) ? NetMsgType::GETHEADERS2 : NetMsgType::GETHEADERS;
m_connman.PushMessage(&pfrom, msgMaker.Make(msg_type, m_chainman.ActiveChain().GetLocator(pindexBestHeader), *best_block));
LogPrint(BCLog::NET, "%s (%d) %s to peer=%d\n", msg_type, pindexBestHeader->nHeight, best_block->ToString(), pfrom.GetId());
m_connman.PushMessage(&pfrom, msgMaker.Make(msg_type, m_chainman.ActiveChain().GetLocator(m_chainman.m_best_header), *best_block));
LogPrint(BCLog::NET, "%s (%d) %s to peer=%d\n", msg_type, m_chainman.m_best_header->nHeight, best_block->ToString(), pfrom.GetId());
}

return;
@@ -4289,7 +4289,7 @@ void PeerManagerImpl::ProcessMessage(
if (!m_chainman.m_blockman.LookupBlockIndex(cmpctblock.header.hashPrevBlock)) {
// Doesn't connect (or is genesis), instead of DoSing in AcceptBlockHeader, request deeper headers
if (!m_chainman.ActiveChainstate().IsInitialBlockDownload())
m_connman.PushMessage(&pfrom, msgMaker.Make((pfrom.nServices & NODE_HEADERS_COMPRESSED) ? NetMsgType::GETHEADERS2 : NetMsgType::GETHEADERS, m_chainman.ActiveChain().GetLocator(pindexBestHeader), uint256()));
m_connman.PushMessage(&pfrom, msgMaker.Make((pfrom.nServices & NODE_HEADERS_COMPRESSED) ? NetMsgType::GETHEADERS2 : NetMsgType::GETHEADERS, m_chainman.ActiveChain().GetLocator(m_chainman.m_best_header), uint256()));
return;
}

@@ -5445,28 +5445,29 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
CNodeState &state = *State(pto->GetId());

// Start block sync
if (pindexBestHeader == nullptr)
pindexBestHeader = m_chainman.ActiveChain().Tip();
if (m_chainman.m_best_header == nullptr) {
m_chainman.m_best_header = m_chainman.ActiveChain().Tip();
}
bool fFetch = state.fPreferredDownload || (nPreferredDownload == 0 && !pto->fClient && !pto->IsAddrFetchConn()); // Download if this is a nice peer, or we have no nice peers and this one might do.
if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex && pto->CanRelay()) {
// Only actively request headers from a single peer, unless we're close to end of initial download.
if ((nSyncStarted == 0 && fFetch) || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - nMaxTipAge) {
if ((nSyncStarted == 0 && fFetch) || m_chainman.m_best_header->GetBlockTime() > GetAdjustedTime() - nMaxTipAge) {
state.fSyncStarted = true;
state.m_headers_sync_timeout = current_time + HEADERS_DOWNLOAD_TIMEOUT_BASE +
(
// Convert HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER to microseconds before scaling
// to maintain precision
std::chrono::microseconds{HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER} *
(GetAdjustedTime() - pindexBestHeader->GetBlockTime()) / consensusParams.nPowTargetSpacing
(GetAdjustedTime() - m_chainman.m_best_header->GetBlockTime()) / consensusParams.nPowTargetSpacing
);
nSyncStarted++;
const CBlockIndex *pindexStart = pindexBestHeader;
const CBlockIndex* pindexStart = m_chainman.m_best_header;
/* If possible, start at the block preceding the currently
best known header. This ensures that we always get a
non-empty list of headers back as long as the peer
is up-to-date. With a non-empty response, we can initialise
the peer's known best block. This wouldn't be possible
if we requested starting at pindexBestHeader and
if we requested starting at m_chainman.m_best_header and
got back an empty response. */
if (pindexStart->pprev)
pindexStart = pindexStart->pprev;
@@ -5834,7 +5835,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
// Check for headers sync timeouts
if (state.fSyncStarted && state.m_headers_sync_timeout < std::chrono::microseconds::max()) {
// Detect whether this is a stalling initial-headers-sync peer
if (pindexBestHeader->GetBlockTime() <= GetAdjustedTime() - nMaxTipAge) {
if (m_chainman.m_best_header->GetBlockTime() <= GetAdjustedTime() - nMaxTipAge) {
if (current_time > state.m_headers_sync_timeout && nSyncStarted == 1 && (nPreferredDownload - state.fPreferredDownload >= 1)) {
// Disconnect a peer (without NetPermissionFlags::NoBan permission) if it is our only sync peer,
// and we have others we could be using instead.
20 changes: 10 additions & 10 deletions src/node/blockstorage.cpp
@@ -25,7 +25,6 @@

std::atomic_bool fImporting(false);
std::atomic_bool fReindex(false);
bool fHavePruned = false;
bool fPruneMode = false;
uint64_t nPruneTarget = 0;

@@ -86,7 +85,8 @@ const CBlockIndex* BlockManager::LookupBlockIndex(const uint256& hash) const
return it == m_block_index.end() ? nullptr : &it->second;
}

CBlockIndex* BlockManager::AddToBlockIndex(const CBlockHeader& block, const uint256& hash, enum BlockStatus nStatus)
CBlockIndex* BlockManager::AddToBlockIndex(const CBlockHeader& block, const uint256& hash, CBlockIndex*& best_header,
enum BlockStatus nStatus)
{
assert(!(nStatus & BLOCK_FAILED_MASK)); // no failed blocks allowed
AssertLockHeld(cs_main);
@@ -113,8 +113,8 @@ CBlockIndex* BlockManager::AddToBlockIndex(const CBlockHeader& block, const uint
pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew);
if (nStatus & BLOCK_VALID_MASK) {
pindexNew->RaiseValidity(nStatus);
if (pindexBestHeader == nullptr || pindexBestHeader->nChainWork < pindexNew->nChainWork) {
pindexBestHeader = pindexNew;
if (best_header == nullptr || best_header->nChainWork < pindexNew->nChainWork) {
best_header = pindexNew;
}
} else {
pindexNew->RaiseValidity(BLOCK_VALID_TREE); // required validity level
@@ -309,8 +309,6 @@ bool BlockManager::LoadBlockIndex(const Consensus::Params& consensus_params)
if (pindex->pprev) {
pindex->BuildSkip();
}
if (pindex->IsValid(BLOCK_VALID_TREE) && (pindexBestHeader == nullptr || CBlockIndexWorkComparator()(pindexBestHeader, pindex)))
pindexBestHeader = pindex;
}

return true;
@@ -327,6 +325,8 @@ void BlockManager::Unload()
m_last_blockfile = 0;
m_dirty_blockindex.clear();
m_dirty_fileinfo.clear();

m_have_pruned = false;
}

bool BlockManager::WriteBlockIndexDB()
@@ -389,8 +389,8 @@ bool BlockManager::LoadBlockIndexDB()
}

// Check whether we have ever pruned block & undo files
m_block_tree_db->ReadFlag("prunedblockfiles", fHavePruned);
if (fHavePruned) {
m_block_tree_db->ReadFlag("prunedblockfiles", m_have_pruned);
if (m_have_pruned) {
LogPrintf("LoadBlockIndexDB(): Block files have previously been pruned\n");
}

@@ -428,10 +428,10 @@ const CBlockIndex* BlockManager::GetLastCheckpoint(const CCheckpointData& data)
return nullptr;
}

bool IsBlockPruned(const CBlockIndex* pblockindex)
bool BlockManager::IsBlockPruned(const CBlockIndex* pblockindex)
{
AssertLockHeld(::cs_main);
return (fHavePruned && !(pblockindex->nStatus & BLOCK_HAVE_DATA) && pblockindex->nTx > 0);
return (m_have_pruned && !(pblockindex->nStatus & BLOCK_HAVE_DATA) && pblockindex->nTx > 0);
}

// If we're using -prune with -reindex, then delete block files that will be ignored by the
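
Since IsBlockPruned() now lives on BlockManager and reads m_have_pruned, callers consult the owning instance rather than a global flag. A hedged usage sketch follows; the helper name is hypothetical, `chainman` is assumed to be the node's ChainstateManager, and cs_main is assumed to be held (the method asserts it above).

// Usage sketch only -- BlockDataAvailable is a hypothetical helper, not part of the diff.
static bool BlockDataAvailable(ChainstateManager& chainman, const CBlockIndex* pindex)
{
    AssertLockHeld(::cs_main);
    // Pruned blocks keep their index entry (nTx > 0) but lose BLOCK_HAVE_DATA,
    // which is exactly what BlockManager::IsBlockPruned() checks above.
    return !chainman.m_blockman.IsBlockPruned(pindex);
}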
(Diffs for the remaining 7 changed files are not shown.)
