Rationale for this change: a raw pointer that is never null is better passed as a reference.
diff --git a/src/headerssync.cpp b/src/headerssync.cpp
index 757b942cd9..18ffe65161 100644
--- a/src/headerssync.cpp
+++ b/src/headerssync.cpp
@@ -23,14 +23,14 @@ constexpr size_t REDOWNLOAD_BUFFER_SIZE{13959}; // 13959/584 = ~23.9 commitments
static_assert(sizeof(CompressedHeader) == 48);
HeadersSyncState::HeadersSyncState(NodeId id, const Consensus::Params& consensus_params,
- const CBlockIndex* chain_start, const arith_uint256& minimum_required_work) :
+ const CBlockIndex& chain_start, const arith_uint256& minimum_required_work) :
m_id(id), m_consensus_params(consensus_params),
m_chain_start(chain_start),
m_minimum_required_work(minimum_required_work),
- m_current_chain_work(chain_start->nChainWork),
+ m_current_chain_work{chain_start.nChainWork},
m_commit_offset(GetRand<unsigned>(HEADER_COMMITMENT_PERIOD)),
- m_last_header_received(m_chain_start->GetBlockHeader()),
- m_current_height(chain_start->nHeight)
+ m_last_header_received{chain_start.GetBlockHeader()},
+ m_current_height{chain_start.nHeight}
{
// Estimate the number of blocks that could possibly exist on the peer's
// chain *right now* using 6 blocks/second (fastest blockrate given the MTP
@@ -40,7 +40,7 @@ HeadersSyncState::HeadersSyncState(NodeId id, const Consensus::Params& consensus
// exceeds this bound, because it's not possible for a consensus-valid
// chain to be longer than this (at the current time -- in the future we
// could try again, if necessary, to sync a longer chain).
- m_max_commitments = 6*(Ticks<std::chrono::seconds>(GetAdjustedTime() - NodeSeconds{std::chrono::seconds{chain_start->GetMedianTimePast()}}) + MAX_FUTURE_BLOCK_TIME) / HEADER_COMMITMENT_PERIOD;
+ m_max_commitments = 6*(Ticks<std::chrono::seconds>(GetAdjustedTime() - NodeSeconds{std::chrono::seconds{chain_start.GetMedianTimePast()}}) + MAX_FUTURE_BLOCK_TIME) / HEADER_COMMITMENT_PERIOD;
LogPrint(BCLog::NET, "Initial headers sync started with peer=%d: height=%i, max_commitments=%i, min_work=%s\n", m_id, m_current_height, m_max_commitments, m_minimum_required_work.ToString());
}
@@ -164,10 +164,10 @@ bool HeadersSyncState::ValidateAndStoreHeadersCommitments(const std::vector<CBlo
if (m_current_chain_work >= m_minimum_required_work) {
m_redownloaded_headers.clear();
- m_redownload_buffer_last_height = m_chain_start->nHeight;
- m_redownload_buffer_first_prev_hash = m_chain_start->GetBlockHash();
- m_redownload_buffer_last_hash = m_chain_start->GetBlockHash();
- m_redownload_chain_work = m_chain_start->nChainWork;
+ m_redownload_buffer_last_height = m_chain_start.nHeight;
+ m_redownload_buffer_first_prev_hash = m_chain_start.GetBlockHash();
+ m_redownload_buffer_last_hash = m_chain_start.GetBlockHash();
+ m_redownload_chain_work = m_chain_start.nChainWork;
m_download_state = State::REDOWNLOAD;
LogPrint(BCLog::NET, "Initial headers sync transition with peer=%d: reached sufficient work at height=%i, redownloading from height=%i\n", m_id, m_current_height, m_redownload_buffer_last_height);
}
@@ -231,7 +231,7 @@ bool HeadersSyncState::ValidateAndStoreRedownloadedHeader(const CBlockHeader& he
if (!m_redownloaded_headers.empty()) {
previous_nBits = m_redownloaded_headers.back().nBits;
} else {
- previous_nBits = m_chain_start->nBits;
+ previous_nBits = m_chain_start.nBits;
}
if (!PermittedDifficultyTransition(m_consensus_params, next_height,
@@ -298,7 +298,7 @@ CBlockLocator HeadersSyncState::NextHeadersRequestLocator() const
Assume(m_download_state != State::FINAL);
if (m_download_state == State::FINAL) return {};
- auto chain_start_locator = LocatorEntries(m_chain_start);
+ auto chain_start_locator = LocatorEntries(&m_chain_start);
std::vector<uint256> locator;
if (m_download_state == State::PRESYNC) {
diff --git a/src/headerssync.h b/src/headerssync.h
index 16da964246..6455b7dae6 100644
--- a/src/headerssync.h
+++ b/src/headerssync.h
@@ -136,7 +136,7 @@ public:
* minimum_required_work: amount of chain work required to accept the chain
*/
HeadersSyncState(NodeId id, const Consensus::Params& consensus_params,
- const CBlockIndex* chain_start, const arith_uint256& minimum_required_work);
+ const CBlockIndex& chain_start, const arith_uint256& minimum_required_work);
/** Result data structure for ProcessNextHeaders. */
struct ProcessingResult {
@@ -208,7 +208,7 @@ private:
const Consensus::Params& m_consensus_params;
/** Store the last block in our block index that the peer's chain builds from */
- const CBlockIndex* m_chain_start{nullptr};
+ const CBlockIndex& m_chain_start;
/** Minimum work that we're looking for on this chain. */
const arith_uint256 m_minimum_required_work;
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 8e80bd7aef..2dbac5f6cb 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -647,7 +647,7 @@ private:
* otherwise.
*/
bool TryLowWorkHeadersSync(Peer& peer, CNode& pfrom,
- const CBlockIndex* chain_start_header,
+ const CBlockIndex& chain_start_header,
std::vector<CBlockHeader>& headers)
EXCLUSIVE_LOCKS_REQUIRED(!peer.m_headers_sync_mutex, !m_peer_mutex, !m_headers_presync_mutex);
@@ -2528,10 +2528,10 @@ bool PeerManagerImpl::IsContinuationOfLowWorkHeadersSync(Peer& peer, CNode& pfro
return false;
}
-bool PeerManagerImpl::TryLowWorkHeadersSync(Peer& peer, CNode& pfrom, const CBlockIndex* chain_start_header, std::vector<CBlockHeader>& headers)
+bool PeerManagerImpl::TryLowWorkHeadersSync(Peer& peer, CNode& pfrom, const CBlockIndex& chain_start_header, std::vector<CBlockHeader>& headers)
{
// Calculate the total work on this chain.
- arith_uint256 total_work = chain_start_header->nChainWork + CalculateHeadersWork(headers);
+ arith_uint256 total_work = chain_start_header.nChainWork + CalculateHeadersWork(headers);
// Our dynamic anti-DoS threshold (minimum work required on a headers chain
// before we'll store it)
@@ -2561,7 +2561,7 @@ bool PeerManagerImpl::TryLowWorkHeadersSync(Peer& peer, CNode& pfrom, const CBlo
// process the headers using it as normal.
return IsContinuationOfLowWorkHeadersSync(peer, pfrom, headers);
} else {
- LogPrint(BCLog::NET, "Ignoring low-work chain (height=%u) from peer=%d\n", chain_start_header->nHeight + headers.size(), pfrom.GetId());
+ LogPrint(BCLog::NET, "Ignoring low-work chain (height=%u) from peer=%d\n", chain_start_header.nHeight + headers.size(), pfrom.GetId());
// Since this is a low-work headers chain, no further processing is required.
headers = {};
return true;
@@ -2831,7 +2831,7 @@ void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, Peer& peer,
// Do anti-DoS checks to determine if we should process or store for later
// processing.
if (!already_validated_work && TryLowWorkHeadersSync(peer, pfrom,
- chain_start_header, headers)) {
+ *chain_start_header, headers)) {
// If we successfully started a low-work headers sync, then there
// should be no headers to process any further.
Assume(headers.empty());
diff --git a/src/test/headers_sync_chainwork_tests.cpp b/src/test/headers_sync_chainwork_tests.cpp
index 41241ebee2..f7be84a356 100644
--- a/src/test/headers_sync_chainwork_tests.cpp
+++ b/src/test/headers_sync_chainwork_tests.cpp
@@ -85,7 +85,7 @@ BOOST_AUTO_TEST_CASE(headers_sync_state)
Params().GenesisBlock().nVersion, Params().GenesisBlock().nTime,
ArithToUint256(1), Params().GenesisBlock().nBits);
- const CBlockIndex* chain_start = WITH_LOCK(::cs_main, return m_node.chainman->m_blockman.LookupBlockIndex(Params().GenesisBlock().GetHash()));
+ const CBlockIndex& chain_start { *Assert(WITH_LOCK(::cs_main, return m_node.chainman->m_blockman.LookupBlockIndex(Params().GenesisBlock().GetHash())))};
std::vector<CBlockHeader> headers_batch;
// Feed the first chain to HeadersSyncState, by delivering 1 header