The consensus has been that we want as few parts of the system as possible knowing about “snapshots” per se.
Sure; but the map already means the code needs to know about snapshots per se.
If we were ever to further parallelize validation with more than one background validation chainstate, we could keep this structure.
That seems crazy to me; managing two utxo sets is already a lot of space, increasing that is costly, and I don’t think validating the chain at many points would help with any bottlenecks.
But you shouldn’t even need the bool; since the snapshots are all at different points along the same chain, you can just set last_common_block_for_this_chainstate = min(tip, last_common_block) (taking the minimum by height)
when picking where to start downloading, and apply if (last_common_block->nHeight < pindex->nHeight) last_common_block = pindex
when updating.
0--- a/src/net_processing.cpp
1+++ b/src/net_processing.cpp
2@@ -391,7 +391,7 @@ struct CNodeState {
3 //! any CChainState objects which were in use at any point (e.g. a background
4 //! validation chainstate which has completed) until the end of
5 //! init.cpp:Shutdown(), else we'll have bad pointers here.
6- std::map<const CChainState*, const CBlockIndex*> chainstate_to_last_common_block = {};
7+ const CBlockIndex* last_common_block = nullptr;
8
9 //! The best header we have sent our peer.
10 const CBlockIndex* pindexBestHeaderSent{nullptr};
11@@ -757,7 +757,7 @@ private:
12
13 bool TipMayBeStale() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
14
15- /** Update chainstate_to_last_common_block and add not-in-flight missing successors
16+ /** Update last_common_block and add not-in-flight missing successors
17 * to vBlocks, until it has at most count entries.
18 */
19 void FindNextBlocksToDownload(
20@@ -1140,6 +1140,13 @@ void PeerManagerImpl::UpdateBlockAvailability(NodeId nodeid, const uint256 &hash
21 }
22 }
23
24+static const CBlockIndex* GetLastCommonBlock(const CBlockIndex* tip, const CBlockIndex* last_common_block) EXCLUSIVE_LOCKS_REQUIRED(::cs_main)
25+{
26+ if (last_common_block == nullptr || tip == nullptr) return nullptr;
27+ if (tip->nHeight <= last_common_block->nHeight) return tip;
28+ return last_common_block;
29+}
30+
31 void PeerManagerImpl::FindNextBlocksToDownload(
32 const CChainState* const chainstate,
33 NodeId nodeid,
34@@ -1167,27 +1174,22 @@ void PeerManagerImpl::FindNextBlocksToDownload(
35 return;
36 }
37
38- if (!state->chainstate_to_last_common_block.count(chainstate)) {
39+ if (!state->last_common_block) {
40 // Bootstrap quickly by guessing a parent of our best tip is the forking point.
41 // Guessing wrong in either direction is not a problem.
42- //
43- // Namespace this by chainstate so that we can simultaneously sync two
44- // separate chainstates at different heights.
45- state->chainstate_to_last_common_block[chainstate] = our_chain[
46- std::min(state->pindexBestKnownBlock->nHeight, our_chain.Height())];
47+ state->last_common_block = our_chain[std::min(state->pindexBestKnownBlock->nHeight, our_chain.Height())];
48 }
49
50-    // If the peer reorganized, our previous chainstate_to_last_common_block may not be an ancestor
51-    // of its current tip anymore. Go back enough to fix that.
52+    // If the peer reorganized, our previous last_common_block may not be an ancestor
53+    // of its current tip anymore. Go back enough to fix that.
52- state->chainstate_to_last_common_block[chainstate] = LastCommonAncestor(
53- state->chainstate_to_last_common_block[chainstate], state->pindexBestKnownBlock);
54+ state->last_common_block = LastCommonAncestor(state->last_common_block, state->pindexBestKnownBlock);
55
56- if (state->chainstate_to_last_common_block[chainstate] == state->pindexBestKnownBlock) {
57+ if (state->last_common_block == state->pindexBestKnownBlock) {
58 return;
59 }
60
61 std::vector<const CBlockIndex*> vToFetch;
62- const CBlockIndex *pindexWalk = state->chainstate_to_last_common_block[chainstate];
63+ const CBlockIndex* pindexWalk = GetLastCommonBlock(our_tip, state->last_common_block);
64 // Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last
65 // linked block we have in common with this peer. The +1 is so we can detect stalling, namely if we would be able to
66 // download that next block if the window were 1 larger.
67@@ -1209,7 +1211,7 @@ void PeerManagerImpl::FindNextBlocksToDownload(
68
69 // Iterate over those blocks in vToFetch (in forward direction), adding the ones that
70 // are not yet downloaded and not in flight to vBlocks. In the meantime, update
71- // chainstate_to_last_common_block as long as all ancestors are already downloaded, or if it's
72+ // last_common_block as long as all ancestors are already downloaded, or if it's
73 // already part of our chain (and therefore don't need it even if pruned).
74 for (const CBlockIndex* pindex : vToFetch) {
75 if (!pindex->IsValid(BLOCK_VALID_TREE)) {
76@@ -1221,8 +1223,11 @@ void PeerManagerImpl::FindNextBlocksToDownload(
77 return;
78 }
79 if (pindex->nStatus & BLOCK_HAVE_DATA || our_chain.Contains(pindex)) {
80- if (pindex->HaveTxsDownloaded())
81- state->chainstate_to_last_common_block[chainstate] = pindex;
82+ if (pindex->HaveTxsDownloaded()) {
83+ if (state->last_common_block == nullptr || pindex->nHeight > state->last_common_block->nHeight) {
84+ state->last_common_block = pindex;
85+ }
86+ }
87 } else if (!IsBlockRequested(pindex->GetBlockHash())) {
88 // The block is not already downloaded, and not yet in flight.
89 if (pindex->nHeight > nWindowEnd) {
90@@ -1434,13 +1439,8 @@ bool PeerManagerImpl::GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) c
91 stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1;
92 stats.nCommonHeight = -1;
93
94- // Take the max common height with this peer across chainstates.
95- for (const auto& it : state->chainstate_to_last_common_block) {
96- const CBlockIndex* block = it.second;
97- if (block && block->nHeight > stats.nCommonHeight) {
98- stats.nCommonHeight = block->nHeight;
99- }
100- }
101+ if (state->last_common_block) stats.nCommonHeight = state->last_common_block->nHeight;
102+
103 for (const QueuedBlock& queue : state->vBlocksInFlight) {
104 if (queue.pindex)
105 stats.vHeightInFlight.push_back(queue.pindex->nHeight);