Here is a patch that does that on top of this PR. It could also be a follow-up; I don't want to bloat this PR with it unless reviewers want it.
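For reviewers skimming, the gist of the interface change, summarized from the diff below (signatures copied verbatim from net.h):

```cpp
// Before: NetEventsInterface takes a raw CNode pointer that callers must dereference.
virtual bool ProcessMessages(CNode* pnode, std::atomic<bool>& interrupt) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex) = 0;
virtual bool SendMessages(CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex) = 0;

// After: callers pass the node by reference (*pnode / node), so implementations
// never see a null pointer and don't need to dereference.
virtual bool ProcessMessages(CNode& from, std::atomic<bool>& interrupt) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex) = 0;
virtual bool SendMessages(CNode& to) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex) = 0;
```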
diff --git i/src/net.cpp w/src/net.cpp
index 81ee1eec16..4189cee0df 100644
--- i/src/net.cpp
+++ w/src/net.cpp
@@ -3051,18 +3051,18 @@ void CConnman::ThreadMessageHandler()

 for (auto& pnode : nodes) {
 if (pnode->fDisconnect)
 continue;

 // Receive messages
- bool fMoreNodeWork = m_msgproc->ProcessMessages(pnode.get(), flagInterruptMsgProc);
+ bool fMoreNodeWork = m_msgproc->ProcessMessages(*pnode, flagInterruptMsgProc);
 fMoreWork |= (fMoreNodeWork && !pnode->fPauseSend);
 if (flagInterruptMsgProc)
 return;
 // Send messages
- m_msgproc->SendMessages(pnode.get());
+ m_msgproc->SendMessages(*pnode);

 if (flagInterruptMsgProc)
 return;
 }
 }

diff --git i/src/net.h w/src/net.h
index 051ff50f3b..1e93dfcaa5 100644
--- i/src/net.h
+++ w/src/net.h
@@ -1002,27 +1002,27 @@ public:
 * Callback to determine whether the given set of service flags are sufficient
 * for a peer to be "relevant".
 */
 virtual bool HasAllDesirableServiceFlags(ServiceFlags services) const = 0;

 /**
- * Process protocol messages received from a given node
- *
- * @param[in] pnode The node which we have received messages from.
- * @param[in] interrupt Interrupt condition for processing threads
- * @return True if there is more work to be done
- */
- virtual bool ProcessMessages(CNode* pnode, std::atomic<bool>& interrupt) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex) = 0;
+ * Process protocol messages received from a given node
+ *
+ * @param[in,out] from The node which we have received messages from.
+ * @param[in] interrupt Interrupt condition for processing threads.
+ * @return True if there is more work to be done.
+ */
+ virtual bool ProcessMessages(CNode& from, std::atomic<bool>& interrupt) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex) = 0;

 /**
- * Send queued protocol messages to a given node.
- *
- * @param[in] pnode The node which we are sending messages to.
- * @return True if there is more work to be done
- */
- virtual bool SendMessages(CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex) = 0;
+ * Send queued protocol messages to a given node.
+ *
+ * @param[in,out] to The node which we are sending messages to.
+ * @return True if there is more work to be done.
+ */
+ virtual bool SendMessages(CNode& to) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex) = 0;


 protected:
 /**
 * Protected destructor so that instances can only be deleted by derived classes.
 * If that restriction is no longer desired, this should be made public and virtual.
diff --git i/src/net_processing.cpp w/src/net_processing.cpp
index 78e97b6684..7153226635 100644
--- i/src/net_processing.cpp
+++ w/src/net_processing.cpp
@@ -519,15 +519,15 @@ public:
 EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex);

 /** Implement NetEventsInterface */
 void InitializeNode(const CNode& node, ServiceFlags our_services) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_tx_download_mutex);
 void FinalizeNode(const CNode& node) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_headers_presync_mutex, !m_tx_download_mutex);
 bool HasAllDesirableServiceFlags(ServiceFlags services) const override;
- bool ProcessMessages(CNode* pfrom, std::atomic<bool>& interrupt) override
+ bool ProcessMessages(CNode& from, std::atomic<bool>& interrupt) override
 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_most_recent_block_mutex, !m_headers_presync_mutex, g_msgproc_mutex, !m_tx_download_mutex);
- bool SendMessages(CNode* pto) override
+ bool SendMessages(CNode& to) override
 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_most_recent_block_mutex, g_msgproc_mutex, !m_tx_download_mutex);

 /** Implement PeerManager */
 void StartScheduledTasks(CScheduler& scheduler) override;
 void CheckForStaleTipAndEvictPeers() override;
 std::optional<std::string> FetchBlock(NodeId peer_id, const CBlockIndex& block_index) override
@@ -4989,72 +4989,72 @@ bool PeerManagerImpl::MaybeDiscourageAndDisconnect(CNode& pnode, Peer& peer)
 LogDebug(BCLog::NET, "Disconnecting and discouraging peer %d!\n", peer.m_id);
 if (m_banman) m_banman->Discourage(pnode.addr);
 m_connman.DisconnectNode(pnode.addr);
 return true;
 }

-bool PeerManagerImpl::ProcessMessages(CNode* pfrom, std::atomic<bool>& interruptMsgProc)
+bool PeerManagerImpl::ProcessMessages(CNode& from, std::atomic<bool>& interruptMsgProc)
 {
 AssertLockNotHeld(m_tx_download_mutex);
 AssertLockHeld(g_msgproc_mutex);

- PeerRef peer = GetPeerRef(pfrom->GetId());
+ PeerRef peer = GetPeerRef(from.GetId());
 if (peer == nullptr) return false;

 // For outbound connections, ensure that the initial VERSION message
 // has been sent first before processing any incoming messages
- if (!pfrom->IsInboundConn() && !peer->m_outbound_version_message_sent) return false;
+ if (!from.IsInboundConn() && !peer->m_outbound_version_message_sent) return false;

 {
 LOCK(peer->m_getdata_requests_mutex);
 if (!peer->m_getdata_requests.empty()) {
- ProcessGetData(*pfrom, *peer, interruptMsgProc);
+ ProcessGetData(from, *peer, interruptMsgProc);
 }
 }

 const bool processed_orphan = ProcessOrphanTx(*peer);

- if (pfrom->fDisconnect)
+ if (from.fDisconnect)
 return false;

 if (processed_orphan) return true;

 // this maintains the order of responses
 // and prevents m_getdata_requests to grow unbounded
 {
 LOCK(peer->m_getdata_requests_mutex);
 if (!peer->m_getdata_requests.empty()) return true;
 }

 // Don't bother if send buffer is too full to respond anyway
- if (pfrom->fPauseSend) return false;
+ if (from.fPauseSend) return false;

- auto poll_result{pfrom->PollMessage()};
+ auto poll_result{from.PollMessage()};
 if (!poll_result) {
 // No message to process
 return false;
 }

 CNetMessage& msg{poll_result->first};
 bool fMoreWork = poll_result->second;

 TRACEPOINT(net, inbound_message,
- pfrom->GetId(),
- pfrom->m_addr_name.c_str(),
- pfrom->ConnectionTypeAsString().c_str(),
+ from.GetId(),
+ from.m_addr_name.c_str(),
+ from.ConnectionTypeAsString().c_str(),
 msg.m_type.c_str(),
 msg.m_recv.size(),
 msg.m_recv.data()
 );

 if (m_opts.capture_messages) {
- CaptureMessage(pfrom->addr, msg.m_type, MakeUCharSpan(msg.m_recv), /*is_incoming=*/true);
+ CaptureMessage(from.addr, msg.m_type, MakeUCharSpan(msg.m_recv), /*is_incoming=*/true);
 }

 try {
- ProcessMessage(*pfrom, msg.m_type, msg.m_recv, msg.m_time, interruptMsgProc);
+ ProcessMessage(from, msg.m_type, msg.m_recv, msg.m_time, interruptMsgProc);
 if (interruptMsgProc) return false;
 {
 LOCK(peer->m_getdata_requests_mutex);
 if (!peer->m_getdata_requests.empty()) fMoreWork = true;
 }
 // Does this peer has an orphan ready to reconsider?
@@ -5481,69 +5481,69 @@ bool PeerManagerImpl::SetupAddressRelay(const CNode& node, Peer& peer)
 peer.m_addr_known = std::make_unique<CRollingBloomFilter>(5000, 0.001);
 }

 return true;
 }

-bool PeerManagerImpl::SendMessages(CNode* pto)
+bool PeerManagerImpl::SendMessages(CNode& to)
 {
 AssertLockNotHeld(m_tx_download_mutex);
 AssertLockHeld(g_msgproc_mutex);

- PeerRef peer = GetPeerRef(pto->GetId());
+ PeerRef peer = GetPeerRef(to.GetId());
 if (!peer) return false;
 const Consensus::Params& consensusParams = m_chainparams.GetConsensus();

 // We must call MaybeDiscourageAndDisconnect first, to ensure that we'll
 // disconnect misbehaving peers even before the version handshake is complete.
- if (MaybeDiscourageAndDisconnect(*pto, *peer)) return true;
+ if (MaybeDiscourageAndDisconnect(to, *peer)) return true;

 // Initiate version handshake for outbound connections
- if (!pto->IsInboundConn() && !peer->m_outbound_version_message_sent) {
- PushNodeVersion(*pto, *peer);
+ if (!to.IsInboundConn() && !peer->m_outbound_version_message_sent) {
+ PushNodeVersion(to, *peer);
 peer->m_outbound_version_message_sent = true;
 }

 // Don't send anything until the version handshake is complete
- if (!pto->fSuccessfullyConnected || pto->fDisconnect)
+ if (!to.fSuccessfullyConnected || to.fDisconnect)
 return true;

 const auto current_time{GetTime<std::chrono::microseconds>()};

- if (pto->IsAddrFetchConn() && current_time - pto->m_connected > 10 * AVG_ADDRESS_BROADCAST_INTERVAL) {
- LogDebug(BCLog::NET, "addrfetch connection timeout, %s\n", pto->DisconnectMsg(fLogIPs));
- pto->fDisconnect = true;
+ if (to.IsAddrFetchConn() && current_time - to.m_connected > 10 * AVG_ADDRESS_BROADCAST_INTERVAL) {
+ LogDebug(BCLog::NET, "addrfetch connection timeout, %s\n", to.DisconnectMsg(fLogIPs));
+ to.fDisconnect = true;
 return true;
 }

- MaybeSendPing(*pto, *peer, current_time);
+ MaybeSendPing(to, *peer, current_time);

 // MaybeSendPing may have marked peer for disconnection
- if (pto->fDisconnect) return true;
+ if (to.fDisconnect) return true;

- MaybeSendAddr(*pto, *peer, current_time);
+ MaybeSendAddr(to, *peer, current_time);

- MaybeSendSendHeaders(*pto, *peer);
+ MaybeSendSendHeaders(to, *peer);

 {
 LOCK(cs_main);

- CNodeState &state = *State(pto->GetId());
+ CNodeState &state = *State(to.GetId());

 // Start block sync
 if (m_chainman.m_best_header == nullptr) {
 m_chainman.m_best_header = m_chainman.ActiveChain().Tip();
 }

 // Determine whether we might try initial headers sync or parallel
 // block download from this peer -- this mostly affects behavior while
 // in IBD (once out of IBD, we sync from all peers).
 bool sync_blocks_and_headers_from_peer = false;
 if (state.fPreferredDownload) {
 sync_blocks_and_headers_from_peer = true;
- } else if (CanServeBlocks(*peer) && !pto->IsAddrFetchConn()) {
+ } else if (CanServeBlocks(*peer) && !to.IsAddrFetchConn()) {
 // Typically this is an inbound peer. If we don't have any outbound
 // peers, or if we aren't downloading any blocks from such peers,
 // then allow block downloads from this peer, too.
 // We prefer downloading blocks from outbound peers to avoid
 // putting undue load on (say) some home user who is just making
 // outbound connections to the network, but if our only source of
@@ -5565,14 +5565,14 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
 is up-to-date. With a non-empty response, we can initialise
 the peer's known best block. This wouldn't be possible
 if we requested starting at m_chainman.m_best_header and
 got back an empty response. */
 if (pindexStart->pprev)
 pindexStart = pindexStart->pprev;
- if (MaybeSendGetHeaders(*pto, GetLocator(pindexStart), *peer)) {
- LogDebug(BCLog::NET, "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->GetId(), peer->m_starting_height);
+ if (MaybeSendGetHeaders(to, GetLocator(pindexStart), *peer)) {
+ LogDebug(BCLog::NET, "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, to.GetId(), peer->m_starting_height);

 state.fSyncStarted = true;
 peer->m_headers_sync_timeout = current_time + HEADERS_DOWNLOAD_TIMEOUT_BASE +
 (
 // Convert HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER to microseconds before scaling
 // to maintain precision
@@ -5598,13 +5598,13 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
 LOCK(peer->m_block_inv_mutex);
 std::vector<CBlock> vHeaders;
 bool fRevertToInv = ((!peer->m_prefers_headers &&
 (!state.m_requested_hb_cmpctblocks || peer->m_blocks_for_headers_relay.size() > 1)) ||
 peer->m_blocks_for_headers_relay.size() > MAX_BLOCKS_TO_ANNOUNCE);
 const CBlockIndex *pBestIndex = nullptr; // last header queued for delivery
- ProcessBlockAvailability(pto->GetId()); // ensure pindexBestKnownBlock is up-to-date
+ ProcessBlockAvailability(to.GetId()); // ensure pindexBestKnownBlock is up-to-date

 if (!fRevertToInv) {
 bool fFoundStartingHeader = false;
 // Try to find first header that our peer doesn't have, and
 // then send all headers past that one. If we come across any
 // headers that aren't on m_chainman.ActiveChain(), give up.
@@ -5652,42 +5652,42 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
 }
 if (!fRevertToInv && !vHeaders.empty()) {
 if (vHeaders.size() == 1 && state.m_requested_hb_cmpctblocks) {
 // We only send up to 1 block as header-and-ids, as otherwise
 // probably means we're doing an initial-ish-sync or they're slow
 LogDebug(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", __func__,
- vHeaders.front().GetHash().ToString(), pto->GetId());
+ vHeaders.front().GetHash().ToString(), to.GetId());

 std::optional<CSerializedNetMsg> cached_cmpctblock_msg;
 {
 LOCK(m_most_recent_block_mutex);
 if (m_most_recent_block_hash == pBestIndex->GetBlockHash()) {
 cached_cmpctblock_msg = NetMsg::Make(NetMsgType::CMPCTBLOCK, *m_most_recent_compact_block);
 }
 }
 if (cached_cmpctblock_msg.has_value()) {
- PushMessage(*pto, std::move(cached_cmpctblock_msg.value()));
+ PushMessage(to, std::move(cached_cmpctblock_msg.value()));
 } else {
 CBlock block;
 const bool ret{m_chainman.m_blockman.ReadBlock(block, *pBestIndex)};
 assert(ret);
 CBlockHeaderAndShortTxIDs cmpctblock{block, m_rng.rand64()};
- MakeAndPushMessage(*pto, NetMsgType::CMPCTBLOCK, cmpctblock);
+ MakeAndPushMessage(to, NetMsgType::CMPCTBLOCK, cmpctblock);
 }
 state.pindexBestHeaderSent = pBestIndex;
 } else if (peer->m_prefers_headers) {
 if (vHeaders.size() > 1) {
 LogDebug(BCLog::NET, "%s: %u headers, range (%s, %s), to peer=%d\n", __func__,
 vHeaders.size(),
 vHeaders.front().GetHash().ToString(),
- vHeaders.back().GetHash().ToString(), pto->GetId());
+ vHeaders.back().GetHash().ToString(), to.GetId());
 } else {
 LogDebug(BCLog::NET, "%s: sending header %s to peer=%d\n", __func__,
- vHeaders.front().GetHash().ToString(), pto->GetId());
+ vHeaders.front().GetHash().ToString(), to.GetId());
 }
- MakeAndPushMessage(*pto, NetMsgType::HEADERS, TX_WITH_WITNESS(vHeaders));
+ MakeAndPushMessage(to, NetMsgType::HEADERS, TX_WITH_WITNESS(vHeaders));
 state.pindexBestHeaderSent = pBestIndex;
 } else
 fRevertToInv = true;
 }
 if (fRevertToInv) {
 // If falling back to using an inv, just try to inv the tip.
@@ -5707,13 +5707,13 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
 }

 // If the peer's chain has this block, don't inv it back.
 if (!PeerHasHeader(&state, pindex)) {
 peer->m_blocks_for_inv_relay.push_back(hashToAnnounce);
 LogDebug(BCLog::NET, "%s: sending inv peer=%d hash=%s\n", __func__,
- pto->GetId(), hashToAnnounce.ToString());
+ to.GetId(), hashToAnnounce.ToString());
 }
 }
 }
 peer->m_blocks_for_headers_relay.clear();
 }

@@ -5726,26 +5726,26 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
 vInv.reserve(std::max<size_t>(peer->m_blocks_for_inv_relay.size(), INVENTORY_BROADCAST_TARGET));

 // Add blocks
 for (const uint256& hash : peer->m_blocks_for_inv_relay) {
 vInv.emplace_back(MSG_BLOCK, hash);
 if (vInv.size() == MAX_INV_SZ) {
- MakeAndPushMessage(*pto, NetMsgType::INV, vInv);
+ MakeAndPushMessage(to, NetMsgType::INV, vInv);
 vInv.clear();
 }
 }
 peer->m_blocks_for_inv_relay.clear();
 }

 if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) {
 LOCK(tx_relay->m_tx_inventory_mutex);
 // Check whether periodic sends should happen
- bool fSendTrickle = pto->HasPermission(NetPermissionFlags::NoBan);
+ bool fSendTrickle = to.HasPermission(NetPermissionFlags::NoBan);
 if (tx_relay->m_next_inv_send_time < current_time) {
 fSendTrickle = true;
- if (pto->IsInboundConn()) {
+ if (to.IsInboundConn()) {
 tx_relay->m_next_inv_send_time = NextInvToInbounds(current_time, INBOUND_INVENTORY_BROADCAST_INTERVAL);
 } else {
 tx_relay->m_next_inv_send_time = current_time + m_rng.rand_exp_duration(OUTBOUND_INVENTORY_BROADCAST_INTERVAL);
 }
 }

@@ -5779,13 +5779,13 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
 if (tx_relay->m_bloom_filter) {
 if (!tx_relay->m_bloom_filter->IsRelevantAndUpdate(*txinfo.tx)) continue;
 }
 tx_relay->m_tx_inventory_known_filter.insert(inv.hash);
 vInv.push_back(inv);
 if (vInv.size() == MAX_INV_SZ) {
- MakeAndPushMessage(*pto, NetMsgType::INV, vInv);
+ MakeAndPushMessage(to, NetMsgType::INV, vInv);
 vInv.clear();
 }
 }
 }

 // Determine transactions to relay
@@ -5831,34 +5831,34 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
 }
 if (tx_relay->m_bloom_filter && !tx_relay->m_bloom_filter->IsRelevantAndUpdate(*txinfo.tx)) continue;
 // Send
 vInv.push_back(inv);
 nRelayedTransactions++;
 if (vInv.size() == MAX_INV_SZ) {
- MakeAndPushMessage(*pto, NetMsgType::INV, vInv);
+ MakeAndPushMessage(to, NetMsgType::INV, vInv);
 vInv.clear();
 }
 tx_relay->m_tx_inventory_known_filter.insert(hash);
 }

 // Ensure we'll respond to GETDATA requests for anything we've just announced
 LOCK(m_mempool.cs);
 tx_relay->m_last_inv_sequence = m_mempool.GetSequence();
 }
 }
 if (!vInv.empty())
- MakeAndPushMessage(*pto, NetMsgType::INV, vInv);
+ MakeAndPushMessage(to, NetMsgType::INV, vInv);

 // Detect whether we're stalling
 auto stalling_timeout = m_block_stalling_timeout.load();
 if (state.m_stalling_since.count() && state.m_stalling_since < current_time - stalling_timeout) {
 // Stalling only triggers when the block download window cannot move. During normal steady state,
 // the download window should be much larger than the to-be-downloaded set of blocks, so disconnection
 // should only happen during initial block download.
- LogInfo("Peer is stalling block download, %s\n", pto->DisconnectMsg(fLogIPs));
- pto->fDisconnect = true;
+ LogInfo("Peer is stalling block download, %s\n", to.DisconnectMsg(fLogIPs));
+ to.fDisconnect = true;
 // Increase timeout for the next peer so that we don't disconnect multiple peers if our own
 // bandwidth is insufficient.
 const auto new_timeout = std::min(2 * stalling_timeout, BLOCK_STALLING_TIMEOUT_MAX);
 if (stalling_timeout != new_timeout && m_block_stalling_timeout.compare_exchange_strong(stalling_timeout, new_timeout)) {
 LogDebug(BCLog::NET, "Increased stalling timeout temporarily to %d seconds\n", count_seconds(new_timeout));
 }
@@ -5870,14 +5870,14 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
 // being saturated. We only count validated in-flight blocks so peers can't advertise non-existing block hashes
 // to unreasonably increase our timeout.
 if (state.vBlocksInFlight.size() > 0) {
 QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
 int nOtherPeersWithValidatedDownloads = m_peers_downloading_from - 1;
 if (current_time > state.m_downloading_since + std::chrono::seconds{consensusParams.nPowTargetSpacing} * (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads)) {
- LogInfo("Timeout downloading block %s, %s\n", queuedBlock.pindex->GetBlockHash().ToString(), pto->DisconnectMsg(fLogIPs));
- pto->fDisconnect = true;
+ LogInfo("Timeout downloading block %s, %s\n", queuedBlock.pindex->GetBlockHash().ToString(), to.DisconnectMsg(fLogIPs));
+ to.fDisconnect = true;
 return true;
 }
 }
 // Check for headers sync timeouts
 if (state.fSyncStarted && peer->m_headers_sync_timeout < std::chrono::microseconds::max()) {
 // Detect whether this is a stalling initial-headers-sync peer
@@ -5885,18 +5885,18 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
 if (current_time > peer->m_headers_sync_timeout && nSyncStarted == 1 && (m_num_preferred_download_peers - state.fPreferredDownload >= 1)) {
 // Disconnect a peer (without NetPermissionFlags::NoBan permission) if it is our only sync peer,
 // and we have others we could be using instead.
 // Note: If all our peers are inbound, then we won't
 // disconnect our sync peer for stalling; we have bigger
 // problems if we can't get any outbound peers.
- if (!pto->HasPermission(NetPermissionFlags::NoBan)) {
- LogInfo("Timeout downloading headers, %s\n", pto->DisconnectMsg(fLogIPs));
- pto->fDisconnect = true;
+ if (!to.HasPermission(NetPermissionFlags::NoBan)) {
+ LogInfo("Timeout downloading headers, %s\n", to.DisconnectMsg(fLogIPs));
+ to.fDisconnect = true;
 return true;
 } else {
- LogInfo("Timeout downloading headers from noban peer, not %s\n", pto->DisconnectMsg(fLogIPs));
+ LogInfo("Timeout downloading headers from noban peer, not %s\n", to.DisconnectMsg(fLogIPs));
 // Reset the headers sync state so that we have a
 // chance to try downloading from a different peer.
 // Note: this will also result in at least one more
 // getheaders message to be sent to
 // this peer (eventually).
 state.fSyncStarted = false;
@@ -5910,13 +5910,13 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
 peer->m_headers_sync_timeout = std::chrono::microseconds::max();
 }
 }

 // Check that outbound peers have reasonable chains
 // GetTime() is used by this anti-DoS logic so we can test this using mocktime
- ConsiderEviction(*pto, *peer, GetTime<std::chrono::seconds>());
+ ConsiderEviction(to, *peer, GetTime<std::chrono::seconds>());

 //
 // Message: getdata (blocks)
 //
 std::vector<CInv> vGetData;
 if (CanServeBlocks(*peer) && ((sync_blocks_and_headers_from_peer && !IsLimitedPeer(*peer)) || !m_chainman.IsInitialBlockDownload()) && state.vBlocksInFlight.size() < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
@@ -5939,15 +5939,15 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
 vToDownload, from_tip,
 Assert(m_chainman.GetSnapshotBaseBlock()));
 }
 for (const CBlockIndex *pindex : vToDownload) {
 uint32_t nFetchFlags = GetFetchFlags(*peer);
 vGetData.emplace_back(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash());
- BlockRequested(pto->GetId(), *pindex);
+ BlockRequested(to.GetId(), *pindex);
 LogDebug(BCLog::NET, "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(),
- pindex->nHeight, pto->GetId());
+ pindex->nHeight, to.GetId());
 }
 if (state.vBlocksInFlight.empty() && staller != -1) {
 if (State(staller)->m_stalling_since == 0us) {
 State(staller)->m_stalling_since = current_time;
 LogDebug(BCLog::NET, "Stall started peer=%d\n", staller);
 }
@@ -5956,21 +5956,21 @@ bool PeerManagerImpl::SendMessages(CNode* pto)

 //
 // Message: getdata (transactions)
 //
 {
 LOCK(m_tx_download_mutex);
- for (const GenTxid& gtxid : m_txdownloadman.GetRequestsToSend(pto->GetId(), current_time)) {
+ for (const GenTxid& gtxid : m_txdownloadman.GetRequestsToSend(to.GetId(), current_time)) {
 vGetData.emplace_back(gtxid.IsWtxid() ? MSG_WTX : (MSG_TX | GetFetchFlags(*peer)), gtxid.GetHash());
 if (vGetData.size() >= MAX_GETDATA_SZ) {
- MakeAndPushMessage(*pto, NetMsgType::GETDATA, vGetData);
+ MakeAndPushMessage(to, NetMsgType::GETDATA, vGetData);
 vGetData.clear();
 }
 }
 }

 if (!vGetData.empty())
- MakeAndPushMessage(*pto, NetMsgType::GETDATA, vGetData);
+ MakeAndPushMessage(to, NetMsgType::GETDATA, vGetData);
 } // release cs_main
- MaybeSendFeefilter(*pto, *peer, current_time);
+ MaybeSendFeefilter(to, *peer, current_time);
 return true;
 }
diff --git i/src/test/denialofservice_tests.cpp w/src/test/denialofservice_tests.cpp
index 9ee7e9c9fe..1d658ac9e0 100644
--- i/src/test/denialofservice_tests.cpp
+++ w/src/test/denialofservice_tests.cpp
@@ -77,33 +77,33 @@ BOOST_AUTO_TEST_CASE(outbound_slow_chain_eviction)
 LOCK(cs_main);
 BOOST_CHECK(m_node.chainman->ActiveChain().Tip() != nullptr);
 BOOST_CHECK(m_node.chainman->ActiveChain().Tip()->nChainWork > 0);
 }

 // Test starts here
- BOOST_CHECK(peerman.SendMessages(&dummyNode1)); // should result in getheaders
+ BOOST_CHECK(peerman.SendMessages(dummyNode1)); // should result in getheaders

 {
 LOCK(dummyNode1.cs_vSend);
 const auto& [to_send, _more, _msg_type] = dummyNode1.m_transport->GetBytesToSend(false);
 BOOST_CHECK(!to_send.empty());
 }
 connman.FlushSendBuffer(dummyNode1);

 int64_t nStartTime = GetTime();
 // Wait 21 minutes
 SetMockTime(nStartTime+21*60);
- BOOST_CHECK(peerman.SendMessages(&dummyNode1)); // should result in getheaders
+ BOOST_CHECK(peerman.SendMessages(dummyNode1)); // should result in getheaders
 {
 LOCK(dummyNode1.cs_vSend);
 const auto& [to_send, _more, _msg_type] = dummyNode1.m_transport->GetBytesToSend(false);
 BOOST_CHECK(!to_send.empty());
 }
 // Wait 3 more minutes
 SetMockTime(nStartTime+24*60);
- BOOST_CHECK(peerman.SendMessages(&dummyNode1)); // should result in disconnect
+ BOOST_CHECK(peerman.SendMessages(dummyNode1)); // should result in disconnect
 BOOST_CHECK(dummyNode1.fDisconnect == true);

 peerman.FinalizeNode(dummyNode1);
 }

 struct OutboundTest : TestingSetup {
@@ -330,13 +330,13 @@ BOOST_AUTO_TEST_CASE(peer_discouragement)
 /*inbound_onion=*/false};
 nodes[0]->SetCommonVersion(PROTOCOL_VERSION);
 peerLogic->InitializeNode(*nodes[0], NODE_NETWORK);
 nodes[0]->fSuccessfullyConnected = true;
 connman->AddTestNode(*nodes[0]);
 peerLogic->UnitTestMisbehaving(nodes[0]->GetId()); // Should be discouraged
- BOOST_CHECK(peerLogic->SendMessages(nodes[0]));
+ BOOST_CHECK(peerLogic->SendMessages(*nodes[0]));

 BOOST_CHECK(banman->IsDiscouraged(addr[0]));
 BOOST_CHECK(nodes[0]->fDisconnect);
 BOOST_CHECK(!banman->IsDiscouraged(other_addr)); // Different address, not discouraged

 nodes[1] = new CNode{id++,
@@ -349,21 +349,21 @@ BOOST_AUTO_TEST_CASE(peer_discouragement)
 ConnectionType::INBOUND,
 /*inbound_onion=*/false};
 nodes[1]->SetCommonVersion(PROTOCOL_VERSION);
 peerLogic->InitializeNode(*nodes[1], NODE_NETWORK);
 nodes[1]->fSuccessfullyConnected = true;
 connman->AddTestNode(*nodes[1]);
- BOOST_CHECK(peerLogic->SendMessages(nodes[1]));
+ BOOST_CHECK(peerLogic->SendMessages(*nodes[1]));
 // [0] is still discouraged/disconnected.
 BOOST_CHECK(banman->IsDiscouraged(addr[0]));
 BOOST_CHECK(nodes[0]->fDisconnect);
 // [1] is not discouraged/disconnected yet.
 BOOST_CHECK(!banman->IsDiscouraged(addr[1]));
 BOOST_CHECK(!nodes[1]->fDisconnect);
 peerLogic->UnitTestMisbehaving(nodes[1]->GetId());
- BOOST_CHECK(peerLogic->SendMessages(nodes[1]));
+ BOOST_CHECK(peerLogic->SendMessages(*nodes[1]));
 // Expect both [0] and [1] to be discouraged/disconnected now.
 BOOST_CHECK(banman->IsDiscouraged(addr[0]));
 BOOST_CHECK(nodes[0]->fDisconnect);
 BOOST_CHECK(banman->IsDiscouraged(addr[1]));
 BOOST_CHECK(nodes[1]->fDisconnect);

@@ -380,13 +380,13 @@ BOOST_AUTO_TEST_CASE(peer_discouragement)
 /*inbound_onion=*/false};
 nodes[2]->SetCommonVersion(PROTOCOL_VERSION);
 peerLogic->InitializeNode(*nodes[2], NODE_NETWORK);
 nodes[2]->fSuccessfullyConnected = true;
 connman->AddTestNode(*nodes[2]);
 peerLogic->UnitTestMisbehaving(nodes[2]->GetId());
- BOOST_CHECK(peerLogic->SendMessages(nodes[2]));
+ BOOST_CHECK(peerLogic->SendMessages(*nodes[2]));
 BOOST_CHECK(banman->IsDiscouraged(addr[0]));
 BOOST_CHECK(banman->IsDiscouraged(addr[1]));
 BOOST_CHECK(banman->IsDiscouraged(addr[2]));
 BOOST_CHECK(nodes[0]->fDisconnect);
 BOOST_CHECK(nodes[1]->fDisconnect);
 BOOST_CHECK(nodes[2]->fDisconnect);
@@ -422,13 +422,13 @@ BOOST_AUTO_TEST_CASE(DoS_bantime)
 /*inbound_onion=*/false};
 dummyNode.SetCommonVersion(PROTOCOL_VERSION);
 peerLogic->InitializeNode(dummyNode, NODE_NETWORK);
 dummyNode.fSuccessfullyConnected = true;

 peerLogic->UnitTestMisbehaving(dummyNode.GetId());
- BOOST_CHECK(peerLogic->SendMessages(&dummyNode));
+ BOOST_CHECK(peerLogic->SendMessages(dummyNode));
 BOOST_CHECK(banman->IsDiscouraged(addr));

 peerLogic->FinalizeNode(dummyNode);
 }

 BOOST_AUTO_TEST_SUITE_END()
diff --git i/src/test/fuzz/p2p_handshake.cpp w/src/test/fuzz/p2p_handshake.cpp
index d608efd87a..183896bf3c 100644
--- i/src/test/fuzz/p2p_handshake.cpp
+++ w/src/test/fuzz/p2p_handshake.cpp
@@ -97,12 +97,12 @@ FUZZ_TARGET(p2p_handshake, .init = ::initialize)
 connection.fPauseSend = false;

 try {
 more_work = connman.ProcessMessagesOnce(connection);
 } catch (const std::ios_base::failure&) {
 }
- peerman->SendMessages(&connection);
+ peerman->SendMessages(connection);
 }
 }

 g_setup->m_node.connman->StopNodes();
 }
diff --git i/src/test/fuzz/p2p_headers_presync.cpp w/src/test/fuzz/p2p_headers_presync.cpp
index b31b74ee4f..88072c7570 100644
--- i/src/test/fuzz/p2p_headers_presync.cpp
+++ w/src/test/fuzz/p2p_headers_presync.cpp
@@ -94,13 +94,13 @@ void HeadersSyncSetup::SendMessage(FuzzedDataProvider& fuzzed_data_provider, CSe
 (void)connman.ReceiveMsgFrom(connection, std::move(msg));
 connection.fPauseSend = false;
 try {
 connman.ProcessMessagesOnce(connection);
 } catch (const std::ios_base::failure&) {
 }
- m_node.peerman->SendMessages(&connection);
+ m_node.peerman->SendMessages(connection);
 }

 CBlockHeader ConsumeHeader(FuzzedDataProvider& fuzzed_data_provider, const uint256& prev_hash, uint32_t prev_nbits)
 {
 CBlockHeader header;
 header.nNonce = 0;
diff --git i/src/test/fuzz/process_message.cpp w/src/test/fuzz/process_message.cpp
index 4bd38a1ac6..2a304cbd4b 100644
--- i/src/test/fuzz/process_message.cpp
+++ w/src/test/fuzz/process_message.cpp
@@ -85,11 +85,11 @@ FUZZ_TARGET(process_message, .init = initialize_process_message)
 while (more_work) {
 p2p_node.fPauseSend = false;
 try {
 more_work = connman.ProcessMessagesOnce(p2p_node);
 } catch (const std::ios_base::failure&) {
 }
- g_setup->m_node.peerman->SendMessages(&p2p_node);
+ g_setup->m_node.peerman->SendMessages(p2p_node);
 }
 g_setup->m_node.validation_signals->SyncWithValidationInterfaceQueue();
 g_setup->m_node.connman->StopNodes();
 }
diff --git i/src/test/fuzz/process_messages.cpp w/src/test/fuzz/process_messages.cpp
index 0688868c02..342a4038b9 100644
--- i/src/test/fuzz/process_messages.cpp
+++ w/src/test/fuzz/process_messages.cpp
@@ -84,12 +84,12 @@ FUZZ_TARGET(process_messages, .init = initialize_process_messages)
 random_node.fPauseSend = false;

 try {
 more_work = connman.ProcessMessagesOnce(random_node);
 } catch (const std::ios_base::failure&) {
 }
- g_setup->m_node.peerman->SendMessages(&random_node);
+ g_setup->m_node.peerman->SendMessages(random_node);
 }
 }
 g_setup->m_node.validation_signals->SyncWithValidationInterfaceQueue();
 g_setup->m_node.connman->StopNodes();
 }
diff --git i/src/test/net_tests.cpp w/src/test/net_tests.cpp
index 0036d94c2f..ce10b50417 100644
--- i/src/test/net_tests.cpp
+++ w/src/test/net_tests.cpp
@@ -875,13 +875,13 @@ BOOST_AUTO_TEST_CASE(initial_advertise_from_version_message)
 return;
 }
 }
 }
 };

- m_node.peerman->SendMessages(&peer);
+ m_node.peerman->SendMessages(peer);

 BOOST_CHECK(sent);

 CaptureMessage = CaptureMessageOrig;
 chainman.ResetIbd();
 m_node.args->ForceSetArg("-capturemessages", "0");
diff --git i/src/test/util/net.cpp w/src/test/util/net.cpp
index 0b8070a88e..4f4496c93e 100644
--- i/src/test/util/net.cpp
+++ w/src/test/util/net.cpp
@@ -28,13 +28,13 @@ void ConnmanTestMsg::Handshake(CNode& node,
 bool relay_txs)
 {
 auto& peerman{static_cast<PeerManager&>(*m_msgproc)};
 auto& connman{*this};

 peerman.InitializeNode(node, local_services);
- peerman.SendMessages(&node);
+ peerman.SendMessages(node);
 FlushSendBuffer(node); // Drop the version message added by SendMessages.

 CSerializedNetMsg msg_version{
 NetMsg::Make(NetMsgType::VERSION,
 version, //
 Using<CustomUintFormatter<8>>(remote_services), //
@@ -49,13 +49,13 @@ void ConnmanTestMsg::Handshake(CNode& node,
 relay_txs),
 };

 (void)connman.ReceiveMsgFrom(node, std::move(msg_version));
 node.fPauseSend = false;
 connman.ProcessMessagesOnce(node);
- peerman.SendMessages(&node);
+ peerman.SendMessages(node);
 FlushSendBuffer(node); // Drop the verack message added by SendMessages.
 if (node.fDisconnect) return;
 assert(node.nVersion == version);
 assert(node.GetCommonVersion() == std::min(version, PROTOCOL_VERSION));
 CNodeStateStats statestats;
 assert(peerman.GetNodeStateStats(node.GetId(), statestats));
@@ -63,13 +63,13 @@ void ConnmanTestMsg::Handshake(CNode& node,
 assert(statestats.their_services == remote_services);
 if (successfully_connected) {
 CSerializedNetMsg msg_verack{NetMsg::Make(NetMsgType::VERACK)};
 (void)connman.ReceiveMsgFrom(node, std::move(msg_verack));
 node.fPauseSend = false;
 connman.ProcessMessagesOnce(node);
- peerman.SendMessages(&node);
+ peerman.SendMessages(node);
 assert(node.fSuccessfullyConnected == true);
 }
 }

 void ConnmanTestMsg::NodeReceiveMsgBytes(CNode& node, std::span<const uint8_t> msg_bytes, bool& complete) const
 {
diff --git i/src/test/util/net.h w/src/test/util/net.h
index 36c5dc5c17..0a0719c735 100644
--- i/src/test/util/net.h
+++ w/src/test/util/net.h
@@ -70,15 +70,15 @@ struct ConnmanTestMsg : public CConnman {
 ServiceFlags remote_services,
 ServiceFlags local_services,
 int32_t version,
 bool relay_txs)
 EXCLUSIVE_LOCKS_REQUIRED(NetEventsInterface::g_msgproc_mutex);

- bool ProcessMessagesOnce(CNode& node) EXCLUSIVE_LOCKS_REQUIRED(NetEventsInterface::g_msgproc_mutex)
+ bool ProcessMessagesOnce(CNode& from) EXCLUSIVE_LOCKS_REQUIRED(NetEventsInterface::g_msgproc_mutex)
 {
- return m_msgproc->ProcessMessages(&node, flagInterruptMsgProc);
+ return m_msgproc->ProcessMessages(from, flagInterruptMsgProc);
 }

 void NodeReceiveMsgBytes(CNode& node, std::span<const uint8_t> msg_bytes, bool& complete) const;

 bool ReceiveMsgFrom(CNode& node, CSerializedNetMsg&& ser_msg) const;
 void FlushSendBuffer(CNode& node) const;