Given that this delay is written in multiple places, would that be more verbose? Alternatively, maybe we could add an `m_next_write` next to `m_last_write`, like:
```diff
diff --git a/src/validation.cpp b/src/validation.cpp
index b54446be4e..e2c7272531 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -86,10 +86,9 @@ using node::CBlockIndexHeightOnlyComparator;
 using node::CBlockIndexWorkComparator;
 using node::SnapshotMetadata;
 
-/** Time to wait between writing blocks/block index and chainstate to disk. */
-static constexpr std::chrono::minutes DATABASE_WRITE_INTERVAL{50};
-/** Range of random delay to add to DATABASE_WRITE_INTERVAL */
-static constexpr uint32_t DATABASE_WRITE_INTERVAL_RANDOM_DELAY_RANGE{20};
+/** Minimum and maximum time to wait between writing blocks/block index and chainstate to disk. */
+static constexpr auto DATABASE_WRITE_INTERVAL_MIN{50min};
+static constexpr auto DATABASE_WRITE_INTERVAL_MAX{70min};
 /** Maximum age of our tip for us to be considered current for fee estimation */
 static constexpr std::chrono::hours MAX_FEE_ESTIMATION_TIP_AGE{3};
 const std::vector<std::string> CHECKLEVEL_DOC {
@@ -2776,6 +2775,11 @@ CoinsCacheSizeState Chainstate::GetCoinsCacheSizeState(
     return CoinsCacheSizeState::OK;
 }
 
+NodeClock::time_point Chainstate::CalculateNextWrite(NodeClock::time_point after) const
+{
+    return FastRandomContext().rand_uniform_delay(after + DATABASE_WRITE_INTERVAL_MIN, DATABASE_WRITE_INTERVAL_MAX - DATABASE_WRITE_INTERVAL_MIN);
+}
+
 bool Chainstate::FlushStateToDisk(
     BlockValidationState &state,
     FlushStateMode mode,
@@ -2840,14 +2844,14 @@ bool Chainstate::FlushStateToDisk(
         // Avoid writing/flushing immediately after startup.
         if (m_last_write == decltype(m_last_write){}) {
             m_last_write = nNow;
-            m_random_write_delay = std::chrono::minutes{(FastRandomContext().randrange(DATABASE_WRITE_INTERVAL_RANDOM_DELAY_RANGE))};
+            m_next_write = CalculateNextWrite(m_last_write);
         }
         // The cache is large and we're within 10% and 10 MiB of the limit, but we have time now (not in the middle of a block processing).
         bool fCacheLarge = mode == FlushStateMode::PERIODIC && cache_state >= CoinsCacheSizeState::LARGE;
         // The cache is over the limit, we have to write now.
         bool fCacheCritical = mode == FlushStateMode::IF_NEEDED && cache_state >= CoinsCacheSizeState::CRITICAL;
         // It's been a while since we wrote the block index to disk. Do this frequently, so we don't need to redownload after a crash.
-        bool fPeriodicWrite = mode == FlushStateMode::PERIODIC && nNow > m_last_write + DATABASE_WRITE_INTERVAL + m_random_write_delay;
+        bool fPeriodicWrite = mode == FlushStateMode::PERIODIC && nNow >= m_next_write;
         // Combine all conditions that result in a full cache flush.
         bool fDoFullFlush = (mode == FlushStateMode::ALWAYS) || fCacheLarge || fCacheCritical || fPeriodicWrite || fFlushForPrune;
         // Write blocks, block index and best chain related state to disk.
@@ -2882,7 +2886,7 @@ bool Chainstate::FlushStateToDisk(
                 m_blockman.UnlinkPrunedFiles(setFilesToPrune);
             }
             m_last_write = nNow;
-            m_random_write_delay = std::chrono::minutes{(FastRandomContext().randrange(DATABASE_WRITE_INTERVAL_RANDOM_DELAY_RANGE))};
+            m_next_write = CalculateNextWrite(m_last_write);
 
             if (!CoinsTip().GetBestBlock().IsNull()) {
                 LOG_TIME_MILLIS_WITH_CATEGORY(strprintf("write coins cache to disk (%d coins, %.2fkB)",
diff --git a/src/validation.h b/src/validation.h
index dfe33b8e29..953793bdf7 100644
--- a/src/validation.h
+++ b/src/validation.h
@@ -810,7 +810,9 @@ private:
         EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
 
     NodeClock::time_point m_last_write{};
-    std::chrono::minutes m_random_write_delay{};
+    NodeClock::time_point m_next_write{};
+
+    NodeClock::time_point CalculateNextWrite(NodeClock::time_point after) const;
 
     /**
      * In case of an invalid snapshot, rename the coins leveldb directory so
```
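For intuition, here is a minimal standalone sketch of what that `CalculateNextWrite` would compute, assuming `rand_uniform_delay(t, range)` yields a uniformly random time point in `[t, t + range)`; it uses plain `<random>`/`<chrono>` as a stand-in for `FastRandomContext`, so it's illustrative only, not part of the patch:

```cpp
// Standalone sketch (not Bitcoin Core code): the suggested CalculateNextWrite(),
// under the assumption that rand_uniform_delay(t, range) returns a uniformly
// random time point in [t, t + range). std::mt19937_64 stands in for
// FastRandomContext.
#include <chrono>
#include <cstdint>
#include <random>

using namespace std::chrono_literals;
using time_point = std::chrono::steady_clock::time_point;

constexpr auto DATABASE_WRITE_INTERVAL_MIN{50min};
constexpr auto DATABASE_WRITE_INTERVAL_MAX{70min};

time_point CalculateNextWrite(time_point after)
{
    static std::mt19937_64 rng{std::random_device{}()};
    // Jitter window: 70min - 50min = 20min, drawn at microsecond granularity.
    const auto range = std::chrono::duration_cast<std::chrono::microseconds>(
        DATABASE_WRITE_INTERVAL_MAX - DATABASE_WRITE_INTERVAL_MIN);
    std::uniform_int_distribution<std::int64_t> dist(0, range.count() - 1);
    // Result is uniform in [after + 50min, after + 70min).
    return after + DATABASE_WRITE_INTERVAL_MIN + std::chrono::microseconds{dist(rng)};
}
```

Either way, each scheduled write lands somewhere in `[m_last_write + 50min, m_last_write + 70min)`, and the jitter is re-rolled after every flush, so the two call sites stay identical.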
Note: this way it should probably be `nNow >= m_next_write` instead of `nNow > m_next_write`, to be consistent with the naming.
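A trivial standalone illustration of that boundary point (again, not part of the patch):

```cpp
// Standalone illustration: with the m_next_write naming, ">=" triggers the
// periodic write at exactly the scheduled instant, whereas ">" would defer it
// to the first flush call strictly after that instant.
#include <cassert>
#include <chrono>

int main()
{
    using namespace std::chrono;
    const steady_clock::time_point next_write{steady_clock::time_point{} + 50min};
    const steady_clock::time_point now{next_write}; // exactly the scheduled time

    assert(now >= next_write);   // inclusive comparison: write happens now
    assert(!(now > next_write)); // strict comparison: write would be skipped
    return 0;
}
```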