To clarify, currently AssumeUTXO doesn't display the warning:
2025-03-18T10:08:47Z [snapshot] 184000000 coins loaded (99.56%, 29332 MB)
2025-03-18T10:08:48Z [snapshot] loaded 184821030 (29456 MB) coins from snapshot 000000000000000000010b17283c3c400507969a9c2afd1dcf2082ec5cca2880
2025-03-18T10:08:48Z FlushSnapshotToDisk: saving snapshot chainstate (29456 MB) started
2025-03-18T10:09:57Z FlushSnapshotToDisk: completed (69461.66ms)
But if we moved the warning introduced in #31534 into the method that does the actual long operation, it would show the warning for the AssumeUTXO case as well (i.e. both for Flush and Sync):
diff --git a/src/coins.h b/src/coins.h
index f897bce749..a23b663a1a 100644
--- a/src/coins.h
+++ b/src/coins.h
@@ -300,6 +300,7 @@ struct CoinsViewCacheCursor
}
inline bool WillErase(CoinsCachePair& current) const noexcept { return m_will_erase || current.second.coin.IsSpent(); }
+ size_t GetDirtyCount() const noexcept { return m_dirty; }
private:
size_t& m_usage;
size_t& m_dirty;
diff --git a/src/txdb.cpp b/src/txdb.cpp
index 1622039d63..083cab3279 100644
--- a/src/txdb.cpp
+++ b/src/txdb.cpp
@@ -25,6 +25,9 @@ static constexpr uint8_t DB_HEAD_BLOCKS{'H'};
// Keys used in previous version that might still be found in the DB:
static constexpr uint8_t DB_COINS{'c'};
+// Threshold for warning when writing this many dirty cache entries to disk.
+static constexpr size_t WARN_FLUSH_COINS_COUNT{10'000'000};
+
bool CCoinsViewDB::NeedsUpgrade()
{
std::unique_ptr<CDBIterator> cursor{m_db->NewIterator()};
@@ -109,6 +112,8 @@ bool CCoinsViewDB::BatchWrite(CoinsViewCacheCursor& cursor, const uint256 &hashB
}
}
+ if (cursor.GetDirtyCount() >= WARN_FLUSH_COINS_COUNT) LogWarning("Flushing large (%d entries) UTXO set to disk, it may take several minutes", cursor.GetDirtyCount());
+
// In the first batch, mark the database as being in the middle of a
// transition from old_tip to hashBlock.
// A vector is used for future extensibility, as we may want to support
diff --git a/src/validation.cpp b/src/validation.cpp
index 1f4f2e1ae8..3323a037dc 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -88,8 +88,6 @@ using node::CBlockIndexHeightOnlyComparator;
using node::CBlockIndexWorkComparator;
using node::SnapshotMetadata;
-/** Threshold for warning when writing this many dirty cache entries to disk. */
-static constexpr size_t WARN_FLUSH_COINS_COUNT = 10'000'000;
/** Time to wait between writing blocks/block index to disk. */
static constexpr std::chrono::hours DATABASE_WRITE_INTERVAL{1};
/** Time to wait between flushing chainstate to disk. */
@@ -2932,7 +2930,6 @@ bool Chainstate::FlushStateToDisk(
}
// Flush best chain related state. This can only be done if the blocks / block index write was also done.
if (fDoFullFlush && !CoinsTip().GetBestBlock().IsNull()) {
- if (coins_dirty_count >= WARN_FLUSH_COINS_COUNT) LogWarning("Flushing large (%d entries) UTXO set to disk, it may take several minutes", coins_dirty_count);
LOG_TIME_MILLIS_WITH_CATEGORY(strprintf("write coins cache to disk (%d out of %d cached coins)",
coins_dirty_count, coins_count), BCLog::BENCH);
Reproducer:
cmake -B build -DCMAKE_BUILD_TYPE=Release && cmake --build build -j$(nproc)
mkdir -p demo && rm -rfd demo/chainstate demo/chainstate_snapshot demo/debug.log
build/bin/bitcoind -datadir=demo -stopatheight=1
build/bin/bitcoind -datadir=demo -daemon -blocksonly=1 -connect=0 -dbcache=30000
build/bin/bitcoin-cli -datadir=demo -rpcclienttimeout=0 loadtxoutset ~/utxo-880000.dat
build/bin/bitcoin-cli -datadir=demo stop
Result:
2025-03-18T09:59:40Z [snapshot] 184000000 coins loaded (99.56%, 29332 MB)
2025-03-18T09:59:41Z [snapshot] loaded 184821030 (29456 MB) coins from snapshot 000000000000000000010b17283c3c400507969a9c2afd1dcf2082ec5cca2880
2025-03-18T09:59:41Z FlushSnapshotToDisk: saving snapshot chainstate (29456 MB) started
*2025-03-18T09:59:41Z [warning] Flushing large (184821030 entries) UTXO set to disk, it may take several minutes*
2025-03-18T10:00:50Z FlushSnapshotToDisk: completed (68776.27ms)
Would it be simpler if I pushed this change in a separate PR instead?