Bitcoin ABC 0.26.3
P2P Digital Currency
Loading...
Searching...
No Matches
validation.cpp
Go to the documentation of this file.
1// Copyright (c) 2009-2010 Satoshi Nakamoto
2// Copyright (c) 2009-2018 The Bitcoin Core developers
3// Copyright (c) 2017-2020 The Bitcoin developers
4// Distributed under the MIT software license, see the accompanying
5// file COPYING or http://www.opensource.org/licenses/mit-license.php.
6
7#include <validation.h>
8
10#include <kernel/coinstats.h>
14
15#include <arith_uint256.h>
16#include <avalanche/avalanche.h>
17#include <avalanche/processor.h>
18#include <blockvalidity.h>
19#include <chainparams.h>
20#include <checkpoints.h>
21#include <checkqueue.h>
22#include <common/args.h>
23#include <config.h>
25#include <consensus/amount.h>
26#include <consensus/merkle.h>
27#include <consensus/tx_check.h>
28#include <consensus/tx_verify.h>
30#include <hash.h>
32#include <logging.h>
33#include <logging/timer.h>
34#include <minerfund.h>
35#include <node/blockstorage.h>
36#include <node/utxo_snapshot.h>
39#include <policy/block/rtt.h>
41#include <policy/policy.h>
42#include <policy/settings.h>
43#include <pow/pow.h>
44#include <primitives/block.h>
46#include <random.h>
47#include <reverse_iterator.h>
48#include <script/script.h>
49#include <script/scriptcache.h>
50#include <script/sigcache.h>
51#include <shutdown.h>
52#include <tinyformat.h>
53#include <txdb.h>
54#include <txmempool.h>
55#include <undo.h>
56#include <util/check.h> // For NDEBUG compile time check
57#include <util/fs.h>
58#include <util/fs_helpers.h>
59#include <util/strencodings.h>
60#include <util/string.h>
61#include <util/time.h>
62#include <util/trace.h>
63#include <util/translation.h>
64#include <validationinterface.h>
65#include <warnings.h>
66
67#include <algorithm>
68#include <atomic>
69#include <cassert>
70#include <chrono>
71#include <deque>
72#include <numeric>
73#include <optional>
74#include <string>
75#include <thread>
76
82
// NOTE(review): doxygen text dump — original doc-comment lines between the
// embedded line numbers were dropped by extraction.
86using node::BlockMap;
87using node::fReindex;
90
// Unit conversion helpers for logging durations (seconds * MICRO/MILLI).
91#define MICRO 0.000001
92#define MILLI 0.001
93
// Interval between periodic coins-database writes (1 hour).
95static constexpr std::chrono::hours DATABASE_WRITE_INTERVAL{1};
// Interval between full database flushes (24 hours).
97static constexpr std::chrono::hours DATABASE_FLUSH_INTERVAL{24};
// Human-readable description of each check level; presumably surfaced in
// -checklevel help text — TODO confirm against callers.
98const std::vector<std::string> CHECKLEVEL_DOC{
99 "level 0 reads the blocks from disk",
100 "level 1 verifies block validity",
101 "level 2 verifies undo data",
102 "level 3 checks disconnection of tip blocks",
103 "level 4 tries to reconnect the blocks",
104 "each level includes the checks of the previous levels",
105};
112static constexpr int PRUNE_LOCK_BUFFER{10};
113
114static constexpr uint64_t HEADERS_TIME_VERSION{1};
115
// Condition variable; NOTE(review): presumably notified on best-block
// changes — waiters are outside this view, confirm before relying on it.
117std::condition_variable g_best_block_cv;
119
// BlockValidationOptions constructor (its signature line, orig. 120, was
// lost in extraction): defaults to the configured maximum block size with
// proof-of-work and merkle-root checking enabled.
121 : excessiveBlockSize(config.GetMaxBlockSize()), checkPoW(true),
122 checkMerkleRoot(true) {}
123
// NOTE(review): the method signature (orig. lines 125-126) was lost in
// extraction. The body scans locator.vHave (expected sorted descending by
// height) and returns: the first locator block contained in the active
// chain; the tip, if a locator block descends from it; otherwise genesis.
const CBlockIndex *
127

128 // Find the latest block common to locator and chain - we expect that
129 // locator.vHave is sorted descending by height.
130 for (const BlockHash &hash : locator.vHave) {
131 const CBlockIndex *pindex{m_blockman.LookupBlockIndex(hash)};
132 if (pindex) {
133 if (m_chain.Contains(pindex)) {
134 return pindex;
135 }
// A locator block whose ancestor at the current height IS the tip lies on a
// chain extending our tip, so the tip itself is the fork point.
136 if (pindex->GetAncestor(m_chain.Height()) == m_chain.Tip()) {
137 return m_chain.Tip();
138 }
139 }
140 }
141 return m_chain.Genesis();
142}
143
144static uint32_t GetNextBlockScriptFlags(const CBlockIndex *pindex,
145 const ChainstateManager &chainman);
146
147namespace {
159std::optional<std::vector<int>> CalculatePrevHeights(const CBlockIndex &tip,
160 const CCoinsView &coins,
161 const CTransaction &tx) {
162 std::vector<int> prev_heights;
163 prev_heights.resize(tx.vin.size());
164 for (size_t i = 0; i < tx.vin.size(); ++i) {
165 const CTxIn &txin = tx.vin[i];
166 Coin coin;
167 if (!coins.GetCoin(txin.prevout, coin)) {
168 LogPrintf("ERROR: %s: Missing input %d in transaction \'%s\'\n",
169 __func__, i, tx.GetId().GetHex());
170 return std::nullopt;
171 }
172 if (coin.GetHeight() == MEMPOOL_HEIGHT) {
173 // Assume all mempool transaction confirm in the next block.
174 prev_heights[i] = tip.nHeight + 1;
175 } else {
176 prev_heights[i] = coin.GetHeight();
177 }
178 }
179 return prev_heights;
180}
181} // namespace
182
// Build the BIP68 lock points for `tx` as if it were included in the block
// after `tip`. Returns std::nullopt when an input coin cannot be found.
// NOTE(review): extraction dropped orig. lines 193-194 (presumably the
// declaration of `next_tip`) and 202 (the CalculateSequenceLocks argument
// list) — do not edit from this dump.
183std::optional<LockPoints> CalculateLockPointsAtTip(CBlockIndex *tip,
184 const CCoinsView &coins_view,
185 const CTransaction &tx) {
186 assert(tip);
187
188 auto prev_heights{CalculatePrevHeights(*tip, coins_view, tx)};
189 if (!prev_heights.has_value()) {
190 return std::nullopt;
191 }
192
195 // When SequenceLocks() is called within ConnectBlock(), the height
196 // of the block *being* evaluated is what is used.
197 // Thus if we want to know if a transaction can be part of the
198 // *next* block, we need to use one more than
199 // active_chainstate.m_chain.Height()
200 next_tip.nHeight = tip->nHeight + 1;
201 const auto [min_height, min_time] = CalculateSequenceLocks(
203
204 return LockPoints{min_height, min_time};
205}
206
// NOTE(review): the function signature (orig. lines 206-207) was lost in
// extraction; the body evaluates previously computed lock points against a
// synthetic index one block past `tip`.
208 assert(tip != nullptr);
209
210 CBlockIndex index;
211 index.pprev = tip;
212 // CheckSequenceLocksAtTip() uses active_chainstate.m_chain.Height()+1 to
213 // evaluate height based locks because when SequenceLocks() is called within
214 // ConnectBlock(), the height of the block *being* evaluated is what is
215 // used. Thus if we want to know if a transaction can be part of the *next*
216 // block, we need to use one more than active_chainstate.m_chain.Height()
217 index.nHeight = tip->nHeight + 1;
218
219 return EvaluateSequenceLocks(index, {lock_points.height, lock_points.time});
220}
221
222// Command-line argument "-replayprotectionactivationtime=<timestamp>" will
223// cause the node to switch to replay protected SigHash ForkID value when the
224// median timestamp of the previous 11 blocks is greater than or equal to
225// <timestamp>. Defaults to the pre-defined timestamp when not set.
// First overload: compares the median time past against the (possibly
// command-line overridden) activation timestamp. NOTE(review): the signature
// (orig. 226-227) and the default-value argument line (orig. 229) were lost
// in extraction.
228 return nMedianTimePast >= gArgs.GetIntArg("-replayprotectionactivationtime",
230}
231
// Second overload: convenience wrapper taking the previous block index;
// a null pindexPrev means replay protection is not yet enabled. The leading
// signature line (orig. 232) was lost in extraction.
233 const CBlockIndex *pindexPrev) {
234 if (pindexPrev == nullptr) {
235 return false;
236 }
237
238 return IsReplayProtectionEnabled(params, pindexPrev->GetMedianTimePast());
239}
240
// Re-validates a mempool-accepted transaction's input scripts with caching
// enabled, after asserting that every spent coin matches either a mempool
// parent's output or the UTXO-set copy. NOTE(review): the function name /
// leading signature lines and some parameter lines (orig. 246, 249-252, 254)
// were lost in extraction.
247 const CTransaction &tx, TxValidationState &state,
248 const CCoinsViewCache &view, const CTxMemPool &pool, const uint32_t flags,
253
255 for (const CTxIn &txin : tx.vin) {
256 const Coin &coin = view.AccessCoin(txin.prevout);
257
258 // This coin was checked in PreChecks and MemPoolAccept
259 // has been holding cs_main since then.
260 Assume(!coin.IsSpent());
261 if (coin.IsSpent()) {
262 return false;
263 }
264
265 // If the Coin is available, there are 2 possibilities:
266 // it is available in our current ChainstateActive UTXO set,
267 // or it's a UTXO provided by a transaction in our mempool.
268 // Ensure the scriptPubKeys in Coins from CoinsView are correct.
269 const CTransactionRef &txFrom = pool.get(txin.prevout.GetTxId());
270 if (txFrom) {
271 assert(txFrom->GetId() == txin.prevout.GetTxId());
272 assert(txFrom->vout.size() > txin.prevout.GetN());
273 assert(txFrom->vout[txin.prevout.GetN()] == coin.GetTxOut());
274 } else {
275 const Coin &coinFromUTXOSet = coins_tip.AccessCoin(txin.prevout);
276 assert(!coinFromUTXOSet.IsSpent());
277 assert(coinFromUTXOSet.GetTxOut() == coin.GetTxOut());
278 }
279 }
280
281 // Call CheckInputScripts() to cache signature and script validity against
282 // current tip consensus rules.
283 return CheckInputScripts(tx, state, view, flags, /*sigCacheStore=*/true,
284 /*scriptCacheStore=*/true, txdata, nSigChecksOut);
285}
286
287namespace {
288
// Helper class bundling all state needed to validate transactions (singly or
// as packages) for mempool acceptance. NOTE(review): this is a doxygen text
// dump — many original lines (doc comments, some declarations, initializer
// and return-type lines) were dropped by extraction, so several members and
// factory bodies below appear truncated. Do not edit from this dump.
289class MemPoolAccept {
290public:
291 MemPoolAccept(CTxMemPool &mempool, Chainstate &active_chainstate)
292 : m_pool(mempool), m_view(&m_dummy),
293 m_viewmempool(&active_chainstate.CoinsTip(), m_pool),
294 m_active_chainstate(active_chainstate) {}
295
296 // We put the arguments we're handed into a struct, so we can pass them
297 // around easier.
298 struct ATMPArgs {
299 const Config &m_config;
300 const int64_t m_accept_time;
301 const bool m_bypass_limits;
302 /*
303 * Return any outpoints which were not previously present in the coins
304 * cache, but were added as a result of validating the tx for mempool
305 * acceptance. This allows the caller to optionally remove the cache
306 * additions if the associated transaction ends up being rejected by
307 * the mempool.
308 */
309 std::vector<COutPoint> &m_coins_to_uncache;
310 const bool m_test_accept;
311 const unsigned int m_heightOverride;
317 const bool m_package_submission;
323 const bool m_package_feerates;
324
// Factory for single-transaction acceptance (some initializer lines were
// lost in extraction).
326 static ATMPArgs SingleAccept(const Config &config, int64_t accept_time,
327 bool bypass_limits,
328 std::vector<COutPoint> &coins_to_uncache,
329 bool test_accept,
330 unsigned int heightOverride) {
331 return ATMPArgs{
332 config,
338 /*package_submission=*/false,
339 /*package_feerates=*/false,
340 };
341 }
342
// Factory for package test-accept (dry run, nothing is submitted).
347 static ATMPArgs
349 std::vector<COutPoint> &coins_to_uncache) {
350 return ATMPArgs{
351 config,
353 /*bypass_limits=*/false,
355 /*test_accept=*/true,
356 /*height_override=*/0,
357 // not submitting to mempool
358 /*package_submission=*/false,
359 /*package_feerates=*/false,
360 };
361 }
362
// Factory for real package submission using aggregate package feerates.
364 static ATMPArgs
366 std::vector<COutPoint> &coins_to_uncache) {
367 return ATMPArgs{
368 config,
370 /*bypass_limits=*/false,
372 /*test_accept=*/false,
373 /*height_override=*/0,
374 /*package_submission=*/true,
375 /*package_feerates=*/true,
376 };
377 }
378
// Factory for accepting one transaction that is part of a package.
380 static ATMPArgs SingleInPackageAccept(const ATMPArgs &package_args) {
381 return ATMPArgs{
382 /*config=*/package_args.m_config,
383 /*accept_time=*/package_args.m_accept_time,
384 /*bypass_limits=*/false,
385 /*coins_to_uncache=*/package_args.m_coins_to_uncache,
386 /*test_accept=*/package_args.m_test_accept,
387 /*height_override=*/package_args.m_heightOverride,
388 // do not LimitMempoolSize in Finalize()
389 /*package_submission=*/true,
390 // only 1 transaction
391 /*package_feerates=*/false,
392 };
393 }
394
395 private:
396 // Private ctor to avoid exposing details to clients and allowing the
397 // possibility of mixing up the order of the arguments. Use static
398 // functions above instead.
399 ATMPArgs(const Config &config, int64_t accept_time, bool bypass_limits,
400 std::vector<COutPoint> &coins_to_uncache, bool test_accept,
401 unsigned int height_override, bool package_submission,
402 bool package_feerates)
403 : m_config{config}, m_accept_time{accept_time},
404 m_bypass_limits{bypass_limits},
405 m_coins_to_uncache{coins_to_uncache}, m_test_accept{test_accept},
406 m_heightOverride{height_override},
407 m_package_submission{package_submission},
408 m_package_feerates(package_feerates) {}
409 };
410
411 // Single transaction acceptance
413 ATMPArgs &args)
415
// Multiple-transaction acceptance (return types / lock annotations were lost
// in extraction).
423 AcceptMultipleTransactions(const std::vector<CTransactionRef> &txns,
424 ATMPArgs &args)
426
440 AcceptSubPackage(const std::vector<CTransactionRef> &subpackage,
441 ATMPArgs &args)
443
450 ATMPArgs &args)
452
453private:
454 // All the intermediate state that gets passed between the various levels
455 // of checking a given transaction.
456 struct Workspace {
457 Workspace(const CTransactionRef &ptx,
459 : m_ptx(ptx),
460 m_next_block_script_verify_flags(next_block_script_verify_flags) {
461 }
467 std::unique_ptr<CTxMemPoolEntry> m_entry;
468
473 int64_t m_vsize;
478 Amount m_base_fees;
479
484 Amount m_modified_fees;
485
492 CFeeRate m_package_feerate{Amount::zero()};
493
494 const CTransactionRef &m_ptx;
495 TxValidationState m_state;
501 PrecomputedTransactionData m_precomputed_txdata;
502
503 // ABC specific flags that are used in both PreChecks and
504 // ConsensusScriptChecks
505 const uint32_t m_next_block_script_verify_flags;
506 int m_sig_checks_standard;
507 };
508
509 // Run the policy checks on a given transaction, excluding any script
510 // checks. Looks up inputs, calculates feerate, considers replacement,
511 // evaluates package limits, etc. As this function can be invoked for "free"
512 // by a peer, only tests that are fast should be done here (to avoid CPU
513 // DoS).
514 bool PreChecks(ATMPArgs &args, Workspace &ws)
516
517 // Re-run the script checks, using consensus flags, and try to cache the
518 // result in the scriptcache. This should be done after
519 // PolicyScriptChecks(). This requires that all inputs either be in our
520 // utxo set or in the mempool.
521 bool ConsensusScriptChecks(const ATMPArgs &args, Workspace &ws)
523
524 // Try to add the transaction to the mempool, removing any conflicts first.
525 // Returns true if the transaction is in the mempool after any size
526 // limiting is performed, false otherwise.
527 bool Finalize(const ATMPArgs &args, Workspace &ws)
529
530 // Submit all transactions to the mempool and call ConsensusScriptChecks to
531 // add to the script cache - should only be called after successful
532 // validation of all transactions in the package.
533 // Does not call LimitMempoolSize(), so mempool max_size_bytes may be
534 // temporarily exceeded.
535 bool SubmitPackage(const ATMPArgs &args, std::vector<Workspace> &workspaces,
537 std::map<TxId, MempoolAcceptResult> &results)
539
540 // Compare a package's feerate against minimum allowed.
541 bool CheckFeeRate(size_t package_size, size_t package_vsize,
545 AssertLockHeld(m_pool.cs);
546
548 m_pool.GetMinFee().GetFee(package_vsize);
549
552 return state.Invalid(
554 "mempool min fee not met",
556 }
557
558 // Do not change this to use virtualsize without coordinating a network
559 // policy upgrade.
560 if (package_fee < m_pool.m_min_relay_feerate.GetFee(package_size)) {
561 return state.Invalid(
563 "min relay fee not met",
564 strprintf("%d < %d", package_fee,
565 m_pool.m_min_relay_feerate.GetFee(package_size)));
566 }
567
568 return true;
569 }
570
571private:
572 CTxMemPool &m_pool;
// m_view is backed by m_dummy by default; PreChecks temporarily switches its
// backend to m_viewmempool while fetching inputs.
573 CCoinsViewCache m_view;
574 CCoinsViewMemPool m_viewmempool;
575 CCoinsView m_dummy;
576
577 Chainstate &m_active_chainstate;
578};
579
// Fast, non-script policy checks run before any signature verification.
// NOTE(review): doxygen text dump — several original lines (lock
// annotations, some condition/return statements, e.g. orig. 581, 613-614,
// 619, 626, 634, 639, 663, 670, 677, 703, 719, 730) were dropped by
// extraction, so a few statements below appear truncated.
580bool MemPoolAccept::PreChecks(ATMPArgs &args, Workspace &ws) {
582 AssertLockHeld(m_pool.cs);
583 const CTransactionRef &ptx = ws.m_ptx;
584 const CTransaction &tx = *ws.m_ptx;
585 const TxId &txid = ws.m_ptx->GetId();
586
587 // Copy/alias what we need out of args
588 const int64_t nAcceptTime = args.m_accept_time;
589 const bool bypass_limits = args.m_bypass_limits;
590 std::vector<COutPoint> &coins_to_uncache = args.m_coins_to_uncache;
591 const unsigned int heightOverride = args.m_heightOverride;
592
593 // Alias what we need out of ws
594 TxValidationState &state = ws.m_state;
595 // Coinbase is only valid in a block, not as a loose transaction.
596 if (!CheckRegularTransaction(tx, state)) {
597 // state filled in by CheckRegularTransaction.
598 return false;
599 }
600
601 // Rather not work on nonstandard transactions (unless -testnet)
602 std::string reason;
603 if (m_pool.m_require_standard &&
604 !IsStandardTx(tx, m_pool.m_max_datacarrier_bytes,
605 m_pool.m_permit_bare_multisig,
606 m_pool.m_dust_relay_feerate, reason)) {
607 return state.Invalid(TxValidationResult::TX_NOT_STANDARD, reason);
608 }
609
610 // Only accept nLockTime-using transactions that can be mined in the next
611 // block; we don't want our mempool filled up with transactions that can't
612 // be mined yet.
615 *Assert(m_active_chainstate.m_chain.Tip()),
616 args.m_config.GetChainParams().GetConsensus(), tx, ctxState)) {
617 // We copy the state from a dummy to ensure we don't increase the
618 // ban score of peer for transaction that could be valid in the future.
620 ctxState.GetRejectReason(),
621 ctxState.GetDebugMessage());
622 }
623
624 // Is it already in the memory pool?
625 if (m_pool.exists(txid)) {
627 "txn-already-in-mempool");
628 }
629
630 // Check for conflicts with in-memory transactions
631 for (const CTxIn &txin : tx.vin) {
632 if (const auto ptxConflicting = m_pool.GetConflictTx(txin.prevout)) {
633 if (m_pool.isAvalancheFinalized(ptxConflicting->GetId())) {
635 "finalized-tx-conflict");
636 }
637
638 return state.Invalid(
640 "txn-mempool-conflict");
641 }
642 }
643
644 m_view.SetBackend(m_viewmempool);
645
646 const CCoinsViewCache &coins_cache = m_active_chainstate.CoinsTip();
647 // Do all inputs exist?
648 for (const CTxIn &txin : tx.vin) {
649 if (!coins_cache.HaveCoinInCache(txin.prevout)) {
650 coins_to_uncache.push_back(txin.prevout);
651 }
652
653 // Note: this call may add txin.prevout to the coins cache
654 // (coins_cache.cacheCoins) by way of FetchCoin(). It should be
655 // removed later (via coins_to_uncache) if this tx turns out to be
656 // invalid.
657 if (!m_view.HaveCoin(txin.prevout)) {
658 // Are inputs missing because we already have the tx?
659 for (size_t out = 0; out < tx.vout.size(); out++) {
660 // Optimistically just do efficient check of cache for
661 // outputs.
662 if (coins_cache.HaveCoinInCache(COutPoint(txid, out))) {
664 "txn-already-known");
665 }
666 }
667
668 // Otherwise assume this might be an orphan tx for which we just
669 // haven't seen parents yet.
671 "bad-txns-inputs-missingorspent");
672 }
673 }
674
675 // Are the actual inputs available?
676 if (!m_view.HaveInputs(tx)) {
678 "bad-txns-inputs-spent");
679 }
680
681 // Bring the best block into scope.
682 m_view.GetBestBlock();
683
684 // we have all inputs cached now, so switch back to dummy (to protect
685 // against bugs where we pull more inputs from disk that miss being
686 // added to coins_to_uncache)
687 m_view.SetBackend(m_dummy);
688
689 assert(m_active_chainstate.m_blockman.LookupBlockIndex(
690 m_view.GetBestBlock()) == m_active_chainstate.m_chain.Tip());
691
692 // Only accept BIP68 sequence locked transactions that can be mined in
693 // the next block; we don't want our mempool filled up with transactions
694 // that can't be mined yet.
695 // Pass in m_view which has all of the relevant inputs cached. Note that,
696 // since m_view's backend was removed, it no longer pulls coins from the
697 // mempool.
698 const std::optional<LockPoints> lock_points{CalculateLockPointsAtTip(
699 m_active_chainstate.m_chain.Tip(), m_view, tx)};
700 if (!lock_points.has_value() ||
701 !CheckSequenceLocksAtTip(m_active_chainstate.m_chain.Tip(),
702 *lock_points)) {
704 "non-BIP68-final");
705 }
706
707 // The mempool holds txs for the next block, so pass height+1 to
708 // CheckTxInputs
709 if (!Consensus::CheckTxInputs(tx, state, m_view,
710 m_active_chainstate.m_chain.Height() + 1,
711 ws.m_base_fees)) {
712 // state filled in by CheckTxInputs
713 return false;
714 }
715
716 // Check for non-standard pay-to-script-hash in inputs
717 if (m_pool.m_require_standard &&
718 !AreInputsStandard(tx, m_view, ws.m_next_block_script_verify_flags)) {
720 "bad-txns-nonstandard-inputs");
721 }
722
723 // ws.m_modified_fess includes any fee deltas from PrioritiseTransaction
724 ws.m_modified_fees = ws.m_base_fees;
725 m_pool.ApplyDelta(txid, ws.m_modified_fees);
726
727 unsigned int nSize = tx.GetTotalSize();
728
729 // Validate input scripts against standard script flags.
731 ws.m_next_block_script_verify_flags | STANDARD_SCRIPT_VERIFY_FLAGS;
732 ws.m_precomputed_txdata = PrecomputedTransactionData{tx};
733 if (!CheckInputScripts(tx, state, m_view, scriptVerifyFlags, true, false,
734 ws.m_precomputed_txdata, ws.m_sig_checks_standard)) {
735 // State filled in by CheckInputScripts
736 return false;
737 }
738
739 ws.m_entry = std::make_unique<CTxMemPoolEntry>(
740 ptx, ws.m_base_fees, nAcceptTime,
741 heightOverride ? heightOverride : m_active_chainstate.m_chain.Height(),
742 ws.m_sig_checks_standard, lock_points.value());
743
744 ws.m_vsize = ws.m_entry->GetTxVirtualSize();
745
746 // No individual transactions are allowed below the min relay feerate except
747 // from disconnected blocks. This requirement, unlike CheckFeeRate, cannot
748 // be bypassed using m_package_feerates because, while a tx could be package
749 // CPFP'd when entering the mempool, we do not have a DoS-resistant method
750 // of ensuring the tx remains bumped. For example, the fee-bumping child
751 // could disappear due to a replacement.
752 if (!bypass_limits &&
753 ws.m_modified_fees <
754 m_pool.m_min_relay_feerate.GetFee(ws.m_ptx->GetTotalSize())) {
755 // Even though this is a fee-related failure, this result is
756 // TX_MEMPOOL_POLICY, not TX_PACKAGE_RECONSIDERABLE, because it cannot
757 // be bypassed using package validation.
758 return state.Invalid(
759 TxValidationResult::TX_MEMPOOL_POLICY, "min relay fee not met",
760 strprintf("%d < %d", ws.m_modified_fees,
761 m_pool.m_min_relay_feerate.GetFee(nSize)));
762 }
763 // No individual transactions are allowed below the mempool min feerate
764 // except from disconnected blocks and transactions in a package. Package
765 // transactions will be checked using package feerate later.
766 if (!bypass_limits && !args.m_package_feerates &&
767 !CheckFeeRate(nSize, ws.m_vsize, ws.m_modified_fees, state)) {
768 return false;
769 }
770
771 return true;
772}
773
// Re-runs script checks under consensus (next-block) flags and caches the
// results; also cross-checks the sigchecks count against the standard-flags
// count from PreChecks. NOTE(review): extraction dropped orig. lines 775 and
// 792-793 (lock annotation, nSigChecksConsensus declaration and the
// CheckInputsFromMempoolAndCache call opening).
774bool MemPoolAccept::ConsensusScriptChecks(const ATMPArgs &args, Workspace &ws) {
776 AssertLockHeld(m_pool.cs);
777 const CTransaction &tx = *ws.m_ptx;
778 const TxId &txid = tx.GetId();
779 TxValidationState &state = ws.m_state;
780
781 // Check again against the next block's script verification flags
782 // to cache our script execution flags.
783 //
784 // This is also useful in case of bugs in the standard flags that cause
785 // transactions to pass as valid when they're actually invalid. For
786 // instance the STRICTENC flag was incorrectly allowing certain CHECKSIG
787 // NOT scripts to pass, even though they were invalid.
788 //
789 // There is a similar check in CreateNewBlock() to prevent creating
790 // invalid blocks (using TestBlockValidity), however allowing such
791 // transactions into the mempool can be exploited as a DoS attack.
794 tx, state, m_view, m_pool, ws.m_next_block_script_verify_flags,
795 ws.m_precomputed_txdata, nSigChecksConsensus,
796 m_active_chainstate.CoinsTip())) {
797 // This can occur under some circumstances, if the node receives an
798 // unrequested tx which is invalid due to new consensus rules not
799 // being activated yet (during IBD).
800 LogPrintf("BUG! PLEASE REPORT THIS! CheckInputScripts failed against "
801 "latest-block but not STANDARD flags %s, %s\n",
802 txid.ToString(), state.ToString());
803 return Assume(false);
804 }
805
806 if (ws.m_sig_checks_standard != nSigChecksConsensus) {
807 // We can't accept this transaction as we've used the standard count
808 // for the mempool/mining, but the consensus count will be enforced
809 // in validation (we don't want to produce bad block templates).
810 return error(
811 "%s: BUG! PLEASE REPORT THIS! SigChecks count differed between "
812 "standard and consensus flags in %s",
813 __func__, txid.ToString());
814 }
815 return true;
816}
817
818// Get the coins spent by ptx from the coins_view. Assumes coins are present.
819static std::vector<Coin> getSpentCoins(const CTransactionRef &ptx,
820 const CCoinsViewCache &coins_view) {
821 std::vector<Coin> spent_coins;
822 spent_coins.reserve(ptx->vin.size());
823 for (const CTxIn &input : ptx->vin) {
824 Coin coin;
825 const bool coinFound = coins_view.GetCoin(input.prevout, coin);
827 spent_coins.push_back(std::move(coin));
828 }
829 return spent_coins;
830}
831
// Adds the validated transaction to the mempool and, outside of package
// submission, trims the pool and reports eviction as "mempool full".
// NOTE(review): extraction dropped orig. lines 833, 841, 844 and 860 (lock
// annotation, the entry-reference construction, the signal/notification call
// opening, and an Invalid(...) result line).
832bool MemPoolAccept::Finalize(const ATMPArgs &args, Workspace &ws) {
834 AssertLockHeld(m_pool.cs);
835 const TxId &txid = ws.m_ptx->GetId();
836 TxValidationState &state = ws.m_state;
837 const bool bypass_limits = args.m_bypass_limits;
838
839 // Store transaction in memory
840 CTxMemPoolEntry *pentry = ws.m_entry.release();
842 m_pool.addUnchecked(entry);
843
845 ws.m_ptx,
846 std::make_shared<const std::vector<Coin>>(
847 getSpentCoins(ws.m_ptx, m_view)),
848 m_pool.GetAndIncrementSequence());
849
850 // Trim mempool and check if tx was trimmed.
851 // If we are validating a package, don't trim here because we could evict a
852 // previous transaction in the package. LimitMempoolSize() should be called
853 // at the very end to make sure the mempool is still within limits and
854 // package submission happens atomically.
855 if (!args.m_package_submission && !bypass_limits) {
856 m_pool.LimitSize(m_active_chainstate.CoinsTip());
857 if (!m_pool.exists(txid)) {
858 // The tx no longer meets our (new) mempool minimum feerate but
859 // could be reconsidered in a package.
861 "mempool full");
862 }
863 }
864 return true;
865}
866
// Submits every workspace's transaction to the mempool after consensus
// script checks, then trims once at the end and records per-tx results.
// NOTE(review): extraction dropped several original lines (869, 871, 888,
// 892, 907, 910, 939-940) — some emplace/Invalid statements below appear
// truncated.
867bool MemPoolAccept::SubmitPackage(
868 const ATMPArgs &args, std::vector<Workspace> &workspaces,
870 std::map<TxId, MempoolAcceptResult> &results) {
872 AssertLockHeld(m_pool.cs);
873 // Sanity check: none of the transactions should be in the mempool.
874 assert(std::all_of(
875 workspaces.cbegin(), workspaces.cend(),
876 [this](const auto &ws) { return !m_pool.exists(ws.m_ptx->GetId()); }));
877
878 bool all_submitted = true;
879 // ConsensusScriptChecks adds to the script cache and is therefore
880 // consensus-critical; CheckInputsFromMempoolAndCache asserts that
881 // transactions only spend coins available from the mempool or UTXO set.
882 // Submit each transaction to the mempool immediately after calling
883 // ConsensusScriptChecks to make the outputs available for subsequent
884 // transactions.
885 for (Workspace &ws : workspaces) {
886 if (!ConsensusScriptChecks(args, ws)) {
887 results.emplace(ws.m_ptx->GetId(),
889 // Since PreChecks() passed, this should never fail.
890 all_submitted = false;
891 package_state.Invalid(
893 strprintf("BUG! PolicyScriptChecks succeeded but "
894 "ConsensusScriptChecks failed: %s",
895 ws.m_ptx->GetId().ToString()));
896 }
897
898 // If we call LimitMempoolSize() for each individual Finalize(), the
899 // mempool will not take the transaction's descendant feerate into
900 // account because it hasn't seen them yet. Also, we risk evicting a
901 // transaction that a subsequent package transaction depends on.
902 // Instead, allow the mempool to temporarily bypass limits, the maximum
903 // package size) while submitting transactions individually and then
904 // trim at the very end.
905 if (!Finalize(args, ws)) {
906 results.emplace(ws.m_ptx->GetId(),
908 // Since LimitMempoolSize() won't be called, this should never fail.
909 all_submitted = false;
911 strprintf("BUG! Adding to mempool failed: %s",
912 ws.m_ptx->GetId().ToString()));
913 }
914 }
915
916 // It may or may not be the case that all the transactions made it into the
917 // mempool. Regardless, make sure we haven't exceeded max mempool size.
918 m_pool.LimitSize(m_active_chainstate.CoinsTip());
919
920 std::vector<TxId> all_package_txids;
921 all_package_txids.reserve(workspaces.size());
922 std::transform(workspaces.cbegin(), workspaces.cend(),
923 std::back_inserter(all_package_txids),
924 [](const auto &ws) { return ws.m_ptx->GetId(); });
925
926 // Add successful results. The returned results may change later if
927 // LimitMempoolSize() evicts them.
928 for (Workspace &ws : workspaces) {
929 const auto effective_feerate =
930 args.m_package_feerates
931 ? ws.m_package_feerate
932 : CFeeRate{ws.m_modified_fees,
933 static_cast<uint32_t>(ws.m_vsize)};
934 const auto effective_feerate_txids =
935 args.m_package_feerates ? all_package_txids
936 : std::vector<TxId>({ws.m_ptx->GetId()});
937 results.emplace(ws.m_ptx->GetId(),
938 MempoolAcceptResult::Success(ws.m_vsize, ws.m_base_fees,
941 }
942 return all_submitted;
943}
944
// Full single-transaction acceptance pipeline: PreChecks, consensus script
// checks, child-before-parent sanity check, then Finalize (unless
// test-accept). NOTE(review): the return-type line (orig. 945) and several
// statement lines (948, 965, 968, 970, 995, 1005, 1013-1014, 1019) were
// dropped by extraction.
946MemPoolAccept::AcceptSingleTransaction(const CTransactionRef &ptx,
947 ATMPArgs &args) {
949 // mempool "read lock" (held through
950 // GetMainSignals().TransactionAddedToMempool())
951 LOCK(m_pool.cs);
952
953 const CBlockIndex *tip = m_active_chainstate.m_chain.Tip();
954
955 Workspace ws(ptx,
956 GetNextBlockScriptFlags(tip, m_active_chainstate.m_chainman));
957
958 const std::vector<TxId> single_txid{ws.m_ptx->GetId()};
959
960 // Perform the inexpensive checks first and avoid hashing and signature
961 // verification unless those checks pass, to mitigate CPU exhaustion
962 // denial-of-service attacks.
963 if (!PreChecks(args, ws)) {
964 if (ws.m_state.GetResult() ==
966 // Failed for fee reasons. Provide the effective feerate and which
967 // tx was included.
969 ws.m_state, CFeeRate(ws.m_modified_fees, ws.m_vsize),
971 }
972 return MempoolAcceptResult::Failure(ws.m_state);
973 }
974
975 if (!ConsensusScriptChecks(args, ws)) {
976 return MempoolAcceptResult::Failure(ws.m_state);
977 }
978
979 const TxId txid = ptx->GetId();
980
981 // Mempool sanity check -- in our new mempool no tx can be added if its
982 // outputs are already spent in the mempool (that is, no children before
983 // parents allowed; the mempool must be consistent at all times).
984 //
985 // This means that on reorg, the disconnectpool *must* always import
986 // the existing mempool tx's, clear the mempool, and then re-add
987 // remaining tx's in topological order via this function. Our new mempool
988 // has fast adds, so this is ok.
989 if (auto it = m_pool.mapNextTx.lower_bound(COutPoint{txid, 0});
990 it != m_pool.mapNextTx.end() && it->first->GetTxId() == txid) {
991 LogPrintf("%s: BUG! PLEASE REPORT THIS! Attempt to add txid %s, but "
992 "its outputs are already spent in the "
993 "mempool\n",
994 __func__, txid.ToString());
996 "txn-child-before-parent");
997 return MempoolAcceptResult::Failure(ws.m_state);
998 }
999
1000 const CFeeRate effective_feerate{ws.m_modified_fees,
1001 static_cast<uint32_t>(ws.m_vsize)};
1002 // Tx was accepted, but not added
1003 if (args.m_test_accept) {
1004 return MempoolAcceptResult::Success(ws.m_vsize, ws.m_base_fees,
1006 }
1007
1008 if (!Finalize(args, ws)) {
1009 // The only possible failure reason is fee-related (mempool full).
1010 // Failed for fee reasons. Provide the effective feerate and which txns
1011 // were included.
1012 Assume(ws.m_state.GetResult() ==
1015 ws.m_state, CFeeRate(ws.m_modified_fees, ws.m_vsize), single_txid);
1016 }
1017
1018 return MempoolAcceptResult::Success(ws.m_vsize, ws.m_base_fees,
1020}
1021
// Package acceptance: run PreChecks on every transaction (failing fast),
// then enforce the aggregate package feerate, then either return test-accept
// results or submit the package. NOTE(review): extraction dropped numerous
// original lines (1024, 1028-1030, 1051, 1056-1057, 1087, 1093, 1095-1097,
// 1099, 1101-1104, 1125, 1127, 1132, 1135, 1137) — several statements below
// appear truncated.
1022PackageMempoolAcceptResult MemPoolAccept::AcceptMultipleTransactions(
1023 const std::vector<CTransactionRef> &txns, ATMPArgs &args) {
1025
1026 // These context-free package limits can be done before taking the mempool
1027 // lock.
1031 }
1032
1033 std::vector<Workspace> workspaces{};
1034 workspaces.reserve(txns.size());
1035 std::transform(
1036 txns.cbegin(), txns.cend(), std::back_inserter(workspaces),
1037 [this](const auto &tx) {
1038 return Workspace(
1039 tx, GetNextBlockScriptFlags(m_active_chainstate.m_chain.Tip(),
1040 m_active_chainstate.m_chainman));
1041 });
1042 std::map<TxId, MempoolAcceptResult> results;
1043
1044 LOCK(m_pool.cs);
1045
1046 // Do all PreChecks first and fail fast to avoid running expensive script
1047 // checks when unnecessary.
1048 std::vector<TxId> valid_txids;
1049 for (Workspace &ws : workspaces) {
1050 if (!PreChecks(args, ws)) {
1052 "transaction failed");
1053 // Exit early to avoid doing pointless work. Update the failed tx
1054 // result; the rest are unfinished.
1055 results.emplace(ws.m_ptx->GetId(),
1058 std::move(results));
1059 }
1060 // Make the coins created by this transaction available for subsequent
1061 // transactions in the package to spend.
1062 m_viewmempool.PackageAddTransaction(ws.m_ptx);
1063 valid_txids.push_back(ws.m_ptx->GetId());
1064 }
1065
1066 // Transactions must meet two minimum feerates: the mempool minimum fee and
1067 // min relay fee. For transactions consisting of exactly one child and its
1068 // parents, it suffices to use the package feerate
1069 // (total modified fees / total size or vsize) to check this requirement.
1070 // Note that this is an aggregate feerate; this function has not checked
1071 // that there are transactions too low feerate to pay for themselves, or
1072 // that the child transactions are higher feerate than their parents. Using
1073 // aggregate feerate may allow "parents pay for child" behavior and permit
1074 // a child that is below mempool minimum feerate. To avoid these behaviors,
1075 // callers of AcceptMultipleTransactions need to restrict txns topology
1076 // (e.g. to ancestor sets) and check the feerates of individuals and
1077 // subsets.
1078 const auto m_total_size = std::accumulate(
1079 workspaces.cbegin(), workspaces.cend(), int64_t{0},
1080 [](int64_t sum, auto &ws) { return sum + ws.m_ptx->GetTotalSize(); });
1081 const auto m_total_vsize =
1082 std::accumulate(workspaces.cbegin(), workspaces.cend(), int64_t{0},
1083 [](int64_t sum, auto &ws) { return sum + ws.m_vsize; });
1084 const auto m_total_modified_fees = std::accumulate(
1085 workspaces.cbegin(), workspaces.cend(), Amount::zero(),
1086 [](Amount sum, auto &ws) { return sum + ws.m_modified_fees; });
1088 std::vector<TxId> all_package_txids;
1089 all_package_txids.reserve(workspaces.size());
1090 std::transform(workspaces.cbegin(), workspaces.cend(),
1091 std::back_inserter(all_package_txids),
1092 [](const auto &ws) { return ws.m_ptx->GetId(); });
1094 if (args.m_package_feerates &&
1098 "transaction failed");
1100 package_state, {{workspaces.back().m_ptx->GetId(),
1105 }
1106
1107 for (Workspace &ws : workspaces) {
1108 ws.m_package_feerate = package_feerate;
1109 const TxId &ws_txid = ws.m_ptx->GetId();
1110 if (args.m_test_accept &&
1111 std::find(valid_txids.begin(), valid_txids.end(), ws_txid) !=
1112 valid_txids.end()) {
1113 const auto effective_feerate =
1114 args.m_package_feerates
1115 ? ws.m_package_feerate
1116 : CFeeRate{ws.m_modified_fees,
1117 static_cast<uint32_t>(ws.m_vsize)};
1118 const auto effective_feerate_txids =
1119 args.m_package_feerates ? all_package_txids
1120 : std::vector<TxId>{ws.m_ptx->GetId()};
1121 // When test_accept=true, transactions that pass PreChecks
1122 // are valid because there are no further mempool checks (passing
1123 // PreChecks implies passing ConsensusScriptChecks).
1124 results.emplace(ws_txid,
1126 ws.m_vsize, ws.m_base_fees, effective_feerate,
1128 }
1129 }
1130
1131 if (args.m_test_accept) {
1133 }
1134
1136 // PackageValidationState filled in by SubmitPackage().
1138 }
1140
1141}
1142
// Validate a subpackage of transactions against the mempool. For a subpackage
// of size > 1 this dispatches to multi-transaction acceptance (call on an
// elided line); a single transaction goes through AcceptSingleTransaction
// with per-transaction-in-package args. Afterwards, temporary and mempool
// coins are evicted from m_view / m_viewmempool so later subpackage
// evaluations never see coins that may no longer exist (rationale below).
// NOTE(review): this listing has gaps (several source lines elided, e.g. the
// return type line and the AcceptMultipleTransactions / error-return
// statements); comments describe only what is visible here.
1144MemPoolAccept::AcceptSubPackage(const std::vector<CTransactionRef> &subpackage,
1145                                ATMPArgs &args) {
1147    AssertLockHeld(m_pool.cs);
1148
1149    // Immediately-invoked lambda: compute the result first, then do the
1149    // coin-cache cleanup below regardless of which path produced it.
1149    auto result = [&]() EXCLUSIVE_LOCKS_REQUIRED(::cs_main, m_pool.cs) {
1150        if (subpackage.size() > 1) {
1152        }
1153        const auto &tx = subpackage.front();
1154        ATMPArgs single_args = ATMPArgs::SingleInPackageAccept(args);
1157        if (single_res.m_result_type !=
1160                                      "transaction failed");
1161        }
1163                                      {{tx->GetId(), single_res}});
1164    }();
1165
1166    // Clean up m_view and m_viewmempool so that other subpackage evaluations
1167    // don't have access to coins they shouldn't. Keep some coins in order to
1168    // minimize re-fetching coins from the UTXO set.
1169    //
1170    // There are 3 kinds of coins in m_view:
1171    // (1) Temporary coins from the transactions in subpackage, constructed by
1172    // m_viewmempool.
1173    // (2) Mempool coins from transactions in the mempool, constructed by
1174    // m_viewmempool.
1175    // (3) Confirmed coins fetched from our current UTXO set.
1176    //
1177    // (1) Temporary coins need to be removed, regardless of whether the
1178    // transaction was submitted. If the transaction was submitted to the
1179    // mempool, m_viewmempool will be able to fetch them from there. If it
1180    // wasn't submitted to mempool, it is incorrect to keep them - future calls
1181    // may try to spend those coins that don't actually exist.
1182    // (2) Mempool coins also need to be removed. If the mempool contents have
1183    // changed as a result of submitting or replacing transactions, coins
1184    // previously fetched from mempool may now be spent or nonexistent. Those
1185    // coins need to be deleted from m_view.
1186    // (3) Confirmed coins don't need to be removed. The chainstate has not
1187    // changed (we are holding cs_main and no blocks have been processed) so the
1188    // confirmed tx cannot disappear like a mempool tx can. The coin may now be
1189    // spent after we submitted a tx to mempool, but we have already checked
1190    // that the package does not have 2 transactions spending the same coin.
1191    // Keeping them in m_view is an optimization to not re-fetch confirmed coins
1192    // if we later look up inputs for this transaction again.
1193    for (const auto &outpoint : m_viewmempool.GetNonBaseCoins()) {
1194        // In addition to resetting m_viewmempool, we also need to manually
1195        // delete these coins from m_view because it caches copies of the coins
1196        // it fetched from m_viewmempool previously.
1197        m_view.Uncache(outpoint);
1198    }
1199    // This deletes the temporary and mempool coins.
1200    m_viewmempool.Reset();
1201    return result;
1202}
1203
// Accept a child-with-unconfirmed-parents package into the mempool.
// Pipeline visible in this listing: (1) context-free package sanity checks;
// (2) verify child-with-parents topology; (3) verify every child input is
// either a preceding package tx or a confirmed coin; (4) under the mempool
// lock, validate each tx individually (de-duplicating ones already in the
// mempool), then validate the remaining set as a package; (5) trim the
// mempool and reconcile per-tx results against possible eviction.
// NOTE(review): this doxygen-derived listing elides a number of source
// lines (returns, helper calls such as the package-sanity check and
// package_feerate computation); comments here are limited to visible code.
1204PackageMempoolAcceptResult MemPoolAccept::AcceptPackage(const Package &package,
1205                                                        ATMPArgs &args) {
1207    // Used if returning a PackageMempoolAcceptResult directly from this
1208    // function.
1210
1211    // Check that the package is well-formed. If it isn't, we won't try to
1212    // validate any of the transactions and thus won't return any
1213    // MempoolAcceptResults, just a package-wide error.
1214
1215    // Context-free package checks.
1218    }
1219
1220    // All transactions in the package must be a parent of the last transaction.
1221    // This is just an opportunity for us to fail fast on a context-free check
1222    // without taking the mempool lock.
1225                                  "package-not-child-with-parents");
1227    }
1228
1229    // IsChildWithParents() guarantees the package is > 1 transactions.
1230    assert(package.size() > 1);
1231    // The package must be 1 child with all of its unconfirmed parents. The
1232    // package is expected to be sorted, so the last transaction is the child.
1233    const auto &child = package.back();
1234    std::unordered_set<TxId, SaltedTxIdHasher> unconfirmed_parent_txids;
1235    std::transform(
1236        package.cbegin(), package.cend() - 1,
1238        [](const auto &tx) { return tx->GetId(); });
1239
1240    // All child inputs must refer to a preceding package transaction or a
1241    // confirmed UTXO. The only way to verify this is to look up the child's
1242    // inputs in our current coins view (not including mempool), and enforce
1243    // that all parents not present in the package be available at chain tip.
1244    // Since this check can bring new coins into the coins cache, keep track of
1245    // these coins and uncache them if we don't end up submitting this package
1246    // to the mempool.
1247    const CCoinsViewCache &coins_tip_cache = m_active_chainstate.CoinsTip();
1248    for (const auto &input : child->vin) {
1249        if (!coins_tip_cache.HaveCoinInCache(input.prevout)) {
1250            args.m_coins_to_uncache.push_back(input.prevout);
1251        }
1252    }
1253    // Using the MemPoolAccept m_view cache allows us to look up these same
1254    // coins faster later. This should be connecting directly to CoinsTip, not
1255    // to m_viewmempool, because we specifically require inputs to be confirmed
1256    // if they aren't in the package.
1257    m_view.SetBackend(m_active_chainstate.CoinsTip());
1258    const auto package_or_confirmed = [this, &unconfirmed_parent_txids](
1259                                          const auto &input) {
1260        return unconfirmed_parent_txids.count(input.prevout.GetTxId()) > 0 ||
1261               m_view.HaveCoin(input.prevout);
1262    };
1263    if (!std::all_of(child->vin.cbegin(), child->vin.cend(),
1267                                  "package-not-child-with-unconfirmed-parents");
1269    }
1270    // Protect against bugs where we pull more inputs from disk that miss being
1271    // added to coins_to_uncache. The backend will be connected again when
1272    // needed in PreChecks.
1273    m_view.SetBackend(m_dummy);
1274
1275    LOCK(m_pool.cs);
1276    // Stores results from which we will create the returned
1277    // PackageMempoolAcceptResult. A result may be changed if a mempool
1278    // transaction is evicted later due to LimitMempoolSize().
1279    std::map<TxId, MempoolAcceptResult> results_final;
1280    // Results from individual validation which will be returned if no other
1281    // result is available for this transaction. "Nonfinal" because if a
1282    // transaction fails by itself but succeeds later (i.e. when evaluated with
1283    // a fee-bumping child), the result in this map may be discarded.
1284    std::map<TxId, MempoolAcceptResult> individual_results_nonfinal;
1285    bool quit_early{false};
1286    std::vector<CTransactionRef> txns_package_eval;
1287    for (const auto &tx : package) {
1288        const auto &txid = tx->GetId();
1289        // An already confirmed tx is treated as one not in mempool, because all
1290        // we know is that the inputs aren't available.
1291        if (m_pool.exists(txid)) {
1292            // Exact transaction already exists in the mempool.
1293            // Node operators are free to set their mempool policies however
1294            // they please, nodes may receive transactions in different orders,
1295            // and malicious counterparties may try to take advantage of policy
1296            // differences to pin or delay propagation of transactions. As such,
1297            // it's possible for some package transaction(s) to already be in
1298            // the mempool, and we don't want to reject the entire package in
1299            // that case (as that could be a censorship vector). De-duplicate
1300            // the transactions that are already in the mempool, and only call
1301            // AcceptMultipleTransactions() with the new transactions. This
1302            // ensures we don't double-count transaction counts and sizes when
1303            // checking ancestor/descendant limits, or double-count transaction
1304            // fees for fee-related policy.
1305            auto iter = m_pool.GetIter(txid);
1306            assert(iter != std::nullopt);
1308                                      (*iter.value())->GetTxSize(),
1309                                      (*iter.value())->GetFee()));
1310        } else {
1311            // Transaction does not already exist in the mempool.
1312            // Try submitting the transaction on its own.
1313            const auto single_package_res = AcceptSubPackage({tx}, args);
1314            const auto &single_res = single_package_res.m_tx_results.at(txid);
1315            if (single_res.m_result_type ==
1317                // The transaction succeeded on its own and is now in the
1318                // mempool. Don't include it in package validation, because its
1319                // fees should only be "used" once.
1320                assert(m_pool.exists(txid));
1321                results_final.emplace(txid, single_res);
1322            } else if (single_res.m_state.GetResult() !=
1324                       single_res.m_state.GetResult() !=
1326                // Package validation policy only differs from individual policy
1327                // in its evaluation of feerate. For example, if a transaction
1328                // fails here due to violation of a consensus rule, the result
1329                // will not change when it is submitted as part of a package. To
1330                // minimize the amount of repeated work, unless the transaction
1331                // fails due to feerate or missing inputs (its parent is a
1332                // previous transaction in the package that failed due to
1333                // feerate), don't run package validation. Note that this
1334                // decision might not make sense if different types of packages
1335                // are allowed in the future. Continue individually validating
1336                // the rest of the transactions, because some of them may still
1337                // be valid.
1338                quit_early = true;
1340                    PackageValidationResult::PCKG_TX, "transaction failed");
1342            } else {
1344                txns_package_eval.push_back(tx);
1345            }
1346        }
1347    }
1348
1350        quit_early || txns_package_eval.empty()
1355
1356    // Make sure we haven't exceeded max mempool size.
1357    // Package transactions that were submitted to mempool or already in mempool
1358    // may be evicted.
1359    m_pool.LimitSize(m_active_chainstate.CoinsTip());
1360
1361    // Reconcile results: eviction by LimitSize() above can invalidate a
1361    // previously-successful submission, so re-check mempool membership.
1361    for (const auto &tx : package) {
1362        const auto &txid = tx->GetId();
1363        if (multi_submission_result.m_tx_results.count(txid) > 0) {
1364            // We shouldn't have re-submitted if the tx result was already in
1365            // results_final.
1366            Assume(results_final.count(txid) == 0);
1367            // If it was submitted, check to see if the tx is still in the
1368            // mempool. It could have been evicted due to LimitMempoolSize()
1369            // above.
1370            const auto &txresult =
1371                multi_submission_result.m_tx_results.at(txid);
1372            if (txresult.m_result_type ==
1374                !m_pool.exists(txid)) {
1376                                          "transaction failed");
1380                results_final.emplace(
1382            } else {
1383                results_final.emplace(txid, txresult);
1384            }
1385        } else if (const auto final_it{results_final.find(txid)};
1386                   final_it != results_final.end()) {
1387            // Already-in-mempool transaction. Check to see if it's still there,
1388            // as it could have been evicted when LimitMempoolSize() was called.
1389            Assume(final_it->second.m_result_type !=
1391            Assume(individual_results_nonfinal.count(txid) == 0);
1392            if (!m_pool.exists(tx->GetId())) {
1394                                          "transaction failed");
1398                // Replace the previous result.
1399                results_final.erase(txid);
1400                results_final.emplace(
1402            }
1403        } else if (const auto non_final_it{
1404                       individual_results_nonfinal.find(txid)};
1406            Assume(non_final_it->second.m_result_type ==
1408            // Interesting result from previous processing.
1409            results_final.emplace(txid, non_final_it->second);
1410        }
1411    }
1412    Assume(results_final.size() == package.size());
1414                                       std::move(results_final));
1415}
1416} // namespace
1417
1419 const CTransactionRef &tx,
1421 bool test_accept,
1422 unsigned int heightOverride) {
1424 assert(active_chainstate.GetMempool() != nullptr);
1425 CTxMemPool &pool{*active_chainstate.GetMempool()};
1426
1427 std::vector<COutPoint> coins_to_uncache;
1428 auto args = MemPoolAccept::ATMPArgs::SingleAccept(
1429 active_chainstate.m_chainman.GetConfig(), accept_time, bypass_limits,
1431 const MempoolAcceptResult result = MemPoolAccept(pool, active_chainstate)
1432 .AcceptSingleTransaction(tx, args);
1434 // Remove coins that were not present in the coins cache before calling
1435 // ATMPW; this is to prevent memory DoS in case we receive a large
1436 // number of invalid transactions that attempt to overrun the in-memory
1437 // coins cache
1438 // (`CCoinsViewCache::cacheCoins`).
1439
1440 for (const COutPoint &outpoint : coins_to_uncache) {
1441 active_chainstate.CoinsTip().Uncache(outpoint);
1442 }
1443 }
1444
1445 // After we've (potentially) uncached entries, ensure our coins cache is
1446 // still within its size limits
1449 return result;
1450}
1451
1453 CTxMemPool &pool,
1454 const Package &package,
1455 bool test_accept) {
1457 assert(!package.empty());
1458 assert(std::all_of(package.cbegin(), package.cend(),
1459 [](const auto &tx) { return tx != nullptr; }));
1460
1461 const Config &config = active_chainstate.m_chainman.GetConfig();
1462
1463 std::vector<COutPoint> coins_to_uncache;
1464 const auto result = [&]() EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
1466 if (test_accept) {
1467 auto args = MemPoolAccept::ATMPArgs::PackageTestAccept(
1468 config, GetTime(), coins_to_uncache);
1469 return MemPoolAccept(pool, active_chainstate)
1470 .AcceptMultipleTransactions(package, args);
1471 } else {
1472 auto args = MemPoolAccept::ATMPArgs::PackageChildWithParents(
1473 config, GetTime(), coins_to_uncache);
1474 return MemPoolAccept(pool, active_chainstate)
1475 .AcceptPackage(package, args);
1476 }
1477 }();
1478
1479 // Uncache coins pertaining to transactions that were not submitted to the
1480 // mempool.
1481 if (test_accept || result.m_state.IsInvalid()) {
1482 for (const COutPoint &hashTx : coins_to_uncache) {
1483 active_chainstate.CoinsTip().Uncache(hashTx);
1484 }
1485 }
1486 // Ensure the coins cache is still within limits.
1489 return result;
1490}
1491
1493 int halvings = nHeight / consensusParams.nSubsidyHalvingInterval;
1494 // Force block reward to zero when right shift is undefined.
1495 if (halvings >= 64) {
1496 return Amount::zero();
1497 }
1498
1499 Amount nSubsidy = 50 * COIN;
1500 // Subsidy is cut in half every 210,000 blocks which will occur
1501 // approximately every 4 years.
1502 return ((nSubsidy / SATOSHI) >> halvings) * SATOSHI;
1503}
1504
1508
// Create the in-memory coins cache layered on top of the error-catcher view
// (m_catcherview), which in turn wraps the on-disk coins DB. Separate from
// the constructor so callers can defer cache creation.
// NOTE(review): one interior line (orig. 1510, likely an AssertLockHeld) is
// elided in this listing — confirm against the full source.
1509void CoinsViews::InitCache() {
1511    m_cacheview = std::make_unique<CCoinsViewCache>(&m_catcherview);
1512}
1513
1515 ChainstateManager &chainman,
1516 std::optional<BlockHash> from_snapshot_blockhash)
1517 : m_mempool(mempool), m_blockman(blockman), m_chainman(chainman),
1518 m_from_snapshot_blockhash(from_snapshot_blockhash) {}
1519
1521 bool should_wipe, std::string leveldb_name) {
1524 }
1525
1526 m_coins_views = std::make_unique<CoinsViews>(
1528 .cache_bytes = cache_size_bytes,
1529 .memory_only = in_memory,
1530 .wipe_data = should_wipe,
1531 .obfuscate = true,
1532 .options = m_chainman.m_options.coins_db},
1534}
1535
// Initialize this chainstate's in-memory coins cache. Requires InitCoinsDB()
// to have been called first (m_coins_views must exist).
// NOTE(review): interior lines (orig. 1537/1539) are elided here; presumably
// cache_size_bytes is recorded on one of them — confirm against full source.
1536void Chainstate::InitCoinsCache(size_t cache_size_bytes) {
1538    assert(m_coins_views != nullptr);
1540    m_coins_views->InitCache();
1541}
1542
1543// Note that though this is marked const, we may end up modifying
1544// `m_cached_finished_ibd`, which is a performance-related implementation
1545// detail. This function must be marked `const` so that `CValidationInterface`
1546// clients (which are given a `const Chainstate*`) can call it.
1547//
1549 // Optimization: pre-test latch before taking the lock.
1550 if (m_cached_finished_ibd.load(std::memory_order_relaxed)) {
1551 return false;
1552 }
1553
1554 LOCK(cs_main);
1555 if (m_cached_finished_ibd.load(std::memory_order_relaxed)) {
1556 return false;
1557 }
1559 return true;
1560 }
1561 if (m_chain.Tip() == nullptr) {
1562 return true;
1563 }
1565 return true;
1566 }
1567 if (m_chain.Tip()->Time() <
1569 return true;
1570 }
1571 LogPrintf("Leaving InitialBlockDownload (latching to false)\n");
1572 m_cached_finished_ibd.store(true, std::memory_order_relaxed);
1573 return false;
1574}
1575
1578
1579 // Before we get past initial download, we cannot reliably alert about forks
1580 // (we assume we don't get stuck on a fork before finishing our initial
1581 // sync)
1582 if (IsInitialBlockDownload()) {
1583 return;
1584 }
1585
1586 // If our best fork is no longer within 72 blocks (+/- 12 hours if no one
1587 // mines it) of our head, or if it is back on the active chain, drop it
1590 m_best_fork_tip = nullptr;
1591 }
1592
1593 if (m_best_fork_tip ||
1594 (m_chainman.m_best_invalid &&
1595 m_chainman.m_best_invalid->nChainWork >
1596 m_chain.Tip()->nChainWork + (GetBlockProof(*m_chain.Tip()) * 6))) {
1598 std::string warning =
1599 std::string("'Warning: Large-work fork detected, forking after "
1600 "block ") +
1601 m_best_fork_base->phashBlock->ToString() + std::string("'");
1603 }
1604
1606 LogPrintf("%s: Warning: Large fork found\n forking the "
1607 "chain at height %d (%s)\n lasting to height %d "
1608 "(%s).\nChain state database corruption likely.\n",
1614 } else {
1615 LogPrintf("%s: Warning: Found invalid chain at least ~6 blocks "
1616 "longer than our best chain.\nChain state database "
1617 "corruption likely.\n",
1618 __func__);
1620 }
1621 } else {
1624 }
1625}
1626
1630
1631 // If we are on a fork that is sufficiently large, set a warning flag.
1633
1634 // We define a condition where we should warn the user about as a fork of at
1635 // least 7 blocks with a tip within 72 blocks (+/- 12 hours if no one mines
1636 // it) of ours. We use 7 blocks rather arbitrarily as it represents just
1637 // under 10% of sustained network hash rate operating on the fork, or a
1638 // chain that is entirely longer than ours and invalid (note that this
1639 // should be detected by both). We define it this way because it allows us
1640 // to only store the highest fork tip (+ base) which meets the 7-block
1641 // condition and from this always have the most-likely-to-cause-warning fork
1642 if (pfork &&
1643 (!m_best_fork_tip ||
1645 pindexNewForkTip->nChainWork - pfork->nChainWork >
1646 (GetBlockProof(*pfork) * 7) &&
1647 m_chain.Height() - pindexNewForkTip->nHeight < 72) {
1650 }
1651
1653}
1654
1655// Called both upon regular invalid block discovery *and* InvalidateBlock
1658 if (!m_chainman.m_best_invalid ||
1659 pindexNew->nChainWork > m_chainman.m_best_invalid->nChainWork) {
1660 m_chainman.m_best_invalid = pindexNew;
1661 }
1662 if (m_chainman.m_best_header != nullptr &&
1663 m_chainman.m_best_header->GetAncestor(pindexNew->nHeight) ==
1664 pindexNew) {
1665 m_chainman.m_best_header = m_chain.Tip();
1666 }
1667
1668 // If the invalid chain found is supposed to be finalized, we need to move
1669 // back the finalization point.
1673 }
1674
1675 LogPrintf("%s: invalid block=%s height=%d log2_work=%f date=%s\n",
1676 __func__, pindexNew->GetBlockHash().ToString(),
1677 pindexNew->nHeight,
1678 log(pindexNew->nChainWork.getdouble()) / log(2.0),
1679 FormatISO8601DateTime(pindexNew->GetBlockTime()));
1681 assert(tip);
1682 LogPrintf("%s: current best=%s height=%d log2_work=%f date=%s\n",
1683 __func__, tip->GetBlockHash().ToString(), m_chain.Height(),
1684 log(tip->nChainWork.getdouble()) / log(2.0),
1685 FormatISO8601DateTime(tip->GetBlockTime()));
1686}
1687
1688// Same as InvalidChainFound, above, except not called directly from
1689// InvalidateBlock, which does its own setBlockIndexCandidates management.
1691 const BlockValidationState &state) {
1694 pindex->nStatus = pindex->nStatus.withFailed();
1695 m_chainman.m_failed_blocks.insert(pindex);
1696 m_blockman.m_dirty_blockindex.insert(pindex);
1697 InvalidChainFound(pindex);
1698 }
1699}
1700
1702 int nHeight) {
1703 // Mark inputs spent.
1704 if (tx.IsCoinBase()) {
1705 return;
1706 }
1707
1708 txundo.vprevout.reserve(tx.vin.size());
1709 for (const CTxIn &txin : tx.vin) {
1710 txundo.vprevout.emplace_back();
1711 bool is_spent = view.SpendCoin(txin.prevout, &txundo.vprevout.back());
1712 assert(is_spent);
1713 }
1714}
1715
1717 int nHeight) {
1719 AddCoins(view, tx, nHeight);
1720}
1721
1723 const CScript &scriptSig = ptxTo->vin[nIn].scriptSig;
1724 if (!VerifyScript(scriptSig, m_tx_out.scriptPubKey, nFlags,
1727 metrics, &error)) {
1728 return false;
1729 }
1730 if ((pTxLimitSigChecks &&
1734 // we can't assign a meaningful script error (since the script
1735 // succeeded), but remove the ScriptError::OK which could be
1736 // misinterpreted.
1738 return false;
1739 }
1740 return true;
1741}
1742
1744 const CCoinsViewCache &inputs, const uint32_t flags,
1746 const PrecomputedTransactionData &txdata,
1748 CheckInputsLimiter *pBlockLimitSigChecks,
1749 std::vector<CScriptCheck> *pvChecks) {
1751 assert(!tx.IsCoinBase());
1752
1753 if (pvChecks) {
1754 pvChecks->reserve(tx.vin.size());
1755 }
1756
1757 // First check if script executions have been cached with the same flags.
1758 // Note that this assumes that the inputs provided are correct (ie that the
1759 // transaction hash which is in tx's prevouts properly commits to the
1760 // scriptPubKey in the inputs view of that transaction).
1763 if (!txLimitSigChecks.consume_and_check(nSigChecksOut) ||
1764 (pBlockLimitSigChecks &&
1765 !pBlockLimitSigChecks->consume_and_check(nSigChecksOut))) {
1767 "too-many-sigchecks");
1768 }
1769 return true;
1770 }
1771
1772 int nSigChecksTotal = 0;
1773
1774 for (size_t i = 0; i < tx.vin.size(); i++) {
1775 const COutPoint &prevout = tx.vin[i].prevout;
1776 const Coin &coin = inputs.AccessCoin(prevout);
1777 assert(!coin.IsSpent());
1778
1779 // We very carefully only pass in things to CScriptCheck which are
1780 // clearly committed to by tx's hash. This provides a sanity
1781 // check that our caching is not introducing consensus failures through
1782 // additional data in, eg, the coins being spent being checked as a part
1783 // of CScriptCheck.
1784
1785 // Verify signature
1786 CScriptCheck check(coin.GetTxOut(), tx, i, flags, sigCacheStore, txdata,
1787 &txLimitSigChecks, pBlockLimitSigChecks);
1788
1789 // If pvChecks is not null, defer the check execution to the caller.
1790 if (pvChecks) {
1791 pvChecks->push_back(std::move(check));
1792 continue;
1793 }
1794
1795 if (!check()) {
1796 ScriptError scriptError = check.GetScriptError();
1797 // Compute flags without the optional standardness flags.
1798 // This differs from MANDATORY_SCRIPT_VERIFY_FLAGS as it contains
1799 // additional upgrade flags (see AcceptToMemoryPoolWorker variable
1800 // extraFlags).
1803 if (flags != mandatoryFlags) {
1804 // Check whether the failure was caused by a non-mandatory
1805 // script verification check. If so, ensure we return
1806 // NOT_STANDARD instead of CONSENSUS to avoid downstream users
1807 // splitting the network between upgraded and non-upgraded nodes
1808 // by banning CONSENSUS-failing data providers.
1810 sigCacheStore, txdata);
1811 if (check2()) {
1812 return state.Invalid(
1814 strprintf("non-mandatory-script-verify-flag (%s)",
1815 ScriptErrorString(scriptError)));
1816 }
1817 // update the error message to reflect the mandatory violation.
1818 scriptError = check2.GetScriptError();
1819 }
1820
1821 // MANDATORY flag failures correspond to
1822 // TxValidationResult::TX_CONSENSUS. Because CONSENSUS failures are
1823 // the most serious case of validation failures, we may need to
1824 // consider using RECENT_CONSENSUS_CHANGE for any script failure
1825 // that could be due to non-upgraded nodes which we may want to
1826 // support, to avoid splitting the network (but this depends on the
1827 // details of how net_processing handles such errors).
1828 return state.Invalid(
1830 strprintf("mandatory-script-verify-flag-failed (%s)",
1831 ScriptErrorString(scriptError)));
1832 }
1833
1835 }
1836
1838
1839 if (scriptCacheStore && !pvChecks) {
1840 // We executed all of the provided scripts, and were told to cache the
1841 // result. Do so now.
1843 }
1844
1845 return true;
1846}
1847
// Record a fatal condition on the validation state and return false via
// state.Error(). strMessage is the internal (log/state) message;
// userMessage is the translatable message shown to the user.
// NOTE(review): one interior line (orig. 1850, presumably the call that
// actually triggers node shutdown/alerting) is elided in this listing.
1848bool AbortNode(BlockValidationState &state, const std::string &strMessage,
1849               const bilingual_str &userMessage) {
1851    return state.Error(strMessage);
1852}
1853
1856 const COutPoint &out) {
1857 bool fClean = true;
1858
1859 if (view.HaveCoin(out)) {
1860 // Overwriting transaction output.
1861 fClean = false;
1862 }
1863
1864 if (undo.GetHeight() == 0) {
1865 // Missing undo metadata (height and coinbase). Older versions included
1866 // this information only in undo records for the last spend of a
1867 // transactions' outputs. This implies that it must be present for some
1868 // other output of the same tx.
1869 const Coin &alternate = AccessByTxid(view, out.GetTxId());
1870 if (alternate.IsSpent()) {
1871 // Adding output for transaction without known metadata
1873 }
1874
1875 // This is somewhat ugly, but hopefully utility is limited. This is only
1876        // useful when working from legacy on disk data. In any case, putting
1877 // the correct information in there doesn't hurt.
1878 const_cast<Coin &>(undo) = Coin(undo.GetTxOut(), alternate.GetHeight(),
1879 alternate.IsCoinBase());
1880 }
1881
1882 // If the coin already exists as an unspent coin in the cache, then the
1883 // possible_overwrite parameter to AddCoin must be set to true. We have
1884 // already checked whether an unspent coin exists above using HaveCoin, so
1885 // we don't need to guess. When fClean is false, an unspent coin already
1886 // existed and it is an overwrite.
1887 view.AddCoin(out, std::move(undo), !fClean);
1888
1890}
1891
// Undo the effects of connecting `block` (which must be the tip the view
// reflects) on the coins view: read the block's undo data from disk, then
// delegate the actual coin restoration to ApplyBlockUndo().
// NOTE(review): this listing elides lines orig. 1898-1900 (remaining
// parameters, including the coins view — presumably `view`) and orig. 1903
// (presumably `return DISCONNECT_FAILED;` for the read-failure path) —
// confirm against the full source.
1896DisconnectResult Chainstate::DisconnectBlock(const CBlock &block,
1897                                             const CBlockIndex *pindex,
1901    if (!m_blockman.UndoReadFromDisk(blockUndo, *pindex)) {
1902        error("DisconnectBlock(): failure reading undo data");
1904    }
1905
1906    return ApplyBlockUndo(std::move(blockUndo), block, pindex, view);
1907}
1908
1910 const CBlockIndex *pindex,
1912 bool fClean = true;
1913
1914 if (blockUndo.vtxundo.size() + 1 != block.vtx.size()) {
1915 error("DisconnectBlock(): block and undo data inconsistent");
1917 }
1918
1919 // First, restore inputs.
1920 for (size_t i = 1; i < block.vtx.size(); i++) {
1921 const CTransaction &tx = *(block.vtx[i]);
1922 CTxUndo &txundo = blockUndo.vtxundo[i - 1];
1923 if (txundo.vprevout.size() != tx.vin.size()) {
1924 error("DisconnectBlock(): transaction and undo data inconsistent");
1926 }
1927
1928 for (size_t j = 0; j < tx.vin.size(); j++) {
1929 const COutPoint &out = tx.vin[j].prevout;
1931 UndoCoinSpend(std::move(txundo.vprevout[j]), view, out);
1934 }
1936 }
1937 // At this point, all of txundo.vprevout should have been moved out.
1938 }
1939
1940 // Second, revert created outputs.
1941 for (const auto &ptx : block.vtx) {
1942 const CTransaction &tx = *ptx;
1943 const TxId &txid = tx.GetId();
1944 const bool is_coinbase = tx.IsCoinBase();
1945
1946 // Check that all outputs are available and match the outputs in the
1947 // block itself exactly.
1948 for (size_t o = 0; o < tx.vout.size(); o++) {
1949 if (tx.vout[o].scriptPubKey.IsUnspendable()) {
1950 continue;
1951 }
1952
1953 COutPoint out(txid, o);
1954 Coin coin;
1955 bool is_spent = view.SpendCoin(out, &coin);
1956 if (!is_spent || tx.vout[o] != coin.GetTxOut() ||
1957 uint32_t(pindex->nHeight) != coin.GetHeight() ||
1958 is_coinbase != coin.IsCoinBase()) {
1959 // transaction output mismatch
1960 fClean = false;
1961 }
1962 }
1963 }
1964
1965 // Move best block pointer to previous block.
1966 view.SetBestBlock(block.hashPrevBlock);
1967
1969}
1970
1972
1974 scriptcheckqueue.StartWorkerThreads(threads_num);
1975}
1976
1978 scriptcheckqueue.StopWorkerThreads();
1979}
1980
1981// Returns the script flags which should be checked for the block after
1982// the given block.
1984 const ChainstateManager &chainman) {
1986
1988
1989 // Enforce P2SH (BIP16)
1990 if (DeploymentActiveAfter(pindex, chainman, Consensus::DEPLOYMENT_P2SH)) {
1992 }
1993
1994 // Enforce the DERSIG (BIP66) rule.
1995 if (DeploymentActiveAfter(pindex, chainman, Consensus::DEPLOYMENT_DERSIG)) {
1997 }
1998
1999 // Start enforcing CHECKLOCKTIMEVERIFY (BIP65) rule.
2000 if (DeploymentActiveAfter(pindex, chainman, Consensus::DEPLOYMENT_CLTV)) {
2002 }
2003
2004 // Start enforcing CSV (BIP68, BIP112 and BIP113) rule.
2005 if (DeploymentActiveAfter(pindex, chainman, Consensus::DEPLOYMENT_CSV)) {
2007 }
2008
2009 // If the UAHF is enabled, we start accepting replay protected txns
2010 if (IsUAHFenabled(consensusparams, pindex)) {
2013 }
2014
2015 // If the DAA HF is enabled, we start rejecting transaction that use a high
2016 // s in their signature. We also make sure that signature that are supposed
2017 // to fail (for instance in multisig or other forms of smart contracts) are
2018 // null.
2019 if (IsDAAEnabled(consensusparams, pindex)) {
2022 }
2023
2024 // When the magnetic anomaly fork is enabled, we start accepting
2025 // transactions using the OP_CHECKDATASIG opcode and it's verify
2026 // alternative. We also start enforcing push only signatures and
2027 // clean stack.
2031 }
2032
2033 if (IsGravitonEnabled(consensusparams, pindex)) {
2036 }
2037
2038 if (IsPhononEnabled(consensusparams, pindex)) {
2040 }
2041
2042 // We make sure this node will have replay protection during the next hard
2043 // fork.
2046 }
2047
2048 return flags;
2049}
2050
2058
// NOTE(review): This is a doxygen-rendered listing; gaps in the embedded line
// numbers (e.g. 2066-2067, 2075) are lines stripped by the extraction, so this
// body is incomplete. Code left byte-identical; review comments only.
//
// Applies `block` (at chain position `pindex`) to the UTXO set in `view`:
// re-runs CheckBlock, performs the BIP30 duplicate-transaction check and BIP68
// sequence-lock check, validates and spends every input (script checks may be
// dispatched to the parallel check queue), enforces the coinbase value cap,
// and — unless fJustCheck — writes undo data and advances the view's best
// block. Returns false (with `state` set) on any consensus failure.
2065bool Chainstate::ConnectBlock(const CBlock &block, BlockValidationState &state,
2068                              bool fJustCheck) {
2070    assert(pindex);
2071
2072    const BlockHash block_hash{block.GetHash()};
2073    assert(*pindex->phashBlock == block_hash);
2074
2076
2077    const CChainParams &params{m_chainman.GetParams()};
2078    const Consensus::Params &consensusParams = params.GetConsensus();
2079
2080    // Check it again in case a previous version let a bad block in
2081    // NOTE: We don't currently (re-)invoke ContextualCheckBlock() or
2082    // ContextualCheckBlockHeader() here. This means that if we add a new
2083    // consensus rule that is enforced in one of those two functions, then we
2084    // may have let in a block that violates the rule prior to updating the
2085    // software, and we would NOT be enforcing the rule here. Fully solving
2086    // upgrade from one software version to the next after a consensus rule
2087    // change is potentially tricky and issue-specific.
2088    // Also, currently the rule against blocks more than 2 hours in the future
2089    // is enforced in ContextualCheckBlockHeader(); we wouldn't want to
2090    // re-enforce that rule here (at least until we make it impossible for
2091    // m_adjusted_time_callback() to go backward).
2092    if (!CheckBlock(block, state, consensusParams,
2093                    options.withCheckPoW(!fJustCheck)
2096            // We don't write down blocks to disk if they may have been
2097            // corrupted, so this should be impossible unless we're having
2098            // hardware problems.
2099            return AbortNode(state, "Corrupt block found indicating potential "
2100                                    "hardware failure; shutting down");
2101        }
2102        return error("%s: Consensus::CheckBlock: %s", __func__,
2103                     state.ToString());
2104    }
2105
2106    // Verify that the view's current state corresponds to the previous block
2107    BlockHash hashPrevBlock =
2108        pindex->pprev == nullptr ? BlockHash() : pindex->pprev->GetBlockHash();
2109    assert(hashPrevBlock == view.GetBestBlock());
2110
2111    nBlocksTotal++;
2112
2113    // Special case for the genesis block, skipping connection of its
2114    // transactions (its coinbase is unspendable)
2115    if (block_hash == consensusParams.hashGenesisBlock) {
2116        if (!fJustCheck) {
2117            view.SetBestBlock(pindex->GetBlockHash());
2118        }
2119
2120        return true;
2121    }
2122
2123    bool fScriptChecks = true;
2125        // We've been configured with the hash of a block which has been
2126        // externally verified to have a valid history. A suitable default value
2127        // is included with the software and updated from time to time. Because
2128        // validity relative to a piece of software is an objective fact these
2129        // defaults can be easily reviewed. This setting doesn't force the
2130        // selection of any particular chain but makes validating some faster by
2131        // effectively caching the result of part of the verification.
2132        BlockMap::const_iterator it{
2133            m_blockman.m_block_index.find(m_chainman.AssumedValidBlock())};
2134        if (it != m_blockman.m_block_index.end()) {
2135            if (it->second.GetAncestor(pindex->nHeight) == pindex &&
2136                m_chainman.m_best_header->GetAncestor(pindex->nHeight) ==
2137                    pindex &&
2138                m_chainman.m_best_header->nChainWork >=
2140                // This block is a member of the assumed verified chain and an
2141                // ancestor of the best header.
2142                // Script verification is skipped when connecting blocks under
2143                // the assumevalid block. Assuming the assumevalid block is
2144                // valid this is safe because block merkle hashes are still
2145                // computed and checked, Of course, if an assumed valid block is
2146                // invalid due to false scriptSigs this optimization would allow
2147                // an invalid chain to be accepted.
2148                // The equivalent time check discourages hash power from
2149                // extorting the network via DOS attack into accepting an
2150                // invalid block through telling users they must manually set
2151                // assumevalid. Requiring a software change or burying the
2152                // invalid block, regardless of the setting, makes it hard to
2153                // hide the implication of the demand. This also avoids having
2154                // release candidates that are hardly doing any signature
2155                // verification at all in testing without having to artificially
2156                // set the default assumed verified block further back. The test
2157                // against the minimum chain work prevents the skipping when
2158                // denied access to any chain at least as good as the expected
2159                // chain.
                // NOTE(review): the assignment to fScriptChecks that this
                // expression feeds (listing line 2160) was stripped by the
                // extraction — confirm against the real source.
2161                    *m_chainman.m_best_header, *pindex,
2162                    *m_chainman.m_best_header,
2163                    consensusParams) <= 60 * 60 * 24 * 7 * 2);
2164            }
2165        }
2166    }
2167
2170    LogPrint(BCLog::BENCH, "    - Sanity checks: %.2fms [%.2fs (%.2fms/blk)]\n",
2173
2174    // Do not allow blocks that contain transactions which 'overwrite' older
2175    // transactions, unless those are already completely spent. If such
2176    // overwrites are allowed, coinbases and transactions depending upon those
2177    // can be duplicated to remove the ability to spend the first instance --
2178    // even after being sent to another address.
2179    // See BIP30, CVE-2012-1909, and http://r6.ca/blog/20120206T005236Z.html
2180    // for more information. This rule was originally applied to all blocks
2181    // with a timestamp after March 15, 2012, 0:00 UTC. Now that the whole
2182    // chain is irreversibly beyond that time it is applied to all blocks
2183    // except the two in the chain that violate it. This prevents exploiting
2184    // the issue against nodes during their initial block download.
2185    bool fEnforceBIP30 = !((pindex->nHeight == 91842 &&
2186                            pindex->GetBlockHash() ==
2187                                uint256S("0x00000000000a4d0a398161ffc163c503763"
2188                                         "b1f4360639393e0e4c8e300e0caec")) ||
2189                           (pindex->nHeight == 91880 &&
2190                            pindex->GetBlockHash() ==
2191                                uint256S("0x00000000000743f190a18c5577a3c2d2a1f"
2192                                         "610ae9601ac046a38084ccb7cd721")));
2193
2194    // Once BIP34 activated it was not possible to create new duplicate
2195    // coinbases and thus other than starting with the 2 existing duplicate
2196    // coinbase pairs, not possible to create overwriting txs. But by the time
2197    // BIP34 activated, in each of the existing pairs the duplicate coinbase had
2198    // overwritten the first before the first had been spent. Since those
2199    // coinbases are sufficiently buried it's no longer possible to create
2200    // further duplicate transactions descending from the known pairs either. If
2201    // we're on the known chain at height greater than where BIP34 activated, we
2202    // can save the db accesses needed for the BIP30 check.
2203
2204    // BIP34 requires that a block at height X (block X) has its coinbase
2205    // scriptSig start with a CScriptNum of X (indicated height X). The above
2206    // logic of no longer requiring BIP30 once BIP34 activates is flawed in the
2207    // case that there is a block X before the BIP34 height of 227,931 which has
2208    // an indicated height Y where Y is greater than X. The coinbase for block
2209    // X would also be a valid coinbase for block Y, which could be a BIP30
2210    // violation. An exhaustive search of all mainnet coinbases before the
2211    // BIP34 height which have an indicated height greater than the block height
2212    // reveals many occurrences. The 3 lowest indicated heights found are
2213    // 209,921, 490,897, and 1,983,702 and thus coinbases for blocks at these 3
2214    // heights would be the first opportunity for BIP30 to be violated.
2215
2216    // The search reveals a great many blocks which have an indicated height
2217    // greater than 1,983,702, so we simply remove the optimization to skip
2218    // BIP30 checking for blocks at height 1,983,702 or higher. Before we reach
2219    // that block in another 25 years or so, we should take advantage of a
2220    // future consensus change to do a new and improved version of BIP34 that
2221    // will actually prevent ever creating any duplicate coinbases in the
2222    // future.
2223    static constexpr int BIP34_IMPLIES_BIP30_LIMIT = 1983702;
2224
2225    // There is no potential to create a duplicate coinbase at block 209,921
2226    // because this is still before the BIP34 height and so explicit BIP30
2227    // checking is still active.
2228
2229    // The final case is block 176,684 which has an indicated height of
2230    // 490,897. Unfortunately, this issue was not discovered until about 2 weeks
2231    // before block 490,897 so there was not much opportunity to address this
2232    // case other than to carefully analyze it and determine it would not be a
2233    // problem. Block 490,897 was, in fact, mined with a different coinbase than
2234    // block 176,684, but it is important to note that even if it hadn't been or
2235    // is remined on an alternate fork with a duplicate coinbase, we would still
2236    // not run into a BIP30 violation. This is because the coinbase for 176,684
2237    // is spent in block 185,956 in transaction
2238    // d4f7fbbf92f4a3014a230b2dc70b8058d02eb36ac06b4a0736d9d60eaa9e8781. This
2239    // spending transaction can't be duplicated because it also spends coinbase
2240    // 0328dd85c331237f18e781d692c92de57649529bd5edf1d01036daea32ffde29. This
2241    // coinbase has an indicated height of over 4.2 billion, and wouldn't be
2242    // duplicatable until that height, and it's currently impossible to create a
2243    // chain that long. Nevertheless we may wish to consider a future soft fork
2244    // which retroactively prevents block 490,897 from creating a duplicate
2245    // coinbase. The two historical BIP30 violations often provide a confusing
2246    // edge case when manipulating the UTXO and it would be simpler not to have
2247    // another edge case to deal with.
2248
2249    // testnet3 has no blocks before the BIP34 height with indicated heights
2250    // post BIP34 before approximately height 486,000,000 and presumably will
2251    // be reset before it reaches block 1,983,702 and starts doing unnecessary
2252    // BIP30 checking again.
2253    assert(pindex->pprev);
2255        pindex->pprev->GetAncestor(consensusParams.BIP34Height);
2256    // Only continue to enforce if we're below BIP34 activation height or the
2257    // block hash at that height doesn't correspond.
2259        fEnforceBIP30 &&
2261        !(pindexBIP34height->GetBlockHash() == consensusParams.BIP34Hash));
2262
2263    // TODO: Remove BIP30 checking from block height 1,983,702 on, once we have
2264    // a consensus change that ensures coinbases at those heights can not
2265    // duplicate earlier coinbases.
    // NOTE(review): the surrounding `if (fEnforceBIP30 ...)` condition (listing
    // line 2266) was stripped by the extraction.
2267        for (const auto &tx : block.vtx) {
2268            for (size_t o = 0; o < tx->vout.size(); o++) {
2269                if (view.HaveCoin(COutPoint(tx->GetId(), o))) {
2270                    LogPrintf("ERROR: ConnectBlock(): tried to overwrite "
2271                              "transaction\n");
2273                                 "bad-txns-BIP30");
2274                }
2275            }
2276        }
2277    }
2278
2279    // Enforce BIP68 (sequence locks).
2280    int nLockTimeFlags = 0;
2284    }
2285
2287
2290    LogPrint(BCLog::BENCH, "    - Fork checks: %.2fms [%.2fs (%.2fms/blk)]\n",
2293
2294    std::vector<int> prevheights;
2295    Amount nFees = Amount::zero();
2296    int nInputs = 0;
2297
2298    // Limit the total executed signature operations in the block, a consensus
2299    // rule. Tracking during the CPU-consuming part (validation of uncached
2300    // inputs) is per-input atomic and validation in each thread stops very
2301    // quickly after the limit is exceeded, so an adversary cannot cause us to
2302    // exceed the limit by much at all.
2305
2306    std::vector<TxSigCheckLimiter> nSigChecksTxLimiters;
2307    nSigChecksTxLimiters.resize(block.vtx.size() - 1);
2308
2310    blockundo.vtxundo.resize(block.vtx.size() - 1);
2311
2313                                                           : nullptr);
2314
2315    // Add all outputs
2316    try {
2317        for (const auto &ptx : block.vtx) {
2318            AddCoins(view, *ptx, pindex->nHeight);
2319        }
2320    } catch (const std::logic_error &e) {
2321        // This error will be thrown from AddCoin if we try to connect a block
2322        // containing duplicate transactions. Such a thing should normally be
2323        // caught early nowadays (due to ContextualCheckBlock's CTOR
2324        // enforcement) however some edge cases can escape that:
2325        // - ContextualCheckBlock does not get re-run after saving the block to
2326        // disk, and older versions may have saved a weird block.
2327        // - its checks are not applied to pre-CTOR chains, which we might visit
2328        // with checkpointing off.
2329        LogPrintf("ERROR: ConnectBlock(): tried to overwrite transaction\n");
2331                             "tx-duplicate");
2332    }
2333
2334    size_t txIndex = 0;
2335    // nSigChecksRet may be accurate (found in cache) or 0 (checks were
2336    // deferred into vChecks).
2337    int nSigChecksRet;
2338    for (const auto &ptx : block.vtx) {
2339        const CTransaction &tx = *ptx;
2340        const bool isCoinBase = tx.IsCoinBase();
2341        nInputs += tx.vin.size();
2342
2343        {
2346            if (!isCoinBase &&
2348                                 txfee)) {
2349                // Any transaction validation failure in ConnectBlock is a block
2350                // consensus failure.
2352                              tx_state.GetRejectReason(),
2353                              tx_state.GetDebugMessage());
2354
2355                return error("%s: Consensus::CheckTxInputs: %s, %s", __func__,
2356                             tx.GetId().ToString(), state.ToString());
2357            }
2358            nFees += txfee;
2359        }
2360
2361        if (!MoneyRange(nFees)) {
2362            LogPrintf("ERROR: %s: accumulated fee in the block out of range.\n",
2363                      __func__);
2365                                 "bad-txns-accumulated-fee-outofrange");
2366        }
2367
2368        // The following checks do not apply to the coinbase.
2369        if (isCoinBase) {
2370            continue;
2371        }
2372
2373        // Check that transaction is BIP68 final. BIP68 lock checks (as
2374        // opposed to nLockTime checks) must be in ConnectBlock because they
2375        // require the UTXO set.
2376        prevheights.resize(tx.vin.size());
2377        for (size_t j = 0; j < tx.vin.size(); j++) {
2378            prevheights[j] = view.AccessCoin(tx.vin[j].prevout).GetHeight();
2379        }
2380
2381        if (!SequenceLocks(tx, nLockTimeFlags, prevheights, *pindex)) {
2382            LogPrintf("ERROR: %s: contains a non-BIP68-final transaction\n",
2383                      __func__);
2385                                 "bad-txns-nonfinal");
2386        }
2387
2388        // Don't cache results if we're actually connecting blocks (still
2389        // consult the cache, though).
2391
2393        if (!fEnforceSigCheck) {
2394            // Historically, there have been transactions with a very high
2395            // sigcheck count, so we need to disable this check for such
2396            // transactions.
2398        }
2399
2400        std::vector<CScriptCheck> vChecks;
2402        if (fScriptChecks &&
2407            // Any transaction validation failure in ConnectBlock is a block
2408            // consensus failure
2410                          tx_state.GetRejectReason(),
2411                          tx_state.GetDebugMessage());
2412            return error(
2413                "ConnectBlock(): CheckInputScripts on %s failed with %s",
2414                tx.GetId().ToString(), state.ToString());
2415        }
2416
2417        control.Add(std::move(vChecks));
2418
2419        // Note: this must execute in the same iteration as CheckTxInputs (not
2420        // in a separate loop) in order to detect double spends. However,
2421        // this does not prevent double-spending by duplicated transaction
2422        // inputs in the same transaction (cf. CVE-2018-17144) -- that check is
2423        // done in CheckBlock (CheckRegularTransaction).
2424        SpendCoins(view, tx, blockundo.vtxundo.at(txIndex), pindex->nHeight);
2425        txIndex++;
2426    }
2427
2431        "    - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) "
2432        "[%.2fs (%.2fms/blk)]\n",
2433        (unsigned)block.vtx.size(), MILLI * (nTime3 - nTime2),
2434        MILLI * (nTime3 - nTime2) / block.vtx.size(),
2435        nInputs <= 1 ? 0 : MILLI * (nTime3 - nTime2) / (nInputs - 1),
2437
2438    const Amount blockReward =
2439        nFees + GetBlockSubsidy(pindex->nHeight, consensusParams);
2440    if (block.vtx[0]->GetValueOut() > blockReward) {
2441        LogPrintf("ERROR: ConnectBlock(): coinbase pays too much (actual=%d vs "
2442                  "limit=%d)\n",
2443                  block.vtx[0]->GetValueOut(), blockReward);
2445                             "bad-cb-amount");
2446    }
2447
2448    if (blockFees) {
2449        *blockFees = nFees;
2450    }
2451
    // Wait for the parallel script-check queue; a failure here means at least
    // one input script in the block was invalid.
2452    if (!control.Wait()) {
2454                             "blk-bad-inputs", "parallel script check failed");
2455    }
2456
2459    LogPrint(
2461        "    - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs (%.2fms/blk)]\n",
2462        nInputs - 1, MILLI * (nTime4 - nTime2),
2463        nInputs <= 1 ? 0 : MILLI * (nTime4 - nTime2) / (nInputs - 1),
2465
2466    if (fJustCheck) {
2467        return true;
2468    }
2469
2470    if (!m_blockman.WriteUndoDataForBlock(blockundo, state, *pindex)) {
2471        return false;
2472    }
2473
2474    if (!pindex->IsValid(BlockValidity::SCRIPTS)) {
2476        m_blockman.m_dirty_blockindex.insert(pindex);
2477    }
2478
2479    // add this block to the view's block chain
2480    view.SetBestBlock(pindex->GetBlockHash());
2481
2484    LogPrint(BCLog::BENCH, "    - Index writing: %.2fms [%.2fs (%.2fms/blk)]\n",
2487
2488    TRACE6(validation, block_connected, block_hash.data(), pindex->nHeight,
2489           block.vtx.size(), nInputs, nSigChecksRet,
2490           // in microseconds (µs)
2491           nTime5 - nTimeStart);
2492
2493    return true;
2494}
2495
// Convenience overload: computes the cache-size state using the chainstate's
// configured limits. NOTE(review): interior lines 2497-2499 were stripped by
// the extraction; the visible `: 0)` is the tail of the forwarded arguments.
2496CoinsCacheSizeState Chainstate::GetCoinsCacheSizeState() {
2500                                     : 0);
2501}
2502
// Classifies the coins-cache memory usage against the given cache and mempool
// budgets, returning CRITICAL when usage exceeds the total space and LARGE
// when it exceeds a "large" threshold derived from a 10MB headroom constant.
// NOTE(review): the usage-accumulation lines (2506-2510) and the return
// statements (2522, 2524, 2526) were stripped by the extraction.
2504Chainstate::GetCoinsCacheSizeState(size_t max_coins_cache_size_bytes,
2505                                   size_t max_mempool_size_bytes) {
2511        std::max<int64_t>(int64_t(max_mempool_size_bytes) - nMempoolUsage, 0);
2512
2514    static constexpr int64_t MAX_BLOCK_COINSDB_USAGE_BYTES =
2515        10 * 1024 * 1024; // 10MB
2516    int64_t large_threshold = std::max(
2518
2519    if (cacheSize > nTotalSpace) {
2520        LogPrintf("Cache size (%s) exceeds total space (%s)\n", cacheSize,
2521                  nTotalSpace);
2523    } else if (cacheSize > large_threshold) {
2525    }
2527}
2528
// NOTE(review): body of Chainstate::FlushStateToDisk — the signature lines
// (listing 2529-2530) were stripped by the extraction. Decides, based on the
// flush mode, cache pressure, and elapsed time, whether to write block files,
// the block index, and the coins cache to disk, optionally pruning block
// files first. Code left byte-identical; comments only.
2531    LOCK(cs_main);
2532    assert(this->CanFlushToDisk());
2533    std::set<int> setFilesToPrune;
2534    bool full_flush_completed = false;
2535
2536    const size_t coins_count = CoinsTip().GetCacheSize();
2537    const size_t coins_mem_usage = CoinsTip().DynamicMemoryUsage();
2538
2539    try {
2540        {
2541            bool fFlushForPrune = false;
2542            bool fDoFullFlush = false;
2543
2546            if (m_blockman.IsPruneMode() &&
2548                !fReindex) {
2549                // Make sure we don't prune any of the prune locks bestblocks.
2550                // Pruning is height-based.
2551                int last_prune{m_chain.Height()};
2552                // prune lock that actually was the limiting factor, only used
2553                // for logging
2554                std::optional<std::string> limiting_lock;
2555
2556                for (const auto &prune_lock : m_blockman.m_prune_locks) {
2557                    if (prune_lock.second.height_first ==
2558                        std::numeric_limits<int>::max()) {
2559                        continue;
2560                    }
2561                    // Remove the buffer and one additional block here to get
2562                    // actual height that is outside of the buffer
2563                    const int lock_height{prune_lock.second.height_first -
2564                                          PRUNE_LOCK_BUFFER - 1};
2565                    last_prune = std::max(1, std::min(last_prune, lock_height));
2566                    if (last_prune == lock_height) {
2567                        limiting_lock = prune_lock.first;
2568                    }
2569                }
2570
2571                if (limiting_lock) {
2572                    LogPrint(BCLog::PRUNE, "%s limited pruning to height %d\n",
2573                             limiting_lock.value(), last_prune);
2574                }
2575
2576                if (nManualPruneHeight > 0) {
2578                        "find files to prune (manual)", BCLog::BENCH);
2581                        std::min(last_prune, nManualPruneHeight),
2582                        m_chain.Height());
2583                } else {
2584                    LOG_TIME_MILLIS_WITH_CATEGORY("find files to prune",
2585                                                  BCLog::BENCH);
2591                }
2592                if (!setFilesToPrune.empty()) {
2593                    fFlushForPrune = true;
                    // Record the pruned state once, the first time we prune.
2595                        m_blockman.m_block_tree_db->WriteFlag(
2596                            "prunedblockfiles", true);
2598                    }
2599                }
2600            }
2602            // Avoid writing/flushing immediately after startup.
2603            if (m_last_write.count() == 0) {
2605            }
2606            if (m_last_flush.count() == 0) {
2608            }
2609            // The cache is large and we're within 10% and 10 MiB of the limit,
2610            // but we have time now (not in the middle of a block processing).
2611            bool fCacheLarge = mode == FlushStateMode::PERIODIC &&
2613            // The cache is over the limit, we have to write now.
2616            // It's been a while since we wrote the block index to disk. Do this
2617            // frequently, so we don't need to redownload after a crash.
2620            // It's been very long since we flushed the cache. Do this
2621            // infrequently, to optimize cache usage.
2624            // Combine all conditions that result in a full cache flush.
2627            // Write blocks and block index to disk.
2629                // Ensure we can write block index
2631                    return AbortNode(state, "Disk space is too low!",
2632                                     _("Disk space is too low!"));
2633                }
2634
2635                {
2637                        "write block and undo data to disk", BCLog::BENCH);
2638
2639                    // First make sure all block and undo data is flushed to
2640                    // disk.
2642                }
2643                // Then update all block file information (which may refer to
2644                // block and undo files).
2645                {
2646                    LOG_TIME_MILLIS_WITH_CATEGORY("write block index to disk",
2647                                                  BCLog::BENCH);
2648
2649                    if (!m_blockman.WriteBlockIndexDB()) {
2650                        return AbortNode(
2651                            state, "Failed to write to block index database");
2652                    }
2653                }
2654
2655                // Finally remove any pruned files
2656                if (fFlushForPrune) {
2657                    LOG_TIME_MILLIS_WITH_CATEGORY("unlink pruned files",
2658                                                  BCLog::BENCH);
2659
2661                }
2663            }
2664            // Flush best chain related state. This can only be done if the
2665            // blocks / block index write was also done.
2666            if (fDoFullFlush && !CoinsTip().GetBestBlock().IsNull()) {
2668                    strprintf("write coins cache to disk (%d coins, %.2fkB)",
2669                              coins_count, coins_mem_usage / 1000),
2670                    BCLog::BENCH);
2671
2672                // Typical Coin structures on disk are around 48 bytes in size.
2673                // Pushing a new one to the database can cause it to be written
2674                // twice (once in the log, and once in the tables). This is
2675                // already an overestimation, as most will delete an existing
2676                // entry or overwrite one. Still, use a conservative safety
2677                // factor of 2.
2679                                    48 * 2 * 2 * CoinsTip().GetCacheSize())) {
2680                    return AbortNode(state, "Disk space is too low!",
2681                                     _("Disk space is too low!"));
2682                }
2683
2684                // Flush the chainstate (which may refer to block index
2685                // entries).
2686                if (!CoinsTip().Flush()) {
2687                    return AbortNode(state, "Failed to write to coin database");
2688                }
2690                full_flush_completed = true;
2691            }
2692
2693            TRACE5(utxocache, flush,
2694                   // in microseconds (µs)
2695                   GetTimeMicros() - nNow.count(), uint32_t(mode), coins_count,
2697        }
2698
2700            // Update best block in wallet (so we can detect restored wallets).
2702        }
2703    } catch (const std::runtime_error &e) {
2704        return AbortNode(state, std::string("System error while flushing: ") +
2705                                    e.what());
2706    }
2707    return true;
2708}
2709
// NOTE(review): body of Chainstate::ForceFlushStateToDisk (signature lines
// stripped by the extraction). Unconditionally flushes the chain state to
// disk (FlushStateMode::ALWAYS); failures are logged but not propagated.
2712    if (!this->FlushStateToDisk(state, FlushStateMode::ALWAYS)) {
2713        LogPrintf("%s: failed to flush state (%s)\n", __func__,
2714                  state.ToString());
2715    }
2716}
2717
// NOTE(review): body of Chainstate::PruneAndFlush (signature lines stripped
// by the extraction). Triggers a prune-oriented flush (FlushStateMode::NONE);
// failures are logged but not propagated.
2721    if (!this->FlushStateToDisk(state, FlushStateMode::NONE)) {
2722        LogPrintf("%s: failed to flush state (%s)\n", __func__,
2723                  state.ToString());
2724    }
2725}
2726
// NOTE(review): tail of the static UpdateTipLog helper — its first signature
// line (listing 2727, taking the coins view) was stripped by the extraction.
// Logs the new best tip: hash, height, version, log2 chain work, tx count,
// timestamp, verification progress, and coins-cache usage, prefixed with the
// caller-supplied `prefix` and `func_name`.
2728                         const CBlockIndex *tip, const CChainParams &params,
2729                         const std::string &func_name,
2730                         const std::string &prefix)
2733    LogPrintf("%s%s: new best=%s height=%d version=0x%08x log2_work=%f tx=%ld "
2734              "date='%s' progress=%f cache=%.1fMiB(%utxo)\n",
2735              prefix, func_name, tip->GetBlockHash().ToString(), tip->nHeight,
2736              tip->nVersion, log(tip->nChainWork.getdouble()) / log(2.0),
2737              tip->GetChainTxCount(),
2738              FormatISO8601DateTime(tip->GetBlockTime()),
2739              GuessVerificationProgress(params.TxData(), tip),
2740              coins_tip.DynamicMemoryUsage() * (1.0 / (1 << 20)),
2741              coins_tip.GetCacheSize());
2742}
2743
// Updates chain-tip-dependent state after the tip changes. For a background
// (non-active) chainstate it only logs periodically; for the active chainstate
// it notifies the mempool (stripped line 2765) and wakes waiters on
// g_best_block_cv. NOTE(review): doxygen gaps (2745, 2757, 2769-2770, 2774)
// are lines stripped by the extraction.
2744void Chainstate::UpdateTip(const CBlockIndex *pindexNew) {
2746    const auto &coins_tip = CoinsTip();
2747
2748    const CChainParams &params{m_chainman.GetParams()};
2749
2750    // The remainder of the function isn't relevant if we are not acting on
2751    // the active chainstate, so return if need be.
2752    if (this != &m_chainman.ActiveChainstate()) {
2753        // Only log every so often so that we don't bury log messages at the
2754        // tip.
2755        constexpr int BACKGROUND_LOG_INTERVAL = 2000;
2756        if (pindexNew->nHeight % BACKGROUND_LOG_INTERVAL == 0) {
2758                         "[background validation] ");
2759        }
2760        return;
2761    }
2762
2763    // New best block
2764    if (m_mempool) {
2766    }
2767
2768    {
2771        g_best_block_cv.notify_all();
2772    }
2773
2775}
2776
// NOTE(review): body of Chainstate::DisconnectTip — the signature lines were
// stripped by the extraction. Disconnects the current tip block from the
// chain: reads it from disk, undoes it against the UTXO view, rewinds prune
// locks, flushes state if needed, returns its transactions to the mempool /
// disconnectpool (clearing the mempool when a fork deactivates), and moves
// the tip to the previous block. Code left byte-identical; comments only.
2791    if (m_mempool) {
2793    }
2794
2796
2798    assert(pindexDelete->pprev);
2799
2800    // Read block from disk.
2801    std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
2802    CBlock &block = *pblock;
2804        return error("DisconnectTip(): Failed to read block");
2805    }
2806
2807    // Apply the block atomically to the chain state.
2809    {
2811        assert(view.GetBestBlock() == pindexDelete->GetBlockHash());
2812        if (DisconnectBlock(block, pindexDelete, view) !=
2814            return error("DisconnectTip(): DisconnectBlock %s failed",
2815                         pindexDelete->GetBlockHash().ToString());
2816        }
2817
2818        bool flushed = view.Flush();
2819        assert(flushed);
2820    }
2821
2822    LogPrint(BCLog::BENCH, "- Disconnect block: %.2fms\n",
2823             (GetTimeMicros() - nStart) * MILLI);
2824
2825    {
2826        // Prune locks that began at or after the tip should be moved backward
2827        // so they get a chance to reorg
2828        const int max_height_first{pindexDelete->nHeight - 1};
2829        for (auto &prune_lock : m_blockman.m_prune_locks) {
2830            if (prune_lock.second.height_first <= max_height_first) {
2831                continue;
2832            }
2833
2834            prune_lock.second.height_first = max_height_first;
2835            LogPrint(BCLog::PRUNE, "%s prune lock moved back to %d\n",
2837        }
2838    }
2839
2840    // Write the chain state to disk, if necessary.
2842        return false;
2843    }
2844
2845    if (m_mempool) {
2846        // If this block is deactivating a fork, we move all mempool
2847        // transactions in front of disconnectpool for reprocessing in a future
2848        // updateMempoolForReorg call
2849        if (pindexDelete->pprev != nullptr &&
2853                     "Disconnecting mempool due to rewind of upgrade block\n");
2854            if (disconnectpool) {
2855                disconnectpool->importMempool(*m_mempool);
2856            }
2857            m_mempool->clear();
2858        }
2859
2860        if (disconnectpool) {
2861            disconnectpool->addForBlock(block.vtx, *m_mempool);
2862        }
2863    }
2864
2865    m_chain.SetTip(*pindexDelete->pprev);
2866
2867    UpdateTip(pindexDelete->pprev);
2868    // Let wallets know transactions went from 1-confirmed to
2869    // 0-confirmed or conflicted:
2871    return true;
2872}
2873
2879
// NOTE(review): body of Chainstate::ConnectTip — the leading signature lines
// were stripped by the extraction. Connects a new tip block: loads it from
// disk if not supplied, runs ConnectBlock, evaluates parking policies (miner
// fund, real-time targeting, staking rewards, pre-consensus) when avalanche
// has finalized a tip, flushes state if needed, updates the mempool and chain
// tip, and lets the background chainstate check snapshot completion. Code
// left byte-identical; comments only.
2887                            const std::shared_ptr<const CBlock> &pblock,
2889                            const avalanche::Processor *const avalanche) {
2891    if (m_mempool) {
2893    }
2894
2896
2897    assert(pindexNew->pprev == m_chain.Tip());
2898    // Read block from disk.
2900    std::shared_ptr<const CBlock> pthisBlock;
2901    if (!pblock) {
2902        std::shared_ptr<CBlock> pblockNew = std::make_shared<CBlock>();
2904            return AbortNode(state, "Failed to read block");
2905        }
2907    } else {
2909    }
2910
2912
2913    // Apply the block atomically to the chain state.
2917    LogPrint(BCLog::BENCH, "  - Load block from disk: %.2fms [%.2fs]\n",
2919    {
2924                               &blockFees);
2926        if (!rv) {
2927            if (state.IsInvalid()) {
2929            }
2930
2931            return error("%s: ConnectBlock %s failed, %s", __func__,
2932                         pindexNew->GetBlockHash().ToString(),
2933                         state.ToString());
2934        }
2935
2947        const BlockHash blockhash = pindexNew->GetBlockHash();
2948        if (!IsInitialBlockDownload() &&
2951
2952            const Amount blockReward =
2953                blockFees +
2955
2956            std::vector<std::unique_ptr<ParkingPolicy>> parkingPolicies;
2957            parkingPolicies.emplace_back(std::make_unique<MinerFundPolicy>(
2959
2960            if (avalanche) {
2961                // Only enable the RTT policy if the node already finalized a
2962                // block. This is because it's very possible that new blocks
2963                // will be parked after a node restart (but after IBD) if the
2964                // node is behind by a few blocks. We want to make sure that the
2965                // node will be able to switch back to the right tip in this
2966                // case.
2967                if (avalanche->hasFinalizedTip()) {
2968                    // Special case for testnet, don't reject blocks mined with
2969                    // the min difficulty
2970                    if (!consensusParams.fPowAllowMinDifficultyBlocks ||
2971                        (blockConnecting.GetBlockTime() <=
2972                         pindexNew->pprev->GetBlockTime() +
2973                             2 * consensusParams.nPowTargetSpacing)) {
2974                        parkingPolicies.emplace_back(
2975                            std::make_unique<RTTPolicy>(consensusParams,
2976                                                        *pindexNew));
2977                    }
2978                }
2979
2980                parkingPolicies.emplace_back(
2981                    std::make_unique<StakingRewardsPolicy>(
2984
2985                if (m_mempool) {
2986                    parkingPolicies.emplace_back(
2987                        std::make_unique<PreConsensusPolicy>(
2989                }
2990            }
2991
2992            // If any block policy is violated, bail on the first one found
2993            if (std::find_if_not(parkingPolicies.begin(), parkingPolicies.end(),
2994                                 [&](const auto &policy) {
2995                                     bool ret = (*policy)(blockPolicyState);
2996                                     if (!ret) {
2997                                         LogPrintf(
2998                                             "Park block because it "
2999                                             "violated a block policy: %s\n",
3000                                             blockPolicyState.ToString());
3001                                     }
3002                                     return ret;
3003                                 }) != parkingPolicies.end()) {
3004                pindexNew->nStatus = pindexNew->nStatus.withParked();
3006                return false;
3007            }
3008        }
3009
3012        assert(nBlocksTotal > 0);
3014                 "      - Connect total: %.2fms [%.2fs (%.2fms/blk)]\n",
3017        bool flushed = view.Flush();
3018        assert(flushed);
3019    }
3020
3023    LogPrint(BCLog::BENCH, "  - Flush: %.2fms [%.2fs (%.2fms/blk)]\n",
3026
3027    // Write the chain state to disk, if necessary.
3028    if (!FlushStateToDisk(state, FlushStateMode::IF_NEEDED)) {
3029        return false;
3030    }
3031
3035             "  - Writing chainstate: %.2fms [%.2fs (%.2fms/blk)]\n",
3038
3039    // Remove conflicting transactions from the mempool;
3040    if (m_mempool) {
3041        disconnectpool.removeForBlock(blockConnecting.vtx, *m_mempool);
3042
3043        // If this block is activating a fork, we move all mempool transactions
3044        // in front of disconnectpool for reprocessing in a future
3045        // updateMempoolForReorg call
3046        if (pindexNew->pprev != nullptr &&
3047            GetNextBlockScriptFlags(pindexNew, m_chainman) !=
3048                GetNextBlockScriptFlags(pindexNew->pprev, m_chainman)) {
3049            LogPrint(
3051                "Disconnecting mempool due to acceptance of upgrade block\n");
3052            disconnectpool.importMempool(*m_mempool);
3053        }
3054    }
3055
3056    // Update m_chain & related variables.
3057    m_chain.SetTip(*pindexNew);
3059
3064             "  - Connect postprocess: %.2fms [%.2fs (%.2fms/blk)]\n",
3067    LogPrint(BCLog::BENCH, "- Connect block: %.2fms [%.2fs (%.2fms/blk)]\n",
3070
3071    // If we are the background validation chainstate, check to see if we are
3072    // done validating the snapshot (i.e. our tip has reached the snapshot's
3073    // base block).
3074    if (this != &m_chainman.ActiveChainstate()) {
3075        // This call may set `m_disabled`, which is referenced immediately
3076        // afterwards in ActivateBestChain, so that we stop connecting blocks
3077        // past the snapshot base.
3078        m_chainman.MaybeCompleteSnapshotValidation();
3079    }
3080
3082    return true;
3083}
3084
// NOTE(review): body of Chainstate::FindMostWorkChain — the leading signature
// line was stripped by the extraction. Selects the best candidate tip from
// setBlockIndexCandidates, parking candidates that would reorg an avalanche-
// finalized block, optionally auto-unparking chains that accumulated enough
// extra proof-of-work, and pruning candidates whose ancestry is invalid,
// parked, or missing data. Returns the best tip with valid ancestors, or
// nullptr when no candidate remains. Code left byte-identical; comments only.
3090    std::vector<const CBlockIndex *> &blocksToReconcile, bool fAutoUnpark) {
3092    do {
3093        CBlockIndex *pindexNew = nullptr;
3094
3095        // Find the best candidate header.
3096        {
3097            std::set<CBlockIndex *, CBlockIndexWorkComparator>::reverse_iterator
3098                it = setBlockIndexCandidates.rbegin();
3099            if (it == setBlockIndexCandidates.rend()) {
3100                return nullptr;
3101            }
3102            pindexNew = *it;
3103        }
3104
3105        // If this block will cause an avalanche finalized block to be reorged,
3106        // then we park it.
3107        {
3111                LogPrintf("Park block %s because it forks prior to the "
3112                          "avalanche finalized chaintip.\n",
3113                          pindexNew->GetBlockHash().ToString());
3114                pindexNew->nStatus = pindexNew->nStatus.withParked();
3116            }
3117        }
3118
3120
3121        // Check whether all blocks on the path between the currently active
3122        // chain and the candidate are valid. Just going until the active chain
3123        // is an optimization, as we know all blocks in it are valid already.
3125        bool hasValidAncestor = true;
3127            assert(pindexTest->HaveTxsDownloaded() || pindexTest->nHeight == 0);
3128
3129            // If this is a parked chain, but it has enough PoW, clear the park
3130            // state.
3131            bool fParkedChain = pindexTest->nStatus.isOnParkedChain();
3132            if (fAutoUnpark && fParkedChain) {
3133                const CBlockIndex *pindexTip = m_chain.Tip();
3134
3135                // During initialization, pindexTip and/or pindexFork may be
3136                // null. In this case, we just ignore the fact that the chain is
3137                // parked.
3138                if (!pindexTip || !pindexFork) {
3140                    continue;
3141                }
3142
3143                // A parked chain can be unparked if it has twice as much PoW
3144                // accumulated as the main chain has since the fork block.
3146                arith_uint256 requiredWork = pindexTip->nChainWork;
3147                switch (pindexTip->nHeight - pindexFork->nHeight) {
3148                    // Limit the penalty for depth 1, 2 and 3 to half a block
3149                    // worth of work to ensure we don't fork accidentally.
3150                    case 3:
3151                    case 2:
3153                        // FALLTHROUGH
3154                    case 1: {
3155                        const arith_uint256 deltaWork =
3156                            pindexExtraPow->nChainWork - pindexFork->nChainWork;
3157                        requiredWork += (deltaWork >> 1);
3158                        break;
3159                    }
3160                    default:
3161                        requiredWork +=
3162                            pindexExtraPow->nChainWork - pindexFork->nChainWork;
3163                        break;
3164                }
3165
3166                if (pindexNew->nChainWork > requiredWork) {
3167                    // We have enough, clear the parked state.
3168                    LogPrintf("Unpark chain up to block %s as it has "
3169                              "accumulated enough PoW.\n",
3170                              pindexNew->GetBlockHash().ToString());
3171                    fParkedChain = false;
3173                }
3174            }
3175
3176            // Pruned nodes may have entries in setBlockIndexCandidates for
3177            // which block files have been deleted. Remove those as candidates
3178            // for the most work chain if we come across them; we can't switch
3179            // to a chain unless we have all the non-active-chain parent blocks.
3180            bool fInvalidChain = pindexTest->nStatus.isInvalid();
3181            bool fMissingData = !pindexTest->nStatus.hasData();
3183                // The current block is acceptable, move to the parent, up to
3184                // the fork point.
3185                pindexTest = pindexTest->pprev;
3186                continue;
3187            }
3188
3189            // Candidate chain is not usable (either invalid or parked or
3190            // missing data)
3191            hasValidAncestor = false;
3193
3194            if (fInvalidChain && (m_chainman.m_best_invalid == nullptr ||
3195                                  pindexNew->nChainWork >
3196                                      m_chainman.m_best_invalid->nChainWork)) {
3197                m_chainman.m_best_invalid = pindexNew;
3198            }
3199
3200            if (fParkedChain && (m_chainman.m_best_parked == nullptr ||
3201                                 pindexNew->nChainWork >
3202                                     m_chainman.m_best_parked->nChainWork)) {
3203                m_chainman.m_best_parked = pindexNew;
3204            }
3205
3206            LogPrintf("Considered switching to better tip %s but that chain "
3207                      "contains a%s%s%s block.\n",
3208                      pindexNew->GetBlockHash().ToString(),
3209                      fInvalidChain ? "n invalid" : "",
3210                      fParkedChain ? " parked" : "",
3211                      fMissingData ? " missing-data" : "");
3212
3214            // Remove the entire chain from the set.
3215            while (pindexTest != pindexFailed) {
3216                if (fInvalidChain || fParkedChain) {
3217                    pindexFailed->nStatus =
3218                        pindexFailed->nStatus.withFailedParent(fInvalidChain)
3219                            .withParkedParent(fParkedChain);
3220                } else if (fMissingData) {
3221                    // If we're missing data, then add back to
3222                    // m_blocks_unlinked, so that if the block arrives in the
3223                    // future we can try adding to setBlockIndexCandidates
3224                    // again.
3226                        std::make_pair(pindexFailed->pprev, pindexFailed));
3227                }
3229                pindexFailed = pindexFailed->pprev;
3230            }
3231
3232            if (fInvalidChain || fParkedChain) {
3233                // We discovered a new chain tip that is either parked or
3234                // invalid, we may want to warn.
3236            }
3237        }
3238
3239        blocksToReconcile.push_back(pindexNew);
3240
3241        // We found a candidate that has valid ancestors. This is our guy.
3242        if (hasValidAncestor) {
3243            return pindexNew;
3244        }
3245    } while (true);
3246}
3247
3253 // Note that we can't delete the current block itself, as we may need to
3254 // return to it later in case a reorganization to a better block fails.
3255 auto it = setBlockIndexCandidates.begin();
3256 while (it != setBlockIndexCandidates.end() &&
3257 setBlockIndexCandidates.value_comp()(*it, m_chain.Tip())) {
3258 setBlockIndexCandidates.erase(it++);
3259 }
3260
3261 // Either the current tip or a successor of it we're working towards is left
3262 // in setBlockIndexCandidates.
3264}
3265
3275 const std::shared_ptr<const CBlock> &pblock, bool &fInvalidFound,
3276 const avalanche::Processor *const avalanche) {
3278 if (m_mempool) {
3280 }
3281
3284
3285 // Disconnect active blocks which are no longer in the best chain.
3286 bool fBlocksDisconnected = false;
3288 while (m_chain.Tip() && m_chain.Tip() != pindexFork) {
3289 if (!fBlocksDisconnected) {
3290 // Import and clear mempool; we must do this to preserve
3291 // topological ordering in the mempool index. This is ok since
3292 // inserts into the mempool are very fast now in our new
3293 // implementation.
3295 }
3296
3297 if (!DisconnectTip(state, &disconnectpool)) {
3298 // This is likely a fatal error, but keep the mempool consistent,
3299 // just in case. Only remove from the mempool in this case.
3300 if (m_mempool) {
3301 disconnectpool.updateMempoolForReorg(*this, false, *m_mempool);
3302 }
3303
3304 // If we're unable to disconnect a block during normal operation,
3305 // then that is a failure of our local system -- we should abort
3306 // rather than stay on a less work chain.
3307 AbortNode(state,
3308 "Failed to disconnect block; see debug.log for details");
3309 return false;
3310 }
3311
3312 fBlocksDisconnected = true;
3313 }
3314
3315 // Build list of new blocks to connect.
3316 std::vector<CBlockIndex *> vpindexToConnect;
3317 bool fContinue = true;
3318 int nHeight = pindexFork ? pindexFork->nHeight : -1;
3319 while (fContinue && nHeight != pindexMostWork->nHeight) {
3320 // Don't iterate the entire list of potential improvements toward the
3321 // best tip, as we likely only need a few blocks along the way.
3322 int nTargetHeight = std::min(nHeight + 32, pindexMostWork->nHeight);
3323 vpindexToConnect.clear();
3326 while (pindexIter && pindexIter->nHeight != nHeight) {
3327 vpindexToConnect.push_back(pindexIter);
3329 }
3330
3332
3333 // Connect new blocks.
3338 ? pblock
3339 : std::shared_ptr<const CBlock>(),
3341 if (state.IsInvalid()) {
3342 // The block violates a consensus rule.
3343 if (state.GetResult() !=
3346 }
3347 state = BlockValidationState();
3348 fInvalidFound = true;
3349 fContinue = false;
3350 break;
3351 }
3352
3353 if (blockPolicyState.IsInvalid()) {
3354 // The block violates a policy rule.
3355 fContinue = false;
3356 break;
3357 }
3358
3359 // A system error occurred (disk space, database error, ...).
3360 // Make the mempool consistent with the current tip, just in
3361 // case any observers try to use it before shutdown.
3362 if (m_mempool) {
3363 disconnectpool.updateMempoolForReorg(*this, false,
3364 *m_mempool);
3365 }
3366 return false;
3367 } else {
3369 if (!pindexOldTip ||
3370 m_chain.Tip()->nChainWork > pindexOldTip->nChainWork) {
3371 // We're in a better position than we were. Return
3372 // temporarily to release the lock.
3373 fContinue = false;
3374 break;
3375 }
3376 }
3377 }
3378 }
3379
3380 if (m_mempool) {
3381 if (fBlocksDisconnected || !disconnectpool.isEmpty()) {
3382 // If any blocks were disconnected, we need to update the mempool
3383 // even if disconnectpool is empty. The disconnectpool may also be
3384 // non-empty if the mempool was imported due to new validation rules
3385 // being in effect.
3387 "Updating mempool due to reorganization or "
3388 "rules upgrade/downgrade\n");
3389 disconnectpool.updateMempoolForReorg(*this, true, *m_mempool);
3390 }
3391
3392 m_mempool->check(this->CoinsTip(), this->m_chain.Height() + 1);
3393 }
3394
3395 // Callbacks/notifications for a new best chain.
3396 if (fInvalidFound) {
3398 } else {
3400 }
3401
3402 return true;
3403}
3404
3406 if (!init) {
3408 }
3409 if (::fReindex) {
3411 }
3413}
3414
3416 bool fNotify = false;
3417 bool fInitialBlockDownload = false;
3418 static CBlockIndex *pindexHeaderOld = nullptr;
3419 CBlockIndex *pindexHeader = nullptr;
3420 {
3421 LOCK(cs_main);
3422 pindexHeader = chainstate.m_chainman.m_best_header;
3423
3425 fNotify = true;
3426 fInitialBlockDownload = chainstate.IsInitialBlockDownload();
3428 }
3429 }
3430
3431 // Send block tip changed notifications without cs_main
3432 if (fNotify) {
3433 chainstate.m_chainman.GetNotifications().headerTip(
3435 pindexHeader->nHeight, pindexHeader->nTime, false);
3436 }
3437 return fNotify;
3438}
3439
3442
3443 if (GetMainSignals().CallbacksPending() > 10) {
3445 }
3446}
3447
3449 std::shared_ptr<const CBlock> pblock,
3451 bool skip_checkblockindex) {
3453
3454 // Note that while we're often called here from ProcessNewBlock, this is
3455 // far from a guarantee. Things in the P2P/RPC will often end up calling
3456 // us in the middle of ProcessNewBlock - do not assume pblock is set
3457 // sanely for performance or correctness!
3459
3460 // ABC maintains a fair degree of expensive-to-calculate internal state
3461 // because this function periodically releases cs_main so that it does not
3462 // lock up other threads for too long during large connects - and to allow
3463 // for e.g. the callback queue to drain we use m_chainstate_mutex to enforce
3464 // mutual exclusion so that only one caller may execute this function at a
3465 // time
3467
3468 // Belt-and-suspenders check that we aren't attempting to advance the
3469 // background chainstate past the snapshot base block.
3470 if (WITH_LOCK(::cs_main, return m_disabled)) {
3471 LogPrintf("m_disabled is set - this chainstate should not be in "
3472 "operation. Please report this as a bug. %s\n",
3474 return false;
3475 }
3476
3477 CBlockIndex *pindexMostWork = nullptr;
3478 CBlockIndex *pindexNewTip = nullptr;
3479 int nStopAtHeight = gArgs.GetIntArg("-stopatheight", DEFAULT_STOPATHEIGHT);
3480 do {
3481 // Block until the validation queue drains. This should largely
3482 // never happen in normal operation, however may happen during
3483 // reindex, causing memory blowup if we run too far ahead.
3484 // Note that if a validationinterface callback ends up calling
3485 // ActivateBestChain this may lead to a deadlock! We should
3486 // probably have a DEBUG_LOCKORDER test for this in the future.
3488
3489 std::vector<const CBlockIndex *> blocksToReconcile;
3490 bool blocks_connected = false;
3491
3492 const bool fAutoUnpark =
3493 gArgs.GetBoolArg("-automaticunparking", !avalanche);
3494
3495 {
3496 LOCK(cs_main);
3497 // Lock transaction pool for at least as long as it takes for
3498 // updateMempoolForReorg to be executed if needed
3499 LOCK(MempoolMutex());
3501 do {
3502 // We absolutely may not unlock cs_main until we've made forward
3503 // progress (with the exception of shutdown due to hardware
3504 // issues, low disk space, etc).
3505
3506 if (pindexMostWork == nullptr) {
3509 }
3510
3511 // Whether we have anything to do at all.
3512 if (pindexMostWork == nullptr ||
3513 pindexMostWork == m_chain.Tip()) {
3514 break;
3515 }
3516
3517 bool fInvalidFound = false;
3518 std::shared_ptr<const CBlock> nullBlockPtr;
3520 state, pindexMostWork,
3521 pblock && pblock->GetHash() ==
3522 pindexMostWork->GetBlockHash()
3523 ? pblock
3524 : nullBlockPtr,
3526 // A system error occurred
3527 return false;
3528 }
3529 blocks_connected = true;
3530
3531 if (fInvalidFound ||
3532 (pindexMostWork && pindexMostWork->nStatus.isParked())) {
3533 // Wipe cache, we may need another branch now.
3534 pindexMostWork = nullptr;
3535 }
3536
3538
3539 // This will have been toggled in
3540 // ActivateBestChainStep -> ConnectTip ->
3541 // MaybeCompleteSnapshotValidation, if at all, so we should
3542 // catch it here.
3543 //
3544 // Break this do-while to ensure we don't advance past the base
3545 // snapshot.
3546 if (m_disabled) {
3547 break;
3548 }
3549 } while (!m_chain.Tip() ||
3551 m_chain.Tip(), starting_tip)));
3552
3553 // Check the index once we're done with the above loop, since
3554 // we're going to release cs_main soon. If the index is in a bad
3555 // state now, then it's better to know immediately rather than
3556 // randomly have it cause a problem in a race.
3557 if (!skip_checkblockindex) {
3559 }
3560
3561 if (blocks_connected) {
3564
3565 // Notify external listeners about the new tip.
3566 // Enqueue while holding cs_main to ensure that UpdatedBlockTip
3567 // is called in the order in which blocks are connected
3568 if (pindexFork != pindexNewTip) {
3569 // Notify ValidationInterface subscribers
3572
3573 // Always notify the UI if a new block tip was connected
3576 *pindexNewTip);
3577 }
3578 }
3579 }
3580 // When we reach this point, we switched to a new tip (stored in
3581 // pindexNewTip).
3582 if (avalanche) {
3583 const CBlockIndex *pfinalized =
3586 for (const CBlockIndex *pindex : blocksToReconcile) {
3587 avalanche->addToReconcile(pindex);
3588
3589 // Compute staking rewards for all blocks with more chainwork to
3590 // just after the finalized block. We could stop at the fork
3591 // point, but this is more robust.
3592 if (blocks_connected) {
3593 const CBlockIndex *pindexTest = pindex;
3594 while (pindexTest && pindexTest != pfinalized) {
3595 if (pindexTest->nHeight < pindex->nHeight - 3) {
3596 // Only compute up to some max depth
3597 break;
3598 }
3599 avalanche->computeStakingReward(pindexTest);
3600 pindexTest = pindexTest->pprev;
3601 }
3602 }
3603 }
3604 }
3605
3606 if (!blocks_connected) {
3607 return true;
3608 }
3609
3610 if (nStopAtHeight && pindexNewTip &&
3611 pindexNewTip->nHeight >= nStopAtHeight) {
3612 StartShutdown();
3613 }
3614
3615 if (WITH_LOCK(::cs_main, return m_disabled)) {
3616 // Background chainstate has reached the snapshot base block, so
3617 // exit.
3618 break;
3619 }
3620
3621 // We check shutdown only after giving ActivateBestChainStep a chance to
3622 // run once so that we never shutdown before connecting the genesis
3623 // block during LoadChainTip(). Previously this caused an assert()
3624 // failure during shutdown in such cases as the UTXO DB flushing checks
3625 // that the best block hash is non-null.
3626 if (ShutdownRequested()) {
3627 break;
3628 }
3629 } while (pindexNewTip != pindexMostWork);
3630
3631 // Write changes periodically to disk, after relay.
3633 return false;
3634 }
3635
3636 return true;
3637}
3638
3643 {
3644 LOCK(cs_main);
3645 if (pindex->nChainWork < m_chain.Tip()->nChainWork) {
3646 // Nothing to do, this block is not at the tip.
3647 return true;
3648 }
3649
3651 // The chain has been extended since the last call, reset the
3652 // counter.
3654 }
3655
3657 setBlockIndexCandidates.erase(pindex);
3659 if (nBlockReverseSequenceId > std::numeric_limits<int32_t>::min()) {
3660 // We can't keep reducing the counter if somebody really wants to
3661 // call preciousblock 2**31-1 times on the same set of tips...
3663 }
3664
3665 // In case this was parked, unpark it.
3666 UnparkBlock(pindex);
3667
3668 // Make sure it is added to the candidate list if appropriate.
3669 if (pindex->IsValid(BlockValidity::TRANSACTIONS) &&
3670 pindex->HaveTxsDownloaded()) {
3671 setBlockIndexCandidates.insert(pindex);
3673 }
3674 }
3675
3676 return ActivateBestChain(state, /*pblock=*/nullptr, avalanche);
3677}
3678
namespace {
/**
 * Leverage RAII to run a functor at scope end.
 *
 * The deferred functor runs exactly once, when the Defer object goes out of
 * scope (including on early return or exception). Copy and move are deleted
 * so the functor can never accidentally be executed more than once.
 */
template <typename Func> struct Defer {
    Func func;

    explicit Defer(Func &&f) : func(std::move(f)) {}
    ~Defer() { func(); }

    // Non-copyable and non-movable: a duplicated guard would invoke the
    // deferred functor twice.
    Defer(const Defer &) = delete;
    Defer &operator=(const Defer &) = delete;
    Defer(Defer &&) = delete;
    Defer &operator=(Defer &&) = delete;
};
} // namespace
3687
3689 bool invalidate) {
3690 // Genesis block can't be invalidated or parked
3691 assert(pindex);
3692 if (pindex->nHeight == 0) {
3693 return false;
3694 }
3695
3697 bool pindex_was_in_chain = false;
3698 int disconnected = 0;
3699
3700 // We do not allow ActivateBestChain() to run while UnwindBlock() is
3701 // running, as that could cause the tip to change while we disconnect
3702 // blocks. (Note for backport of Core PR16849: we acquire
3703 // LOCK(m_chainstate_mutex) in the Park, Invalidate and FinalizeBlock
3704 // functions due to differences in our code)
3706
3707 // We'll be acquiring and releasing cs_main below, to allow the validation
3708 // callbacks to run. However, we should keep the block index in a
3709 // consistent state as we disconnect blocks -- in particular we need to
3710 // add equal-work blocks to setBlockIndexCandidates as we disconnect.
3711 // To avoid walking the block index repeatedly in search of candidates,
3712 // build a map once so that we can look up candidate blocks by chain
3713 // work as we go.
3714 std::multimap<const arith_uint256, CBlockIndex *> candidate_blocks_by_work;
3715
3716 {
3717 LOCK(cs_main);
3718 for (auto &entry : m_blockman.m_block_index) {
3719 CBlockIndex *candidate = &entry.second;
3720 // We don't need to put anything in our active chain into the
3721 // multimap, because those candidates will be found and considered
3722 // as we disconnect.
3723 // Instead, consider only non-active-chain blocks that have at
3724 // least as much work as where we expect the new tip to end up.
3725 if (!m_chain.Contains(candidate) &&
3728 candidate->HaveTxsDownloaded()) {
3730 std::make_pair(candidate->nChainWork, candidate));
3731 }
3732 }
3733 }
3734
3735 {
3736 LOCK(cs_main);
3737 // Lock for as long as disconnectpool is in scope to make sure
3738 // UpdateMempoolForReorg is called after DisconnectTip without unlocking
3739 // in between
3740 LOCK(MempoolMutex());
3741
3742 constexpr int maxDisconnectPoolBlocks = 10;
3743 bool ret = false;
3745 // After 10 blocks this becomes nullptr, so that DisconnectTip will
3746 // stop giving us unwound block txs if we are doing a deep unwind.
3748
3749 // Disable thread safety analysis because we can't require m_mempool->cs
3750 // as m_mempool can be null. We keep the runtime analysis though.
3753 if (m_mempool && !disconnectpool.isEmpty()) {
3755 // DisconnectTip will add transactions to disconnectpool.
3756 // When all unwinding is done and we are on a new tip, we must
3757 // add all transactions back to the mempool against the new tip.
3758 disconnectpool.updateMempoolForReorg(*this,
3759 /* fAddToMempool = */ ret,
3760 *m_mempool);
3761 }
3762 });
3763
3764 // Disconnect (descendants of) pindex, and mark them invalid.
3765 while (true) {
3766 if (ShutdownRequested()) {
3767 break;
3768 }
3769
3770 // Make sure the queue of validation callbacks doesn't grow
3771 // unboundedly.
3772 // FIXME this commented code is a regression and could cause OOM if
3773 // a very old block is invalidated via the invalidateblock RPC.
3774 // This can be uncommented if the main signals are moved away from
3775 // cs_main or this code is refactored so that cs_main can be
3776 // released at this point.
3777 //
3778 // LimitValidationInterfaceQueue();
3779
3780 if (!m_chain.Contains(pindex)) {
3781 break;
3782 }
3783
3784 if (m_mempool && disconnected == 0) {
3785 // On first iteration, we grab all the mempool txs to preserve
3786 // topological ordering. This has the side-effect of temporarily
3787 // clearing the mempool, but we will re-add later in
3788 // updateMempoolForReorg() (above). This technique guarantees
3789 // mempool consistency as well as ensures that our topological
3790 // entry_id index is always correct.
3792 }
3793
3794 pindex_was_in_chain = true;
3796
3797 // ActivateBestChain considers blocks already in m_chain
3798 // unconditionally valid already, so force disconnect away from it.
3799
3801 ++disconnected;
3802
3803 if (optDisconnectPool && disconnected > maxDisconnectPoolBlocks) {
3804 // Stop using the disconnect pool after 10 blocks. After 10
3805 // blocks we no longer add block tx's to the disconnectpool.
3806 // However, when this scope ends we will reconcile what's
3807 // in the pool with the new tip (in the deferred d'tor above).
3808 optDisconnectPool = nullptr;
3809 }
3810
3811 if (!ret) {
3812 return false;
3813 }
3814
3815 assert(invalid_walk_tip->pprev == m_chain.Tip());
3816
3817 // We immediately mark the disconnected blocks as invalid.
3818 // This prevents a case where pruned nodes may fail to
3819 // invalidateblock and be left unable to start as they have no tip
3820 // candidates (as there are no blocks that meet the "have data and
3821 // are not invalid per nStatus" criteria for inclusion in
3822 // setBlockIndexCandidates).
3823
3824 invalid_walk_tip->nStatus =
3825 invalidate ? invalid_walk_tip->nStatus.withFailed()
3826 : invalid_walk_tip->nStatus.withParked();
3827
3830
3832 (invalidate ? to_mark_failed_or_parked->nStatus.hasFailed()
3833 : to_mark_failed_or_parked->nStatus.isParked())) {
3834 // We only want to mark the last disconnected block as
3835 // Failed (or Parked); its children need to be FailedParent (or
3836 // ParkedParent) instead.
3837 to_mark_failed_or_parked->nStatus =
3838 (invalidate
3839 ? to_mark_failed_or_parked->nStatus.withFailed(false)
3840 .withFailedParent()
3841 : to_mark_failed_or_parked->nStatus.withParked(false)
3842 .withParkedParent());
3843
3845 }
3846
3847 // Add any equal or more work headers to setBlockIndexCandidates
3848 auto candidate_it = candidate_blocks_by_work.lower_bound(
3849 invalid_walk_tip->pprev->nChainWork);
3850 while (candidate_it != candidate_blocks_by_work.end()) {
3852 invalid_walk_tip->pprev)) {
3853 setBlockIndexCandidates.insert(candidate_it->second);
3855 } else {
3856 ++candidate_it;
3857 }
3858 }
3859
3860 // Track the last disconnected block, so we can correct its
3861 // FailedParent (or ParkedParent) status in future iterations, or,
3862 // if it's the last one, call InvalidChainFound on it.
3864 }
3865 }
3866
3868
3869 {
3870 LOCK(cs_main);
3872 // If the to-be-marked invalid block is in the active chain,
3873 // something is interfering and we can't proceed.
3874 return false;
3875 }
3876
3877 // Mark pindex (or the last disconnected block) as invalid (or parked),
3878 // even when it never was in the main chain.
3879 to_mark_failed_or_parked->nStatus =
3880 invalidate ? to_mark_failed_or_parked->nStatus.withFailed()
3881 : to_mark_failed_or_parked->nStatus.withParked();
3883 if (invalidate) {
3885 }
3886
3887 // If any new blocks somehow arrived while we were disconnecting
3888 // (above), then the pre-calculation of what should go into
3889 // setBlockIndexCandidates may have missed entries. This would
3890 // technically be an inconsistency in the block index, but if we clean
3891 // it up here, this should be an essentially unobservable error.
3892 // Loop back over all block index entries and add any missing entries
3893 // to setBlockIndexCandidates.
3894 for (auto &[_, block_index] : m_blockman.m_block_index) {
3896 block_index.HaveTxsDownloaded() &&
3897 !setBlockIndexCandidates.value_comp()(&block_index,
3898 m_chain.Tip())) {
3900 }
3901 }
3902
3903 if (invalidate) {
3905 }
3906 }
3907
3908 // Only notify about a new block tip if the active chain was modified.
3909 if (pindex_was_in_chain) {
3912 *to_mark_failed_or_parked->pprev);
3913 }
3914 return true;
3915}
3916
3918 CBlockIndex *pindex) {
3921 // See 'Note for backport of Core PR16849' in Chainstate::UnwindBlock
3923
3924 return UnwindBlock(state, pindex, true);
3925}
3926
3930 // See 'Note for backport of Core PR16849' in Chainstate::UnwindBlock
3932
3933 return UnwindBlock(state, pindex, false);
3934}
3935
3936template <typename F>
3938 CBlockIndex *pindex, F f) {
3939 BlockStatus newStatus = f(pindex->nStatus);
3940 if (pindex->nStatus != newStatus &&
3941 (!pindexBase ||
3942 pindex->GetAncestor(pindexBase->nHeight) == pindexBase)) {
3943 pindex->nStatus = newStatus;
3944 m_blockman.m_dirty_blockindex.insert(pindex);
3945 if (newStatus.isValid()) {
3946 m_chainman.m_failed_blocks.erase(pindex);
3947 }
3948
3949 if (pindex->IsValid(BlockValidity::TRANSACTIONS) &&
3950 pindex->HaveTxsDownloaded() &&
3951 setBlockIndexCandidates.value_comp()(m_chain.Tip(), pindex)) {
3952 setBlockIndexCandidates.insert(pindex);
3953 }
3954 return true;
3955 }
3956 return false;
3957}
3958
3959template <typename F, typename C, typename AC>
3963
3964 // Update the current block and ancestors; while we're doing this, identify
3965 // which was the deepest ancestor we changed.
3967 for (auto pindexAncestor = pindex; pindexAncestor != nullptr;
3969 if (UpdateFlagsForBlock(nullptr, pindexAncestor, f)) {
3971 }
3972 }
3973
3974 if (pindexReset &&
3975 pindexReset->GetAncestor(pindexDeepestChanged->nHeight) ==
3977 // reset pindexReset if it had a modified ancestor.
3978 pindexReset = nullptr;
3979 }
3980
3981 // Update all blocks under modified blocks.
3982 for (auto &[_, block_index] : m_blockman.m_block_index) {
3986 }
3987}
3988
3991
3993 pindex, m_chainman.m_best_invalid,
3994 [](const BlockStatus status) {
3995 return status.withClearedFailureFlags();
3996 },
3997 [](const BlockStatus status) {
3998 return status.withClearedFailureFlags();
3999 },
4000 [](const BlockStatus status) {
4001 return status.withFailedParent(false);
4002 });
4003}
4004
4007
4009 pindex, m_chainman.m_best_parked,
4010 [](const BlockStatus status) {
4011 return status.withClearedParkedFlags();
4012 },
4013 [fClearChildren](const BlockStatus status) {
4014 return fClearChildren ? status.withClearedParkedFlags()
4015 : status.withParkedParent(false);
4016 },
4017 [](const BlockStatus status) {
4018 return status.withParkedParent(false);
4019 });
4020}
4021
4023 return UnparkBlockImpl(pindex, true);
4024}
4025
4027 return UnparkBlockImpl(pindex, false);
4028}
4029
4032 if (!pindex) {
4033 return false;
4034 }
4035
4036 if (!m_chain.Contains(pindex)) {
4038 "The block to mark finalized by avalanche is not on the "
4039 "active chain: %s\n",
4040 pindex->GetBlockHash().ToString());
4041 return false;
4042 }
4043
4044 avalanche.cleanupStakingRewards(pindex->nHeight);
4045
4046 if (IsBlockAvalancheFinalized(pindex)) {
4047 return true;
4048 }
4049
4050 {
4053 }
4054
4055 WITH_LOCK(cs_main, GetMainSignals().BlockFinalized(pindex));
4056
4057 return true;
4058}
4059
4064
4071
4078 const FlatFilePos &pos) {
4079 pindexNew->nTx = block.vtx.size();
4081 pindexNew->nFile = pos.nFile;
4082 pindexNew->nDataPos = pos.nPos;
4083 pindexNew->nUndoPos = 0;
4084 pindexNew->nStatus = pindexNew->nStatus.withData();
4085 pindexNew->RaiseValidity(BlockValidity::TRANSACTIONS);
4087
4088 if (pindexNew->UpdateChainStats()) {
4089 // If pindexNew is the genesis block or all parents are
4090 // BLOCK_VALID_TRANSACTIONS.
4091 std::deque<CBlockIndex *> queue;
4092 queue.push_back(pindexNew);
4093
4094 // Recursively process any descendant blocks that now may be eligible to
4095 // be connected.
4096 while (!queue.empty()) {
4097 CBlockIndex *pindex = queue.front();
4098 queue.pop_front();
4099 pindex->UpdateChainStats();
4100 if (pindex->nSequenceId == 0) {
4101 // We assign a sequence is when transaction are received to
4102 // prevent a miner from being able to broadcast a block but not
4103 // its content. However, a sequence id may have been set
4104 // manually, for instance via PreciousBlock, in which case, we
4105 // don't need to assign one.
4106 pindex->nSequenceId = nBlockSequenceId++;
4107 }
4108
4109 if (m_chain.Tip() == nullptr ||
4110 !setBlockIndexCandidates.value_comp()(pindex, m_chain.Tip())) {
4111 setBlockIndexCandidates.insert(pindex);
4112 }
4113
4114 std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
4115 std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
4116 range = m_blockman.m_blocks_unlinked.equal_range(pindex);
4117 while (range.first != range.second) {
4118 std::multimap<CBlockIndex *, CBlockIndex *>::iterator it =
4119 range.first;
4120 queue.push_back(it->second);
4121 range.first++;
4122 m_blockman.m_blocks_unlinked.erase(it);
4123 }
4124 }
4125 } else if (pindexNew->pprev &&
4126 pindexNew->pprev->IsValid(BlockValidity::TREE)) {
4128 std::make_pair(pindexNew->pprev, pindexNew));
4129 }
4130}
4131
4140static bool CheckBlockHeader(const CBlockHeader &block,
4141 BlockValidationState &state,
4142 const Consensus::Params &params,
4144 // Check proof of work matches claimed amount
4145 if (validationOptions.shouldValidatePoW() &&
4146 !CheckProofOfWork(block.GetHash(), block.nBits, params)) {
4148 "high-hash", "proof of work failed");
4149 }
4150
4151 return true;
4152}
4153
4154bool CheckBlock(const CBlock &block, BlockValidationState &state,
4155 const Consensus::Params &params,
4157 // These are checks that are independent of context.
4158 if (block.fChecked) {
4159 return true;
4160 }
4161
4162 // Check that the header is valid (particularly PoW). This is mostly
4163 // redundant with the call in AcceptBlockHeader.
4164 if (!CheckBlockHeader(block, state, params, validationOptions)) {
4165 return false;
4166 }
4167
4168 // Check the merkle root.
4169 if (validationOptions.shouldValidateMerkleRoot()) {
4170 bool mutated;
4172 if (block.hashMerkleRoot != hashMerkleRoot2) {
4174 "bad-txnmrklroot", "hashMerkleRoot mismatch");
4175 }
4176
4177 // Check for merkle tree malleability (CVE-2012-2459): repeating
4178 // sequences of transactions in a block without affecting the merkle
4179 // root of a block, while still invalidating it.
4180 if (mutated) {
4182 "bad-txns-duplicate", "duplicate transaction");
4183 }
4184 }
4185
4186 // All potential-corruption validation must be done before we do any
4187 // transaction validation, as otherwise we may mark the header as invalid
4188 // because we receive the wrong transactions for it.
4189
4190 // First transaction must be coinbase.
4191 if (block.vtx.empty()) {
4193 "bad-cb-missing", "first tx is not coinbase");
4194 }
4195
4196 // Size limits.
4197 auto nMaxBlockSize = validationOptions.getExcessiveBlockSize();
4198
4199 // Bail early if there is no way this block is of reasonable size.
4200 if ((block.vtx.size() * MIN_TRANSACTION_SIZE) > nMaxBlockSize) {
4202 "bad-blk-length", "size limits failed");
4203 }
4204
4206 if (currentBlockSize > nMaxBlockSize) {
4208 "bad-blk-length", "size limits failed");
4209 }
4210
4211 // And a valid coinbase.
4213 if (!CheckCoinbase(*block.vtx[0], tx_state)) {
4215 tx_state.GetRejectReason(),
4216 strprintf("Coinbase check failed (txid %s) %s",
4217 block.vtx[0]->GetId().ToString(),
4218 tx_state.GetDebugMessage()));
4219 }
4220
4221 // Check transactions for regularity, skipping the first. Note that this
4222 // is the first time we check that all after the first are !IsCoinBase.
4223 for (size_t i = 1; i < block.vtx.size(); i++) {
4224 auto *tx = block.vtx[i].get();
4225 if (!CheckRegularTransaction(*tx, tx_state)) {
4226 return state.Invalid(
4228 tx_state.GetRejectReason(),
4229 strprintf("Transaction check failed (txid %s) %s",
4230 tx->GetId().ToString(), tx_state.GetDebugMessage()));
4231 }
4232 }
4233
4234 if (validationOptions.shouldValidatePoW() &&
4235 validationOptions.shouldValidateMerkleRoot()) {
4236 block.fChecked = true;
4237 }
4238
4239 return true;
4240}
4241
4242bool HasValidProofOfWork(const std::vector<CBlockHeader> &headers,
4244 return std::all_of(headers.cbegin(), headers.cend(),
4245 [&](const auto &header) {
4246 return CheckProofOfWork(
4247 header.GetHash(), header.nBits, consensusParams);
4248 });
4249}
4250
4251arith_uint256 CalculateHeadersWork(const std::vector<CBlockHeader> &headers) {
4253 for (const CBlockHeader &header : headers) {
4254 CBlockIndex dummy(header);
4255 total_work += GetBlockProof(dummy);
4256 }
4257 return total_work;
4258}
4259
4271 const CBlockHeader &block, BlockValidationState &state,
4274 const std::optional<CCheckpointData> &test_checkpoints = std::nullopt)
4277 assert(pindexPrev != nullptr);
4278 const int nHeight = pindexPrev->nHeight + 1;
4279
4280 const CChainParams &params = chainman.GetParams();
4281
4282 // Check proof of work
4283 if (block.nBits != GetNextWorkRequired(pindexPrev, &block, params)) {
4284 LogPrintf("bad bits after height: %d\n", pindexPrev->nHeight);
4286 "bad-diffbits", "incorrect proof of work");
4287 }
4288
4289 // Check against checkpoints
4290 if (chainman.m_options.checkpoints_enabled) {
4292 test_checkpoints ? test_checkpoints.value() : params.Checkpoints();
4293
4294 // Check that the block chain matches the known block chain up to a
4295 // checkpoint.
4298 "ERROR: %s: rejected by checkpoint lock-in at %d\n",
4299 __func__, nHeight);
4301 "checkpoint mismatch");
4302 }
4303
4304 // Don't accept any forks from the main chain prior to last checkpoint.
4305 // GetLastCheckpoint finds the last checkpoint in MapCheckpoints that's
4306 // in our BlockIndex().
4307
4308 const CBlockIndex *pcheckpoint =
4309 blockman.GetLastCheckpoint(checkpoints);
4312 "ERROR: %s: forked chain older than last checkpoint "
4313 "(height %d)\n",
4314 __func__, nHeight);
4316 "bad-fork-prior-to-checkpoint");
4317 }
4318 }
4319
4320 // Check timestamp against prev
4321 if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast()) {
4323 "time-too-old", "block's timestamp is too early");
4324 }
4325
4326 // Check timestamp
4327 if (block.Time() > now + std::chrono::seconds{MAX_FUTURE_BLOCK_TIME}) {
4329 "time-too-new",
4330 "block timestamp too far in the future");
4331 }
4332
4333 // Reject blocks with outdated version
4334 if ((block.nVersion < 2 &&
4337 (block.nVersion < 3 &&
4340 (block.nVersion < 4 &&
4343 return state.Invalid(
4345 strprintf("bad-version(0x%08x)", block.nVersion),
4346 strprintf("rejected nVersion=0x%08x block", block.nVersion));
4347 }
4348
4349 return true;
4350}
4351
4353 const CBlockIndex &active_chain_tip, const Consensus::Params &params,
4354 const CTransaction &tx, TxValidationState &state) {
// Purpose: decide whether `tx` would satisfy its contextual (lock-time /
// finality) rules if it were included in the *next* block built on top of
// `active_chain_tip`. On failure, the reason is recorded in `state`.
// NOTE(review): the function-name line (original line 4352, presumably
// `ContextualCheckTransactionForCurrentBlock`) and line 4355 (likely a lock
// assertion) were lost in extraction — confirm against the upstream file.
4356
4357 // ContextualCheckTransactionForCurrentBlock() uses
4358 // active_chain_tip.Height()+1 to evaluate nLockTime because when
4359 // IsFinalTx() is called within AcceptBlock(), the height of the
4360 // block *being* evaluated is what is used. Thus if we want to know if a
4361 // transaction can be part of the *next* block, we need to call
4362 // ContextualCheckTransaction() with one more than
4363 // active_chain_tip.Height().
4364 const int nBlockHeight = active_chain_tip.nHeight + 1;
4365
4366 // BIP113 will require that time-locked transactions have nLockTime set to
4367 // less than the median time of the previous block they're contained in.
4368 // When the next block is created its previous block will be the current
4369 // chain tip, so we use that to calculate the median time passed to
4370 // ContextualCheckTransaction().
4371 // This time can also be used for consensus upgrades.
4372 const int64_t nMedianTimePast{active_chain_tip.GetMedianTimePast()};
4373
4374 return ContextualCheckTransaction(params, tx, state, nBlockHeight,
// NOTE(review): the trailing argument(s) on original line 4375 (presumably
// nMedianTimePast) were lost in extraction — confirm upstream.
4376}
4377
// Contextual checks on a full block that depend on its position in the chain
// (via `pindexPrev`) but not on the UTXO set: BIP113 median-time-past
// lock-time cutoff, transaction ordering / duplicate detection, per-tx
// finality, and the BIP34 rule that the coinbase scriptSig begins with the
// serialized block height. Returns false and fills `state` on violation.
// NOTE(review): this listing was extracted with many lines dropped (e.g. the
// DeploymentActiveAfter arguments on 4394/4449, the nLockTimeCutoff
// computation on 4402-4403, and the loop's per-tx validation calls on
// 4438-4441) — confirm details against the upstream file.
4385static bool ContextualCheckBlock(const CBlock &block,
4386 BlockValidationState &state,
4387 const ChainstateManager &chainman,
4388 const CBlockIndex *pindexPrev) {
// Height of the block being validated: genesis (0) when there is no parent.
4389 const int nHeight = pindexPrev == nullptr ? 0 : pindexPrev->nHeight + 1;
4390
4391 // Enforce BIP113 (Median Time Past).
4393 if (DeploymentActiveAfter(pindexPrev, chainman,
4395 assert(pindexPrev != nullptr);
4397 }
4398
4399 const int64_t nMedianTimePast =
4400 pindexPrev == nullptr ? 0 : pindexPrev->GetMedianTimePast();
4401
// Fallback branch of the (partially missing) nLockTimeCutoff initializer:
// when BIP113 is not enforced, the block's own timestamp is the cutoff.
4404 : block.GetBlockTime()};
4405
4406 const Consensus::Params params = chainman.GetConsensus();
// Magnetic Anomaly activation flag; presumably gates the canonical
// transaction-ordering check below (its condition line 4417 is missing).
4407 const bool fIsMagneticAnomalyEnabled =
4409
4410 // Check transactions:
4411 // - canonical ordering
4412 // - ensure they are finalized
4413 // - check they have the minimum size
4414 const CTransaction *prevTx = nullptr;
4415 for (const auto &ptx : block.vtx) {
4416 const CTransaction &tx = *ptx;
// Canonical (CTOR) ordering: each txid must be strictly greater than the
// previous non-coinbase txid; equality means a duplicated transaction.
4418 if (prevTx && (tx.GetId() <= prevTx->GetId())) {
4419 if (tx.GetId() == prevTx->GetId()) {
4421 "tx-duplicate",
4422 strprintf("Duplicated transaction %s",
4423 tx.GetId().ToString()));
4424 }
4425
4426 return state.Invalid(
4428 strprintf("Transaction order is invalid (%s < %s)",
4429 tx.GetId().ToString(),
4430 prevTx->GetId().ToString()));
4431 }
4432
// The coinbase is exempt from the ordering comparison: only start
// tracking prevTx once past it.
4433 if (prevTx || !tx.IsCoinBase()) {
4434 prevTx = &tx;
4435 }
4436 }
4437
// Per-transaction contextual check (call on lines 4438-4439 lost in
// extraction); on failure, propagate the tx-level reason into `state`.
4440 nLockTimeCutoff)) {
4442 tx_state.GetRejectReason(),
4443 tx_state.GetDebugMessage());
4444 }
4445 }
4446
4447 // Enforce rule that the coinbase starts with serialized block height
4448 if (DeploymentActiveAfter(pindexPrev, chainman,
// `expect` (built on the missing line 4450, presumably CScript() << nHeight)
// must be a prefix of the coinbase input's scriptSig.
4451 if (block.vtx[0]->vin[0].scriptSig.size() < expect.size() ||
4452 !std::equal(expect.begin(), expect.end(),
4453 block.vtx[0]->vin[0].scriptSig.begin())) {
4455 "bad-cb-height",
4456 "block height mismatch in coinbase");
4457 }
4458 }
4459
4460 return true;
4461}
4462
// Accept a single block header into the block index. Handles: duplicate
// detection (returning the existing index entry), stateless header checks,
// previous-block lookup and invalid-ancestor rejection, contextual header
// checks, marking descendants of known-failed blocks, and an anti-DoS gate
// requiring the caller to have pre-verified minimal proof-of-work
// (`min_pow_checked`). On success `*ppindex` (if provided) points at the
// index entry for this header.
// NOTE(review): the function-name line (original 4468, presumably
// `ChainstateManager::AcceptBlockHeader`) and several interior lines were
// lost in extraction — confirm against the upstream file.
4469 const CBlockHeader &block, BlockValidationState &state,
4471 const std::optional<CCheckpointData> &test_checkpoints) {
4473 const Config &config = this->GetConfig();
4474 const CChainParams &chainparams = config.GetChainParams();
4475
4476 // Check for duplicate
4477 BlockHash hash = block.GetHash();
4478 BlockMap::iterator miSelf{m_blockman.m_block_index.find(hash)};
// The genesis block bypasses all of the checks below.
4479 if (hash != chainparams.GetConsensus().hashGenesisBlock) {
4480 if (miSelf != m_blockman.m_block_index.end()) {
4481 // Block header is already known.
4482 CBlockIndex *pindex = &(miSelf->second);
4483 if (ppindex) {
4484 *ppindex = pindex;
4485 }
4486
// A known header previously marked invalid is rejected again without
// re-validation.
4487 if (pindex->nStatus.isInvalid()) {
4488 LogPrint(BCLog::VALIDATION, "%s: block %s is marked invalid\n",
4489 __func__, hash.ToString());
4490 return state.Invalid(
4492 }
4493
4494 return true;
4495 }
4496
// Context-free header validity (e.g. proof-of-work encoding) — details in
// CheckBlockHeader, outside this view.
4497 if (!CheckBlockHeader(block, state, chainparams.GetConsensus(),
4498 BlockValidationOptions(config))) {
4500 "%s: Consensus::CheckBlockHeader: %s, %s\n", __func__,
4501 hash.ToString(), state.ToString());
4502 return false;
4503 }
4504
4505 // Get prev block index
4506 BlockMap::iterator mi{
4507 m_blockman.m_block_index.find(block.hashPrevBlock)};
// Headers whose parent is unknown cannot be connected yet.
4508 if (mi == m_blockman.m_block_index.end()) {
4510 "header %s has prev block not found: %s\n",
4511 hash.ToString(), block.hashPrevBlock.ToString());
4513 "prev-blk-not-found");
4514 }
4515
4516 CBlockIndex *pindexPrev = &((*mi).second);
// Reject immediately when building on a parent already known to be bad.
4518 if (pindexPrev->nStatus.isInvalid()) {
4520 "header %s has prev block invalid: %s\n", hash.ToString(),
4521 block.hashPrevBlock.ToString());
4523 "bad-prevblk");
4524 }
4525
// Contextual header checks against the parent (call head on the missing
// line 4526, presumably ContextualCheckBlockHeader) — confirm upstream.
4527 block, state, m_blockman, *this, pindexPrev,
4530 "%s: Consensus::ContextualCheckBlockHeader: %s, %s\n",
4531 __func__, hash.ToString(), state.ToString());
4532 return false;
4533 }
4534
4535 /* Determine if this block descends from any block which has been found
4536 * invalid (m_failed_blocks), then mark pindexPrev and any blocks
4537 * between them as failed. For example:
4538 *
4539 * D3
4540 * /
4541 * B2 - C2
4542 * / \
4543 * A D2 - E2 - F2
4544 * \
4545 * B1 - C1 - D1 - E1
4546 *
4547 * In the case that we attempted to reorg from E1 to F2, only to find
4548 * C2 to be invalid, we would mark D2, E2, and F2 as BLOCK_FAILED_CHILD
4549 * but NOT D3 (it was not in any of our candidate sets at the time).
4550 *
4551 * In any case D3 will also be marked as BLOCK_FAILED_CHILD at restart
4552 * in LoadBlockIndex.
4553 */
4554 if (!pindexPrev->IsValid(BlockValidity::SCRIPTS)) {
4555 // The above does not mean "invalid": it checks if the previous
4556 // block hasn't been validated up to BlockValidity::SCRIPTS. This is
4557 // a performance optimization, in the common case of adding a new
4558 // block to the tip, we don't need to iterate over the failed blocks
4559 // list.
4560 for (const CBlockIndex *failedit : m_failed_blocks) {
// If any known-failed block is an ancestor of pindexPrev, walk back
// from pindexPrev to it, tagging every block on the path as having a
// failed parent (invalid_walk is initialized on the missing line 4563).
4561 if (pindexPrev->GetAncestor(failedit->nHeight) == failedit) {
4562 assert(failedit->nStatus.hasFailed());
4564 while (invalid_walk != failedit) {
4565 invalid_walk->nStatus =
4566 invalid_walk->nStatus.withFailedParent();
4568 invalid_walk = invalid_walk->pprev;
4569 }
4571 "header %s has prev block invalid: %s\n",
4572 hash.ToString(), block.hashPrevBlock.ToString());
4573 return state.Invalid(
4575 "bad-prevblk");
4576 }
4577 }
4578 }
4579 }
// Anti-DoS: refuse to add a header unless the caller attests that minimal
// proof-of-work for it has already been verified.
4580 if (!min_pow_checked) {
4582 "%s: not adding new block header %s, missing anti-dos "
4583 "proof-of-work validation\n",
4584 __func__, hash.ToString());
4586 "too-little-chainwork");
4587 }
// NOTE(review): `pindex` used below is presumably created on the missing
// line 4588 (likely via m_blockman.AddToBlockIndex) — confirm upstream.
4589
4590 if (ppindex) {
4591 *ppindex = pindex;
4592 }
4593
4594 // Since this is the earliest point at which we have determined that a
4595 // header is both new and valid, log here.
4596 //
4597 // These messages are valuable for detecting potential selfish mining
4598 // behavior; if multiple displacing headers are seen near simultaneously
4599 // across many nodes in the network, this might be an indication of selfish
4600 // mining. Having this log by default when not in IBD ensures broad
4601 // availability of this data in case investigation is merited.
4602 const auto msg = strprintf("Saw new header hash=%s height=%d",
4603 hash.ToString(), pindex->nHeight);
4604
// During initial block download this is demoted (to a category log on the
// missing line 4606) to avoid log spam; otherwise log unconditionally.
4605 if (ActiveChainstate().IsInitialBlockDownload()) {
4607 } else {
4608 LogPrintf("%s\n", msg);
4609 }
4610
4611 return true;
4612}
4613
4614// Exposed wrapper for AcceptBlockHeader
4616 const std::vector<CBlockHeader> &headers, bool min_pow_checked,
4618 const std::optional<CCheckpointData> &test_checkpoints) {
4620 {
4621 LOCK(cs_main);
4622 for (const CBlockHeader &header : headers) {
4623 // Use a temp pindex instead of ppindex to avoid a const_cast
4624 CBlockIndex *pindex = nullptr;
4626 header, state, &pindex, min_pow_checked, test_checkpoints);
4627 ActiveChainstate().CheckBlockIndex();
4628
4629 if (!accepted) {
4630 return false;
4631 }
4632
4633 if (ppindex) {
4634 *ppindex = pindex;
4635 }
4636 }
4637 }
4638
4640 if (ActiveChainstate().IsInitialBlockDownload() && ppindex &&
4641 *ppindex) {
4643 const int64_t blocks_left{
4644 (GetTime() - last_accepted.GetBlockTime()) /
4646 const double progress{100.0 * last_accepted.nHeight /
4647 (last_accepted.nHeight + blocks_left)};
4648 LogPrintf("Synchronizing blockheaders, height: %d (~%.2f%%)\n",
4649 last_accepted.nHeight, progress);
4650 }