// Bitcoin Core 24.99.0 ("P2P Digital Currency") — validation.cpp
// NOTE: this file is a source listing extracted from the doxygen-generated
// documentation; some upstream lines were lost in extraction.
1 // Copyright (c) 2009-2010 Satoshi Nakamoto
2 // Copyright (c) 2009-2022 The Bitcoin Core developers
3 // Distributed under the MIT software license, see the accompanying
4 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
5 
6 #include <validation.h>
7 
8 #include <kernel/coinstats.h>
10 
11 #include <arith_uint256.h>
12 #include <chain.h>
13 #include <checkqueue.h>
14 #include <consensus/amount.h>
15 #include <consensus/consensus.h>
16 #include <consensus/merkle.h>
17 #include <consensus/tx_check.h>
18 #include <consensus/tx_verify.h>
19 #include <consensus/validation.h>
20 #include <cuckoocache.h>
21 #include <flatfile.h>
22 #include <fs.h>
23 #include <hash.h>
24 #include <kernel/chainparams.h>
25 #include <kernel/mempool_entry.h>
26 #include <logging.h>
27 #include <logging/timer.h>
28 #include <node/blockstorage.h>
29 #include <node/interface_ui.h>
30 #include <node/utxo_snapshot.h>
31 #include <policy/policy.h>
32 #include <policy/rbf.h>
33 #include <policy/settings.h>
34 #include <pow.h>
35 #include <primitives/block.h>
36 #include <primitives/transaction.h>
37 #include <random.h>
38 #include <reverse_iterator.h>
39 #include <script/script.h>
40 #include <script/sigcache.h>
41 #include <shutdown.h>
42 #include <signet.h>
43 #include <tinyformat.h>
44 #include <txdb.h>
45 #include <txmempool.h>
46 #include <uint256.h>
47 #include <undo.h>
48 #include <util/check.h> // For NDEBUG compile time check
49 #include <util/hasher.h>
50 #include <util/moneystr.h>
51 #include <util/rbf.h>
52 #include <util/strencodings.h>
53 #include <util/system.h>
54 #include <util/time.h>
55 #include <util/trace.h>
56 #include <util/translation.h>
57 #include <validationinterface.h>
58 #include <warnings.h>
59 
60 #include <algorithm>
61 #include <cassert>
62 #include <chrono>
63 #include <deque>
64 #include <numeric>
65 #include <optional>
66 #include <string>
67 #include <utility>
68 
73 
74 using fsbridge::FopenFn;
75 using node::BlockManager;
76 using node::BlockMap;
79 using node::fReindex;
84 
/** Maximum size of the disconnected-transaction pool kept during a reorg
 *  (upstream documents this limit in kilobytes — TODO confirm against upstream). */
static const unsigned int MAX_DISCONNECTED_TX_POOL_SIZE = 20000;
/** Time to wait between writing blocks/block index to disk. */
static constexpr std::chrono::hours DATABASE_WRITE_INTERVAL{1};
/** Time to wait between flushing the chainstate to disk. */
static constexpr std::chrono::hours DATABASE_FLUSH_INTERVAL{24};
/** Maximum age of our tip for us to be considered current for fee estimation
 *  (see IsCurrentForFeeEstimation() below, which compares the tip's block time
 *  against now minus this interval). */
static constexpr std::chrono::hours MAX_FEE_ESTIMATION_TIP_AGE{3};
/** Human-readable descriptions of the -checklevel verification levels. */
const std::vector<std::string> CHECKLEVEL_DOC {
    "level 0 reads the blocks from disk",
    "level 1 verifies block validity",
    "level 2 verifies undo data",
    "level 3 checks disconnection of tip blocks",
    "level 4 tries to reconnect the blocks",
    "each level includes the checks of the previous levels",
};
/** Safety margin, in blocks, kept below the tip when placing prune locks
 *  — presumably to protect against reorg-depth races; TODO confirm. */
static constexpr int PRUNE_LOCK_BUFFER{10};
107 
// Condition variable notified when the best block changes — presumably paired
// with a mutex and best-block hash global that this extraction dropped
// (NOTE(review): verify against upstream; adjacent globals are missing here).
std::condition_variable g_best_block_cv;
111 
113 {
115 
116  // Find the latest block common to locator and chain - we expect that
117  // locator.vHave is sorted descending by height.
118  for (const uint256& hash : locator.vHave) {
119  const CBlockIndex* pindex{m_blockman.LookupBlockIndex(hash)};
120  if (pindex) {
121  if (m_chain.Contains(pindex)) {
122  return pindex;
123  }
124  if (pindex->GetAncestor(m_chain.Height()) == m_chain.Tip()) {
125  return m_chain.Tip();
126  }
127  }
128  }
129  return m_chain.Genesis();
130 }
131 
132 bool CheckInputScripts(const CTransaction& tx, TxValidationState& state,
133  const CCoinsViewCache& inputs, unsigned int flags, bool cacheSigStore,
134  bool cacheFullScriptStore, PrecomputedTransactionData& txdata,
135  std::vector<CScriptCheck>* pvChecks = nullptr)
137 
138 bool CheckFinalTxAtTip(const CBlockIndex& active_chain_tip, const CTransaction& tx)
139 {
141 
142  // CheckFinalTxAtTip() uses active_chain_tip.Height()+1 to evaluate
143  // nLockTime because when IsFinalTx() is called within
144  // AcceptBlock(), the height of the block *being*
145  // evaluated is what is used. Thus if we want to know if a
146  // transaction can be part of the *next* block, we need to call
147  // IsFinalTx() with one more than active_chain_tip.Height().
148  const int nBlockHeight = active_chain_tip.nHeight + 1;
149 
150  // BIP113 requires that time-locked transactions have nLockTime set to
151  // less than the median time of the previous block they're contained in.
152  // When the next block is created its previous block will be the current
153  // chain tip, so we use that to calculate the median time passed to
154  // IsFinalTx().
155  const int64_t nBlockTime{active_chain_tip.GetMedianTimePast()};
156 
157  return IsFinalTx(tx, nBlockHeight, nBlockTime);
158 }
159 
160 namespace {
171 std::optional<std::vector<int>> CalculatePrevHeights(
172  const CBlockIndex& tip,
173  const CCoinsView& coins,
174  const CTransaction& tx)
175 {
176  std::vector<int> prev_heights;
177  prev_heights.resize(tx.vin.size());
178  for (size_t i = 0; i < tx.vin.size(); ++i) {
179  const CTxIn& txin = tx.vin[i];
180  Coin coin;
181  if (!coins.GetCoin(txin.prevout, coin)) {
182  LogPrintf("ERROR: %s: Missing input %d in transaction \'%s\'\n", __func__, i, tx.GetHash().GetHex());
183  return std::nullopt;
184  }
185  if (coin.nHeight == MEMPOOL_HEIGHT) {
186  // Assume all mempool transaction confirm in the next block.
187  prev_heights[i] = tip.nHeight + 1;
188  } else {
189  prev_heights[i] = coin.nHeight;
190  }
191  }
192  return prev_heights;
193 }
194 } // namespace
195 
196 std::optional<LockPoints> CalculateLockPointsAtTip(
197  CBlockIndex* tip,
198  const CCoinsView& coins_view,
199  const CTransaction& tx)
200 {
201  assert(tip);
202 
203  auto prev_heights{CalculatePrevHeights(*tip, coins_view, tx)};
204  if (!prev_heights.has_value()) return std::nullopt;
205 
206  CBlockIndex next_tip;
207  next_tip.pprev = tip;
208  // When SequenceLocks() is called within ConnectBlock(), the height
209  // of the block *being* evaluated is what is used.
210  // Thus if we want to know if a transaction can be part of the
211  // *next* block, we need to use one more than active_chainstate.m_chain.Height()
212  next_tip.nHeight = tip->nHeight + 1;
213  const auto [min_height, min_time] = CalculateSequenceLocks(tx, STANDARD_LOCKTIME_VERIFY_FLAGS, prev_heights.value(), next_tip);
214 
215  // Also store the hash of the block with the highest height of
216  // all the blocks which have sequence locked prevouts.
217  // This hash needs to still be on the chain
218  // for these LockPoint calculations to be valid
219  // Note: It is impossible to correctly calculate a maxInputBlock
220  // if any of the sequence locked inputs depend on unconfirmed txs,
221  // except in the special case where the relative lock time/height
222  // is 0, which is equivalent to no sequence lock. Since we assume
223  // input height of tip+1 for mempool txs and test the resulting
224  // min_height and min_time from CalculateSequenceLocks against tip+1.
225  int max_input_height{0};
226  for (const int height : prev_heights.value()) {
227  // Can ignore mempool inputs since we'll fail if they had non-zero locks
228  if (height != next_tip.nHeight) {
229  max_input_height = std::max(max_input_height, height);
230  }
231  }
232 
233  // tip->GetAncestor(max_input_height) should never return a nullptr
234  // because max_input_height is always less than the tip height.
235  // It would, however, be a bad bug to continue execution, since a
236  // LockPoints object with the maxInputBlock member set to nullptr
237  // signifies no relative lock time.
238  return LockPoints{min_height, min_time, Assert(tip->GetAncestor(max_input_height))};
239 }
240 
242  const LockPoints& lock_points)
243 {
244  assert(tip != nullptr);
245 
246  CBlockIndex index;
247  index.pprev = tip;
248  // CheckSequenceLocksAtTip() uses active_chainstate.m_chain.Height()+1 to evaluate
249  // height based locks because when SequenceLocks() is called within
250  // ConnectBlock(), the height of the block *being*
251  // evaluated is what is used.
252  // Thus if we want to know if a transaction can be part of the
253  // *next* block, we need to use one more than active_chainstate.m_chain.Height()
254  index.nHeight = tip->nHeight + 1;
255 
256  return EvaluateSequenceLocks(index, {lock_points.height, lock_points.time});
257 }
258 
259 // Returns the script flags which should be checked for a given block
260 static unsigned int GetBlockScriptFlags(const CBlockIndex& block_index, const ChainstateManager& chainman);
261 
262 static void LimitMempoolSize(CTxMemPool& pool, CCoinsViewCache& coins_cache)
264 {
266  AssertLockHeld(pool.cs);
267  int expired = pool.Expire(GetTime<std::chrono::seconds>() - pool.m_expiry);
268  if (expired != 0) {
269  LogPrint(BCLog::MEMPOOL, "Expired %i transactions from the memory pool\n", expired);
270  }
271 
272  std::vector<COutPoint> vNoSpendsRemaining;
273  pool.TrimToSize(pool.m_max_size_bytes, &vNoSpendsRemaining);
274  for (const COutPoint& removed : vNoSpendsRemaining)
275  coins_cache.Uncache(removed);
276 }
277 
279 {
281  if (active_chainstate.IsInitialBlockDownload())
282  return false;
283  if (active_chainstate.m_chain.Tip()->GetBlockTime() < count_seconds(GetTime<std::chrono::seconds>() - MAX_FEE_ESTIMATION_TIP_AGE))
284  return false;
285  if (active_chainstate.m_chain.Height() < active_chainstate.m_chainman.m_best_header->nHeight - 1) {
286  return false;
287  }
288  return true;
289 }
290 
292  DisconnectedBlockTransactions& disconnectpool,
293  bool fAddToMempool)
294 {
295  if (!m_mempool) return;
296 
299  std::vector<uint256> vHashUpdate;
300  // disconnectpool's insertion_order index sorts the entries from
301  // oldest to newest, but the oldest entry will be the last tx from the
302  // latest mined block that was disconnected.
303  // Iterate disconnectpool in reverse, so that we add transactions
304  // back to the mempool starting with the earliest transaction that had
305  // been previously seen in a block.
306  auto it = disconnectpool.queuedTx.get<insertion_order>().rbegin();
307  while (it != disconnectpool.queuedTx.get<insertion_order>().rend()) {
308  // ignore validation errors in resurrected transactions
309  if (!fAddToMempool || (*it)->IsCoinBase() ||
310  AcceptToMemoryPool(*this, *it, GetTime(),
311  /*bypass_limits=*/true, /*test_accept=*/false).m_result_type !=
313  // If the transaction doesn't make it in to the mempool, remove any
314  // transactions that depend on it (which would now be orphans).
316  } else if (m_mempool->exists(GenTxid::Txid((*it)->GetHash()))) {
317  vHashUpdate.push_back((*it)->GetHash());
318  }
319  ++it;
320  }
321  disconnectpool.queuedTx.clear();
322  // AcceptToMemoryPool/addUnchecked all assume that new mempool entries have
323  // no in-mempool children, which is generally not true when adding
324  // previously-confirmed transactions back to the mempool.
325  // UpdateTransactionsFromBlock finds descendants of any transactions in
326  // the disconnectpool that were added back and cleans up the mempool state.
328 
329  // Predicate to use for filtering transactions in removeForReorg.
330  // Checks whether the transaction is still final and, if it spends a coinbase output, mature.
331  // Also updates valid entries' cached LockPoints if needed.
332  // If false, the tx is still valid and its lockpoints are updated.
333  // If true, the tx would be invalid in the next block; remove this entry and all of its descendants.
334  const auto filter_final_and_mature = [this](CTxMemPool::txiter it)
338  const CTransaction& tx = it->GetTx();
339 
340  // The transaction must be final.
341  if (!CheckFinalTxAtTip(*Assert(m_chain.Tip()), tx)) return true;
342 
343  const LockPoints& lp = it->GetLockPoints();
344  // CheckSequenceLocksAtTip checks if the transaction will be final in the next block to be
345  // created on top of the new chain.
348  return true;
349  }
350  } else {
351  const CCoinsViewMemPool view_mempool{&CoinsTip(), *m_mempool};
352  const std::optional<LockPoints> new_lock_points{CalculateLockPointsAtTip(m_chain.Tip(), view_mempool, tx)};
353  if (new_lock_points.has_value() && CheckSequenceLocksAtTip(m_chain.Tip(), *new_lock_points)) {
354  // Now update the mempool entry lockpoints as well.
355  m_mempool->mapTx.modify(it, [&new_lock_points](CTxMemPoolEntry& e) { e.UpdateLockPoints(*new_lock_points); });
356  } else {
357  return true;
358  }
359  }
360 
361  // If the transaction spends any coinbase outputs, it must be mature.
362  if (it->GetSpendsCoinbase()) {
363  for (const CTxIn& txin : tx.vin) {
364  auto it2 = m_mempool->mapTx.find(txin.prevout.hash);
365  if (it2 != m_mempool->mapTx.end())
366  continue;
367  const Coin& coin{CoinsTip().AccessCoin(txin.prevout)};
368  assert(!coin.IsSpent());
369  const auto mempool_spend_height{m_chain.Tip()->nHeight + 1};
370  if (coin.IsCoinBase() && mempool_spend_height - coin.nHeight < COINBASE_MATURITY) {
371  return true;
372  }
373  }
374  }
375  // Transaction is still valid and cached LockPoints are updated.
376  return false;
377  };
378 
379  // We also need to remove any now-immature transactions
380  m_mempool->removeForReorg(m_chain, filter_final_and_mature);
381  // Re-limit mempool size, in case we added any transactions
383 }
384 
391  const CCoinsViewCache& view, const CTxMemPool& pool,
392  unsigned int flags, PrecomputedTransactionData& txdata, CCoinsViewCache& coins_tip)
394 {
397 
399  for (const CTxIn& txin : tx.vin) {
400  const Coin& coin = view.AccessCoin(txin.prevout);
401 
402  // This coin was checked in PreChecks and MemPoolAccept
403  // has been holding cs_main since then.
404  Assume(!coin.IsSpent());
405  if (coin.IsSpent()) return false;
406 
407  // If the Coin is available, there are 2 possibilities:
408  // it is available in our current ChainstateActive UTXO set,
409  // or it's a UTXO provided by a transaction in our mempool.
410  // Ensure the scriptPubKeys in Coins from CoinsView are correct.
411  const CTransactionRef& txFrom = pool.get(txin.prevout.hash);
412  if (txFrom) {
413  assert(txFrom->GetHash() == txin.prevout.hash);
414  assert(txFrom->vout.size() > txin.prevout.n);
415  assert(txFrom->vout[txin.prevout.n] == coin.out);
416  } else {
417  const Coin& coinFromUTXOSet = coins_tip.AccessCoin(txin.prevout);
418  assert(!coinFromUTXOSet.IsSpent());
419  assert(coinFromUTXOSet.out == coin.out);
420  }
421  }
422 
423  // Call CheckInputScripts() to cache signature and script validity against current tip consensus rules.
424  return CheckInputScripts(tx, state, view, flags, /* cacheSigStore= */ true, /* cacheFullScriptStore= */ true, txdata);
425 }
426 
427 namespace {
428 
/**
 * Encapsulates all state and logic for validating one transaction (or a
 * package of transactions) for mempool acceptance. Constructed per
 * AcceptToMemoryPool call with the pool and active chainstate to validate
 * against. (NOTE(review): the original doxygen member comments were dropped
 * by the extraction; the comments below are reconstructed from the visible
 * code — confirm against upstream.)
 */
class MemPoolAccept
{
public:
    explicit MemPoolAccept(CTxMemPool& mempool, Chainstate& active_chainstate) :
        m_pool(mempool),
        m_view(&m_dummy),
        m_viewmempool(&active_chainstate.CoinsTip(), m_pool),
        m_active_chainstate(active_chainstate),
        m_limits{m_pool.m_limits}
    {
    }

    // We put the arguments we're handed into a struct, so we can pass them
    // around easier.
    struct ATMPArgs {
        const CChainParams& m_chainparams;
        // Time at which the transaction is considered accepted (used when
        // constructing the mempool entry).
        const int64_t m_accept_time;
        // Skip fee-rate and size limit checks (used when re-adding txs from
        // disconnected blocks during a reorg).
        const bool m_bypass_limits;
        /*
         * Return any outpoints which were not previously present in the coins
         * cache, but were added as a result of validating the tx for mempool
         * acceptance. This allows the caller to optionally remove the cache
         * additions if the associated transaction ends up being rejected by
         * the mempool.
         */
        std::vector<COutPoint>& m_coins_to_uncache;
        // Validate only; do not actually submit to the mempool.
        const bool m_test_accept;
        // Whether replacing conflicting mempool transactions (RBF) is permitted.
        const bool m_allow_replacement;
        // Whether this transaction is being validated as part of a package
        // submission to the mempool.
        const bool m_package_submission;
        // Whether fee checks are deferred to package-level feerate evaluation.
        const bool m_package_feerates;

        /** Parameters for single-transaction mempool acceptance. */
        static ATMPArgs SingleAccept(const CChainParams& chainparams, int64_t accept_time,
                                     bool bypass_limits, std::vector<COutPoint>& coins_to_uncache,
                                     bool test_accept) {
            return ATMPArgs{/* m_chainparams */ chainparams,
                            /* m_accept_time */ accept_time,
                            /* m_bypass_limits */ bypass_limits,
                            /* m_coins_to_uncache */ coins_to_uncache,
                            /* m_test_accept */ test_accept,
                            /* m_allow_replacement */ true,
                            /* m_package_submission */ false,
                            /* m_package_feerates */ false,
            };
        }

        /** Parameters for test-only package validation (nothing is submitted). */
        static ATMPArgs PackageTestAccept(const CChainParams& chainparams, int64_t accept_time,
                                          std::vector<COutPoint>& coins_to_uncache) {
            return ATMPArgs{/* m_chainparams */ chainparams,
                            /* m_accept_time */ accept_time,
                            /* m_bypass_limits */ false,
                            /* m_coins_to_uncache */ coins_to_uncache,
                            /* m_test_accept */ true,
                            /* m_allow_replacement */ false,
                            /* m_package_submission */ false, // not submitting to mempool
                            /* m_package_feerates */ false,
            };
        }

        /** Parameters for child-with-unconfirmed-parents package submission. */
        static ATMPArgs PackageChildWithParents(const CChainParams& chainparams, int64_t accept_time,
                                                std::vector<COutPoint>& coins_to_uncache) {
            return ATMPArgs{/* m_chainparams */ chainparams,
                            /* m_accept_time */ accept_time,
                            /* m_bypass_limits */ false,
                            /* m_coins_to_uncache */ coins_to_uncache,
                            /* m_test_accept */ false,
                            /* m_allow_replacement */ false,
                            /* m_package_submission */ true,
                            /* m_package_feerates */ true,
            };
        }

        /** Parameters for validating a single transaction that is part of a package. */
        static ATMPArgs SingleInPackageAccept(const ATMPArgs& package_args) {
            return ATMPArgs{/* m_chainparams */ package_args.m_chainparams,
                            /* m_accept_time */ package_args.m_accept_time,
                            /* m_bypass_limits */ false,
                            /* m_coins_to_uncache */ package_args.m_coins_to_uncache,
                            /* m_test_accept */ package_args.m_test_accept,
                            /* m_allow_replacement */ true,
                            /* m_package_submission */ false,
                            /* m_package_feerates */ false, // only 1 transaction
            };
        }

    private:
        // Private ctor to avoid exposing details to clients and allowing the possibility of
        // mixing up the order of the arguments. Use static functions above instead.
        ATMPArgs(const CChainParams& chainparams,
                 int64_t accept_time,
                 bool bypass_limits,
                 std::vector<COutPoint>& coins_to_uncache,
                 bool test_accept,
                 bool allow_replacement,
                 bool package_submission,
                 bool package_feerates)
            : m_chainparams{chainparams},
              m_accept_time{accept_time},
              m_bypass_limits{bypass_limits},
              m_coins_to_uncache{coins_to_uncache},
              m_test_accept{test_accept},
              m_allow_replacement{allow_replacement},
              m_package_submission{package_submission},
              m_package_feerates{package_feerates}
        {
        }
    };

    // Single transaction acceptance
    MempoolAcceptResult AcceptSingleTransaction(const CTransactionRef& ptx, ATMPArgs& args) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    /** Validate (and maybe submit) multiple transactions, treated as a package. */
    PackageMempoolAcceptResult AcceptMultipleTransactions(const std::vector<CTransactionRef>& txns, ATMPArgs& args) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    /** Validate (and maybe submit) a package per package policy rules. */
    PackageMempoolAcceptResult AcceptPackage(const Package& package, ATMPArgs& args) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

private:
    // All the intermediate state that gets passed between the various levels
    // of checking a given transaction.
    struct Workspace {
        explicit Workspace(const CTransactionRef& ptx) : m_ptx(ptx), m_hash(ptx->GetHash()) {}
        // Txids of mempool transactions this transaction directly conflicts
        // with (populated in PreChecks).
        std::set<uint256> m_conflicts;
        // Mempool iterators for the entries in m_conflicts.
        CTxMemPool::setEntries m_iters_conflicting;
        // All conflicting mempool entries — presumably including descendants
        // of the direct conflicts (populated by the RBF code); confirm upstream.
        CTxMemPool::setEntries m_all_conflicting;
        // In-mempool ancestors of this transaction.
        CTxMemPool::setEntries m_ancestors;
        // The mempool entry constructed for this transaction in PreChecks;
        // inserted into the mempool on Finalize.
        std::unique_ptr<CTxMemPoolEntry> m_entry;
        // Transactions removed from the mempool to make room for this one
        // (RBF replacements) — confirm against upstream docs.
        std::list<CTransactionRef> m_replaced_transactions;

        // Virtual size of this transaction (from the constructed entry).
        int64_t m_vsize;
        // Fees paid by this transaction (inputs minus outputs).
        CAmount m_base_fees;
        // Base fees plus any PrioritiseTransaction fee delta.
        CAmount m_modified_fees;
        // Total fees of the conflicting entries this tx would replace.
        CAmount m_conflicting_fees{0};
        // Total size of the conflicting entries this tx would replace.
        size_t m_conflicting_size{0};

        // Feerate of the package this transaction belongs to, when validated
        // with package feerates; 0 otherwise.
        CFeeRate m_package_feerate{0};

        const CTransactionRef& m_ptx;
        // Cached txid of m_ptx.
        const uint256& m_hash;
        TxValidationState m_state;
        // Reusable precomputed data for the (potentially repeated) script checks.
        PrecomputedTransactionData m_precomputed_txdata;
    };

    // Run the policy checks on a given transaction, excluding any script checks.
    // Looks up inputs, calculates feerate, considers replacement, evaluates
    // package limits, etc. As this function can be invoked for "free" by a peer,
    // only tests that are fast should be done here (to avoid CPU DoS).
    bool PreChecks(ATMPArgs& args, Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);

    // Run checks for mempool replace-by-fee.
    bool ReplacementChecks(Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);

    // Enforce package mempool ancestor/descendant limits (distinct from individual
    // ancestor/descendant limits done in PreChecks).
    bool PackageMempoolChecks(const std::vector<CTransactionRef>& txns,
                              PackageValidationState& package_state) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);

    // Run the script checks using our policy flags. As this can be slow, we should
    // only invoke this on transactions that have otherwise passed policy checks.
    bool PolicyScriptChecks(const ATMPArgs& args, Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);

    // Re-run the script checks, using consensus flags, and try to cache the
    // result in the scriptcache. This should be done after
    // PolicyScriptChecks(). This requires that all inputs either be in our
    // utxo set or in the mempool.
    bool ConsensusScriptChecks(const ATMPArgs& args, Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);

    // Try to add the transaction to the mempool, removing any conflicts first.
    // Returns true if the transaction is in the mempool after any size
    // limiting is performed, false otherwise.
    bool Finalize(const ATMPArgs& args, Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);

    // Submit all transactions to the mempool and call ConsensusScriptChecks to add to the script
    // cache - should only be called after successful validation of all transactions in the package.
    // The package may end up partially-submitted after size limiting; returns true if all
    // transactions are successfully added to the mempool, false otherwise.
    bool SubmitPackage(const ATMPArgs& args, std::vector<Workspace>& workspaces, PackageValidationState& package_state,
                       std::map<const uint256, const MempoolAcceptResult>& results)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);

    // Compare a package's feerate against minimum allowed.
    // NOTE(review): the extraction appears to have dropped an
    // `AssertLockHeld(::cs_main);` line at the top of this body — confirm
    // against upstream.
    bool CheckFeeRate(size_t package_size, CAmount package_fee, TxValidationState& state) EXCLUSIVE_LOCKS_REQUIRED(::cs_main, m_pool.cs)
    {
        AssertLockHeld(m_pool.cs);
        CAmount mempoolRejectFee = m_pool.GetMinFee().GetFee(package_size);
        if (mempoolRejectFee > 0 && package_fee < mempoolRejectFee) {
            return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "mempool min fee not met", strprintf("%d < %d", package_fee, mempoolRejectFee));
        }

        if (package_fee < m_pool.m_min_relay_feerate.GetFee(package_size)) {
            return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "min relay fee not met",
                                 strprintf("%d < %d", package_fee, m_pool.m_min_relay_feerate.GetFee(package_size)));
        }
        return true;
    }

private:
    CTxMemPool& m_pool;
    CCoinsViewCache m_view;
    CCoinsViewMemPool m_viewmempool;
    // Dummy backend m_view falls back to once all inputs are cached, so no
    // further coins are pulled from disk/mempool.
    CCoinsView m_dummy;

    Chainstate& m_active_chainstate;

    CTxMemPool::Limits m_limits;

    // Whether the transaction(s) being validated would replace mempool entries.
    bool m_rbf{false};
};
680 
681 bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws)
682 {
684  AssertLockHeld(m_pool.cs);
685  const CTransactionRef& ptx = ws.m_ptx;
686  const CTransaction& tx = *ws.m_ptx;
687  const uint256& hash = ws.m_hash;
688 
689  // Copy/alias what we need out of args
690  const int64_t nAcceptTime = args.m_accept_time;
691  const bool bypass_limits = args.m_bypass_limits;
692  std::vector<COutPoint>& coins_to_uncache = args.m_coins_to_uncache;
693 
694  // Alias what we need out of ws
695  TxValidationState& state = ws.m_state;
696  std::unique_ptr<CTxMemPoolEntry>& entry = ws.m_entry;
697 
698  if (!CheckTransaction(tx, state)) {
699  return false; // state filled in by CheckTransaction
700  }
701 
702  // Coinbase is only valid in a block, not as a loose transaction
703  if (tx.IsCoinBase())
704  return state.Invalid(TxValidationResult::TX_CONSENSUS, "coinbase");
705 
706  // Rather not work on nonstandard transactions (unless -testnet/-regtest)
707  std::string reason;
708  if (m_pool.m_require_standard && !IsStandardTx(tx, m_pool.m_max_datacarrier_bytes, m_pool.m_permit_bare_multisig, m_pool.m_dust_relay_feerate, reason)) {
709  return state.Invalid(TxValidationResult::TX_NOT_STANDARD, reason);
710  }
711 
712  // Transactions smaller than 65 non-witness bytes are not relayed to mitigate CVE-2017-12842.
714  return state.Invalid(TxValidationResult::TX_NOT_STANDARD, "tx-size-small");
715 
716  // Only accept nLockTime-using transactions that can be mined in the next
717  // block; we don't want our mempool filled up with transactions that can't
718  // be mined yet.
719  if (!CheckFinalTxAtTip(*Assert(m_active_chainstate.m_chain.Tip()), tx)) {
720  return state.Invalid(TxValidationResult::TX_PREMATURE_SPEND, "non-final");
721  }
722 
723  if (m_pool.exists(GenTxid::Wtxid(tx.GetWitnessHash()))) {
724  // Exact transaction already exists in the mempool.
725  return state.Invalid(TxValidationResult::TX_CONFLICT, "txn-already-in-mempool");
726  } else if (m_pool.exists(GenTxid::Txid(tx.GetHash()))) {
727  // Transaction with the same non-witness data but different witness (same txid, different
728  // wtxid) already exists in the mempool.
729  return state.Invalid(TxValidationResult::TX_CONFLICT, "txn-same-nonwitness-data-in-mempool");
730  }
731 
732  // Check for conflicts with in-memory transactions
733  for (const CTxIn &txin : tx.vin)
734  {
735  const CTransaction* ptxConflicting = m_pool.GetConflictTx(txin.prevout);
736  if (ptxConflicting) {
737  if (!args.m_allow_replacement) {
738  // Transaction conflicts with a mempool tx, but we're not allowing replacements.
739  return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "bip125-replacement-disallowed");
740  }
741  if (!ws.m_conflicts.count(ptxConflicting->GetHash()))
742  {
743  // Transactions that don't explicitly signal replaceability are
744  // *not* replaceable with the current logic, even if one of their
745  // unconfirmed ancestors signals replaceability. This diverges
746  // from BIP125's inherited signaling description (see CVE-2021-31876).
747  // Applications relying on first-seen mempool behavior should
748  // check all unconfirmed ancestors; otherwise an opt-in ancestor
749  // might be replaced, causing removal of this descendant.
750  //
751  // If replaceability signaling is ignored due to node setting,
752  // replacement is always allowed.
753  if (!m_pool.m_full_rbf && !SignalsOptInRBF(*ptxConflicting)) {
754  return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "txn-mempool-conflict");
755  }
756 
757  ws.m_conflicts.insert(ptxConflicting->GetHash());
758  }
759  }
760  }
761 
762  m_view.SetBackend(m_viewmempool);
763 
764  const CCoinsViewCache& coins_cache = m_active_chainstate.CoinsTip();
765  // do all inputs exist?
766  for (const CTxIn& txin : tx.vin) {
767  if (!coins_cache.HaveCoinInCache(txin.prevout)) {
768  coins_to_uncache.push_back(txin.prevout);
769  }
770 
771  // Note: this call may add txin.prevout to the coins cache
772  // (coins_cache.cacheCoins) by way of FetchCoin(). It should be removed
773  // later (via coins_to_uncache) if this tx turns out to be invalid.
774  if (!m_view.HaveCoin(txin.prevout)) {
775  // Are inputs missing because we already have the tx?
776  for (size_t out = 0; out < tx.vout.size(); out++) {
777  // Optimistically just do efficient check of cache for outputs
778  if (coins_cache.HaveCoinInCache(COutPoint(hash, out))) {
779  return state.Invalid(TxValidationResult::TX_CONFLICT, "txn-already-known");
780  }
781  }
782  // Otherwise assume this might be an orphan tx for which we just haven't seen parents yet
783  return state.Invalid(TxValidationResult::TX_MISSING_INPUTS, "bad-txns-inputs-missingorspent");
784  }
785  }
786 
787  // This is const, but calls into the back end CoinsViews. The CCoinsViewDB at the bottom of the
788  // hierarchy brings the best block into scope. See CCoinsViewDB::GetBestBlock().
789  m_view.GetBestBlock();
790 
791  // we have all inputs cached now, so switch back to dummy (to protect
792  // against bugs where we pull more inputs from disk that miss being added
793  // to coins_to_uncache)
794  m_view.SetBackend(m_dummy);
795 
796  assert(m_active_chainstate.m_blockman.LookupBlockIndex(m_view.GetBestBlock()) == m_active_chainstate.m_chain.Tip());
797 
798  // Only accept BIP68 sequence locked transactions that can be mined in the next
799  // block; we don't want our mempool filled up with transactions that can't
800  // be mined yet.
801  // Pass in m_view which has all of the relevant inputs cached. Note that, since m_view's
802  // backend was removed, it no longer pulls coins from the mempool.
803  const std::optional<LockPoints> lock_points{CalculateLockPointsAtTip(m_active_chainstate.m_chain.Tip(), m_view, tx)};
804  if (!lock_points.has_value() || !CheckSequenceLocksAtTip(m_active_chainstate.m_chain.Tip(), *lock_points)) {
805  return state.Invalid(TxValidationResult::TX_PREMATURE_SPEND, "non-BIP68-final");
806  }
807 
808  // The mempool holds txs for the next block, so pass height+1 to CheckTxInputs
809  if (!Consensus::CheckTxInputs(tx, state, m_view, m_active_chainstate.m_chain.Height() + 1, ws.m_base_fees)) {
810  return false; // state filled in by CheckTxInputs
811  }
812 
813  if (m_pool.m_require_standard && !AreInputsStandard(tx, m_view)) {
814  return state.Invalid(TxValidationResult::TX_INPUTS_NOT_STANDARD, "bad-txns-nonstandard-inputs");
815  }
816 
817  // Check for non-standard witnesses.
818  if (tx.HasWitness() && m_pool.m_require_standard && !IsWitnessStandard(tx, m_view)) {
819  return state.Invalid(TxValidationResult::TX_WITNESS_MUTATED, "bad-witness-nonstandard");
820  }
821 
822  int64_t nSigOpsCost = GetTransactionSigOpCost(tx, m_view, STANDARD_SCRIPT_VERIFY_FLAGS);
823 
824  // ws.m_modified_fees includes any fee deltas from PrioritiseTransaction
825  ws.m_modified_fees = ws.m_base_fees;
826  m_pool.ApplyDelta(hash, ws.m_modified_fees);
827 
828  // Keep track of transactions that spend a coinbase, which we re-scan
829  // during reorgs to ensure COINBASE_MATURITY is still met.
830  bool fSpendsCoinbase = false;
831  for (const CTxIn &txin : tx.vin) {
832  const Coin &coin = m_view.AccessCoin(txin.prevout);
833  if (coin.IsCoinBase()) {
834  fSpendsCoinbase = true;
835  break;
836  }
837  }
838 
839  entry.reset(new CTxMemPoolEntry(ptx, ws.m_base_fees, nAcceptTime, m_active_chainstate.m_chain.Height(),
840  fSpendsCoinbase, nSigOpsCost, lock_points.value()));
841  ws.m_vsize = entry->GetTxSize();
842 
843  if (nSigOpsCost > MAX_STANDARD_TX_SIGOPS_COST)
844  return state.Invalid(TxValidationResult::TX_NOT_STANDARD, "bad-txns-too-many-sigops",
845  strprintf("%d", nSigOpsCost));
846 
847  // No individual transactions are allowed below the min relay feerate and mempool min feerate except from
848  // disconnected blocks and transactions in a package. Package transactions will be checked using
849  // package feerate later.
850  if (!bypass_limits && !args.m_package_feerates && !CheckFeeRate(ws.m_vsize, ws.m_modified_fees, state)) return false;
851 
852  ws.m_iters_conflicting = m_pool.GetIterSet(ws.m_conflicts);
853  // Calculate in-mempool ancestors, up to a limit.
854  if (ws.m_conflicts.size() == 1) {
855  // In general, when we receive an RBF transaction with mempool conflicts, we want to know whether we
856  // would meet the chain limits after the conflicts have been removed. However, there isn't a practical
857  // way to do this short of calculating the ancestor and descendant sets with an overlay cache of
858  // changed mempool entries. Due to both implementation and runtime complexity concerns, this isn't
859  // very realistic, thus we only ensure a limited set of transactions are RBF'able despite mempool
860  // conflicts here. Importantly, we need to ensure that some transactions which were accepted using
861  // the below carve-out are able to be RBF'ed, without impacting the security the carve-out provides
862  // for off-chain contract systems (see link in the comment below).
863  //
864  // Specifically, the subset of RBF transactions which we allow despite chain limits are those which
865  // conflict directly with exactly one other transaction (but may evict children of said transaction),
866  // and which are not adding any new mempool dependencies. Note that the "no new mempool dependencies"
867  // check is accomplished later, so we don't bother doing anything about it here, but if our
868  // policy changes, we may need to move that check to here instead of removing it wholesale.
869  //
870  // Such transactions are clearly not merging any existing packages, so we are only concerned with
871  // ensuring that (a) no package is growing past the package size (not count) limits and (b) we are
872  // not allowing something to effectively use the (below) carve-out spot when it shouldn't be allowed
873  // to.
874  //
875  // To check these we first check if we meet the RBF criteria, above, and increment the descendant
876  // limits by the direct conflict and its descendants (as these are recalculated in
877  // CalculateMempoolAncestors by assuming the new transaction being added is a new descendant, with no
878  // removals, of each parent's existing dependent set). The ancestor count limits are unmodified (as
879  // the ancestor limits should be the same for both our new transaction and any conflicts).
880  // We don't bother incrementing m_limit_descendants by the full removal count as that limit never comes
881  // into force here (as we're only adding a single transaction).
882  assert(ws.m_iters_conflicting.size() == 1);
883  CTxMemPool::txiter conflict = *ws.m_iters_conflicting.begin();
884 
885  m_limits.descendant_count += 1;
886  m_limits.descendant_size_vbytes += conflict->GetSizeWithDescendants();
887  }
888 
889  auto ancestors{m_pool.CalculateMemPoolAncestors(*entry, m_limits)};
890  if (!ancestors) {
891  // If CalculateMemPoolAncestors fails second time, we want the original error string.
892  // Contracting/payment channels CPFP carve-out:
893  // If the new transaction is relatively small (up to 40k weight)
894  // and has at most one ancestor (ie ancestor limit of 2, including
895  // the new transaction), allow it if its parent has exactly the
896  // descendant limit descendants.
897  //
898  // This allows protocols which rely on distrusting counterparties
899  // being able to broadcast descendants of an unconfirmed transaction
900  // to be secure by simply only having two immediately-spendable
901  // outputs - one for each counterparty. For more info on the uses for
902  // this, see https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2018-November/016518.html
903  CTxMemPool::Limits cpfp_carve_out_limits{
904  .ancestor_count = 2,
905  .ancestor_size_vbytes = m_limits.ancestor_size_vbytes,
906  .descendant_count = m_limits.descendant_count + 1,
907  .descendant_size_vbytes = m_limits.descendant_size_vbytes + EXTRA_DESCENDANT_TX_SIZE_LIMIT,
908  };
909  const auto error_message{util::ErrorString(ancestors).original};
910  if (ws.m_vsize > EXTRA_DESCENDANT_TX_SIZE_LIMIT) {
911  return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "too-long-mempool-chain", error_message);
912  }
913  ancestors = m_pool.CalculateMemPoolAncestors(*entry, cpfp_carve_out_limits);
914  if (!ancestors) return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "too-long-mempool-chain", error_message);
915  }
916 
917  ws.m_ancestors = *ancestors;
918 
919  // A transaction that spends outputs that would be replaced by it is invalid. Now
920  // that we have the set of all ancestors we can detect this
921  // pathological case by making sure ws.m_conflicts and ws.m_ancestors don't
922  // intersect.
923  if (const auto err_string{EntriesAndTxidsDisjoint(ws.m_ancestors, ws.m_conflicts, hash)}) {
924  // We classify this as a consensus error because a transaction depending on something it
925  // conflicts with would be inconsistent.
926  return state.Invalid(TxValidationResult::TX_CONSENSUS, "bad-txns-spends-conflicting-tx", *err_string);
927  }
928 
929  m_rbf = !ws.m_conflicts.empty();
930  return true;
931 }
932 
933 bool MemPoolAccept::ReplacementChecks(Workspace& ws)
934 {
936  AssertLockHeld(m_pool.cs);
937 
938  const CTransaction& tx = *ws.m_ptx;
939  const uint256& hash = ws.m_hash;
940  TxValidationState& state = ws.m_state;
941 
942  CFeeRate newFeeRate(ws.m_modified_fees, ws.m_vsize);
943  // Enforce Rule #6. The replacement transaction must have a higher feerate than its direct conflicts.
944  // - The motivation for this check is to ensure that the replacement transaction is preferable for
945  // block-inclusion, compared to what would be removed from the mempool.
946  // - This logic predates ancestor feerate-based transaction selection, which is why it doesn't
947  // consider feerates of descendants.
948  // - Note: Ancestor feerate-based transaction selection has made this comparison insufficient to
949  // guarantee that this is incentive-compatible for miners, because it is possible for a
950  // descendant transaction of a direct conflict to pay a higher feerate than the transaction that
951  // might replace them, under these rules.
952  if (const auto err_string{PaysMoreThanConflicts(ws.m_iters_conflicting, newFeeRate, hash)}) {
953  return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "insufficient fee", *err_string);
954  }
955 
956  // Calculate all conflicting entries and enforce Rule #5.
957  if (const auto err_string{GetEntriesForConflicts(tx, m_pool, ws.m_iters_conflicting, ws.m_all_conflicting)}) {
959  "too many potential replacements", *err_string);
960  }
961  // Enforce Rule #2.
962  if (const auto err_string{HasNoNewUnconfirmed(tx, m_pool, ws.m_iters_conflicting)}) {
964  "replacement-adds-unconfirmed", *err_string);
965  }
966  // Check if it's economically rational to mine this transaction rather than the ones it
967  // replaces and pays for its own relay fees. Enforce Rules #3 and #4.
968  for (CTxMemPool::txiter it : ws.m_all_conflicting) {
969  ws.m_conflicting_fees += it->GetModifiedFee();
970  ws.m_conflicting_size += it->GetTxSize();
971  }
972  if (const auto err_string{PaysForRBF(ws.m_conflicting_fees, ws.m_modified_fees, ws.m_vsize,
973  m_pool.m_incremental_relay_feerate, hash)}) {
974  return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "insufficient fee", *err_string);
975  }
976  return true;
977 }
978 
979 bool MemPoolAccept::PackageMempoolChecks(const std::vector<CTransactionRef>& txns,
980  PackageValidationState& package_state)
981 {
983  AssertLockHeld(m_pool.cs);
984 
985  // CheckPackageLimits expects the package transactions to not already be in the mempool.
986  assert(std::all_of(txns.cbegin(), txns.cend(), [this](const auto& tx)
987  { return !m_pool.exists(GenTxid::Txid(tx->GetHash()));}));
988 
989  std::string err_string;
990  if (!m_pool.CheckPackageLimits(txns, m_limits, err_string)) {
991  // This is a package-wide error, separate from an individual transaction error.
992  return package_state.Invalid(PackageValidationResult::PCKG_POLICY, "package-mempool-limits", err_string);
993  }
994  return true;
995 }
996 
997 bool MemPoolAccept::PolicyScriptChecks(const ATMPArgs& args, Workspace& ws)
998 {
1000  AssertLockHeld(m_pool.cs);
1001  const CTransaction& tx = *ws.m_ptx;
1002  TxValidationState& state = ws.m_state;
1003 
1004  constexpr unsigned int scriptVerifyFlags = STANDARD_SCRIPT_VERIFY_FLAGS;
1005 
1006  // Check input scripts and signatures.
1007  // This is done last to help prevent CPU exhaustion denial-of-service attacks.
1008  if (!CheckInputScripts(tx, state, m_view, scriptVerifyFlags, true, false, ws.m_precomputed_txdata)) {
1009  // SCRIPT_VERIFY_CLEANSTACK requires SCRIPT_VERIFY_WITNESS, so we
1010  // need to turn both off, and compare against just turning off CLEANSTACK
1011  // to see if the failure is specifically due to witness validation.
1012  TxValidationState state_dummy; // Want reported failures to be from first CheckInputScripts
1013  if (!tx.HasWitness() && CheckInputScripts(tx, state_dummy, m_view, scriptVerifyFlags & ~(SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_CLEANSTACK), true, false, ws.m_precomputed_txdata) &&
1014  !CheckInputScripts(tx, state_dummy, m_view, scriptVerifyFlags & ~SCRIPT_VERIFY_CLEANSTACK, true, false, ws.m_precomputed_txdata)) {
1015  // Only the witness is missing, so the transaction itself may be fine.
1017  state.GetRejectReason(), state.GetDebugMessage());
1018  }
1019  return false; // state filled in by CheckInputScripts
1020  }
1021 
1022  return true;
1023 }
1024 
1025 bool MemPoolAccept::ConsensusScriptChecks(const ATMPArgs& args, Workspace& ws)
1026 {
1028  AssertLockHeld(m_pool.cs);
1029  const CTransaction& tx = *ws.m_ptx;
1030  const uint256& hash = ws.m_hash;
1031  TxValidationState& state = ws.m_state;
1032 
1033  // Check again against the current block tip's script verification
1034  // flags to cache our script execution flags. This is, of course,
1035  // useless if the next block has different script flags from the
1036  // previous one, but because the cache tracks script flags for us it
1037  // will auto-invalidate and we'll just have a few blocks of extra
1038  // misses on soft-fork activation.
1039  //
1040  // This is also useful in case of bugs in the standard flags that cause
1041  // transactions to pass as valid when they're actually invalid. For
1042  // instance the STRICTENC flag was incorrectly allowing certain
1043  // CHECKSIG NOT scripts to pass, even though they were invalid.
1044  //
1045  // There is a similar check in CreateNewBlock() to prevent creating
1046  // invalid blocks (using TestBlockValidity), however allowing such
1047  // transactions into the mempool can be exploited as a DoS attack.
1048  unsigned int currentBlockScriptVerifyFlags{GetBlockScriptFlags(*m_active_chainstate.m_chain.Tip(), m_active_chainstate.m_chainman)};
1049  if (!CheckInputsFromMempoolAndCache(tx, state, m_view, m_pool, currentBlockScriptVerifyFlags,
1050  ws.m_precomputed_txdata, m_active_chainstate.CoinsTip())) {
1051  LogPrintf("BUG! PLEASE REPORT THIS! CheckInputScripts failed against latest-block but not STANDARD flags %s, %s\n", hash.ToString(), state.ToString());
1052  return Assume(false);
1053  }
1054 
1055  return true;
1056 }
1057 
1058 bool MemPoolAccept::Finalize(const ATMPArgs& args, Workspace& ws)
1059 {
1061  AssertLockHeld(m_pool.cs);
1062  const CTransaction& tx = *ws.m_ptx;
1063  const uint256& hash = ws.m_hash;
1064  TxValidationState& state = ws.m_state;
1065  const bool bypass_limits = args.m_bypass_limits;
1066 
1067  std::unique_ptr<CTxMemPoolEntry>& entry = ws.m_entry;
1068 
1069  // Remove conflicting transactions from the mempool
1070  for (CTxMemPool::txiter it : ws.m_all_conflicting)
1071  {
1072  LogPrint(BCLog::MEMPOOL, "replacing tx %s with %s for %s additional fees, %d delta bytes\n",
1073  it->GetTx().GetHash().ToString(),
1074  hash.ToString(),
1075  FormatMoney(ws.m_modified_fees - ws.m_conflicting_fees),
1076  (int)entry->GetTxSize() - (int)ws.m_conflicting_size);
1077  TRACE7(mempool, replaced,
1078  it->GetTx().GetHash().data(),
1079  it->GetTxSize(),
1080  it->GetFee(),
1081  std::chrono::duration_cast<std::chrono::duration<std::uint64_t>>(it->GetTime()).count(),
1082  hash.data(),
1083  entry->GetTxSize(),
1084  entry->GetFee()
1085  );
1086  ws.m_replaced_transactions.push_back(it->GetSharedTx());
1087  }
1088  m_pool.RemoveStaged(ws.m_all_conflicting, false, MemPoolRemovalReason::REPLACED);
1089 
1090  // This transaction should only count for fee estimation if:
1091  // - it's not being re-added during a reorg which bypasses typical mempool fee limits
1092  // - the node is not behind
1093  // - the transaction is not dependent on any other transactions in the mempool
1094  // - it's not part of a package. Since package relay is not currently supported, this
1095  // transaction has not necessarily been accepted to miners' mempools.
1096  bool validForFeeEstimation = !bypass_limits && !args.m_package_submission && IsCurrentForFeeEstimation(m_active_chainstate) && m_pool.HasNoInputsOf(tx);
1097 
1098  // Store transaction in memory
1099  m_pool.addUnchecked(*entry, ws.m_ancestors, validForFeeEstimation);
1100 
1101  // trim mempool and check if tx was trimmed
1102  // If we are validating a package, don't trim here because we could evict a previous transaction
1103  // in the package. LimitMempoolSize() should be called at the very end to make sure the mempool
1104  // is still within limits and package submission happens atomically.
1105  if (!args.m_package_submission && !bypass_limits) {
1106  LimitMempoolSize(m_pool, m_active_chainstate.CoinsTip());
1107  if (!m_pool.exists(GenTxid::Txid(hash)))
1108  return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "mempool full");
1109  }
1110  return true;
1111 }
1112 
1113 bool MemPoolAccept::SubmitPackage(const ATMPArgs& args, std::vector<Workspace>& workspaces,
1114  PackageValidationState& package_state,
1115  std::map<const uint256, const MempoolAcceptResult>& results)
1116 {
1118  AssertLockHeld(m_pool.cs);
1119  // Sanity check: none of the transactions should be in the mempool, and none of the transactions
1120  // should have a same-txid-different-witness equivalent in the mempool.
1121  assert(std::all_of(workspaces.cbegin(), workspaces.cend(), [this](const auto& ws){
1122  return !m_pool.exists(GenTxid::Txid(ws.m_ptx->GetHash())); }));
1123 
1124  bool all_submitted = true;
1125  // ConsensusScriptChecks adds to the script cache and is therefore consensus-critical;
1126  // CheckInputsFromMempoolAndCache asserts that transactions only spend coins available from the
1127  // mempool or UTXO set. Submit each transaction to the mempool immediately after calling
1128  // ConsensusScriptChecks to make the outputs available for subsequent transactions.
1129  for (Workspace& ws : workspaces) {
1130  if (!ConsensusScriptChecks(args, ws)) {
1131  results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state));
1132  // Since PolicyScriptChecks() passed, this should never fail.
1133  Assume(false);
1134  all_submitted = false;
1136  strprintf("BUG! PolicyScriptChecks succeeded but ConsensusScriptChecks failed: %s",
1137  ws.m_ptx->GetHash().ToString()));
1138  }
1139 
1140  // Re-calculate mempool ancestors to call addUnchecked(). They may have changed since the
1141  // last calculation done in PreChecks, since package ancestors have already been submitted.
1142  {
1143  auto ancestors{m_pool.CalculateMemPoolAncestors(*ws.m_entry, m_limits)};
1144  if(!ancestors) {
1145  results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state));
1146  // Since PreChecks() and PackageMempoolChecks() both enforce limits, this should never fail.
1147  Assume(false);
1148  all_submitted = false;
1150  strprintf("BUG! Mempool ancestors or descendants were underestimated: %s",
1151  ws.m_ptx->GetHash().ToString()));
1152  }
1153  ws.m_ancestors = std::move(ancestors).value_or(ws.m_ancestors);
1154  }
1155  // If we call LimitMempoolSize() for each individual Finalize(), the mempool will not take
1156  // the transaction's descendant feerate into account because it hasn't seen them yet. Also,
1157  // we risk evicting a transaction that a subsequent package transaction depends on. Instead,
1158  // allow the mempool to temporarily bypass limits, the maximum package size) while
1159  // submitting transactions individually and then trim at the very end.
1160  if (!Finalize(args, ws)) {
1161  results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state));
1162  // Since LimitMempoolSize() won't be called, this should never fail.
1163  Assume(false);
1164  all_submitted = false;
1166  strprintf("BUG! Adding to mempool failed: %s", ws.m_ptx->GetHash().ToString()));
1167  }
1168  }
1169 
1170  // It may or may not be the case that all the transactions made it into the mempool. Regardless,
1171  // make sure we haven't exceeded max mempool size.
1172  LimitMempoolSize(m_pool, m_active_chainstate.CoinsTip());
1173 
1174  std::vector<uint256> all_package_wtxids;
1175  all_package_wtxids.reserve(workspaces.size());
1176  std::transform(workspaces.cbegin(), workspaces.cend(), std::back_inserter(all_package_wtxids),
1177  [](const auto& ws) { return ws.m_ptx->GetWitnessHash(); });
1178  // Find the wtxids of the transactions that made it into the mempool. Allow partial submission,
1179  // but don't report success unless they all made it into the mempool.
1180  for (Workspace& ws : workspaces) {
1181  const auto effective_feerate = args.m_package_feerates ? ws.m_package_feerate :
1182  CFeeRate{ws.m_modified_fees, static_cast<uint32_t>(ws.m_vsize)};
1183  const auto effective_feerate_wtxids = args.m_package_feerates ? all_package_wtxids :
1184  std::vector<uint256>({ws.m_ptx->GetWitnessHash()});
1185  if (m_pool.exists(GenTxid::Wtxid(ws.m_ptx->GetWitnessHash()))) {
1186  results.emplace(ws.m_ptx->GetWitnessHash(),
1187  MempoolAcceptResult::Success(std::move(ws.m_replaced_transactions), ws.m_vsize,
1188  ws.m_base_fees, effective_feerate, effective_feerate_wtxids));
1189  GetMainSignals().TransactionAddedToMempool(ws.m_ptx, m_pool.GetAndIncrementSequence());
1190  } else {
1191  all_submitted = false;
1192  ws.m_state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "mempool full");
1193  results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state));
1194  }
1195  }
1196  return all_submitted;
1197 }
1198 
1199 MempoolAcceptResult MemPoolAccept::AcceptSingleTransaction(const CTransactionRef& ptx, ATMPArgs& args)
1200 {
1202  LOCK(m_pool.cs); // mempool "read lock" (held through GetMainSignals().TransactionAddedToMempool())
1203 
1204  Workspace ws(ptx);
1205 
1206  if (!PreChecks(args, ws)) return MempoolAcceptResult::Failure(ws.m_state);
1207 
1208  if (m_rbf && !ReplacementChecks(ws)) return MempoolAcceptResult::Failure(ws.m_state);
1209 
1210  // Perform the inexpensive checks first and avoid hashing and signature verification unless
1211  // those checks pass, to mitigate CPU exhaustion denial-of-service attacks.
1212  if (!PolicyScriptChecks(args, ws)) return MempoolAcceptResult::Failure(ws.m_state);
1213 
1214  if (!ConsensusScriptChecks(args, ws)) return MempoolAcceptResult::Failure(ws.m_state);
1215 
1216  const CFeeRate effective_feerate{ws.m_modified_fees, static_cast<uint32_t>(ws.m_vsize)};
1217  const std::vector<uint256> single_wtxid{ws.m_ptx->GetWitnessHash()};
1218  // Tx was accepted, but not added
1219  if (args.m_test_accept) {
1220  return MempoolAcceptResult::Success(std::move(ws.m_replaced_transactions), ws.m_vsize,
1221  ws.m_base_fees, effective_feerate, single_wtxid);
1222  }
1223 
1224  if (!Finalize(args, ws)) return MempoolAcceptResult::Failure(ws.m_state);
1225 
1226  GetMainSignals().TransactionAddedToMempool(ptx, m_pool.GetAndIncrementSequence());
1227 
1228  return MempoolAcceptResult::Success(std::move(ws.m_replaced_transactions), ws.m_vsize, ws.m_base_fees,
1229  effective_feerate, single_wtxid);
1230 }
1231 
1232 PackageMempoolAcceptResult MemPoolAccept::AcceptMultipleTransactions(const std::vector<CTransactionRef>& txns, ATMPArgs& args)
1233 {
1235 
1236  // These context-free package limits can be done before taking the mempool lock.
1237  PackageValidationState package_state;
1238  if (!CheckPackage(txns, package_state)) return PackageMempoolAcceptResult(package_state, {});
1239 
1240  std::vector<Workspace> workspaces{};
1241  workspaces.reserve(txns.size());
1242  std::transform(txns.cbegin(), txns.cend(), std::back_inserter(workspaces),
1243  [](const auto& tx) { return Workspace(tx); });
1244  std::map<const uint256, const MempoolAcceptResult> results;
1245 
1246  LOCK(m_pool.cs);
1247 
1248  // Do all PreChecks first and fail fast to avoid running expensive script checks when unnecessary.
1249  for (Workspace& ws : workspaces) {
1250  if (!PreChecks(args, ws)) {
1251  package_state.Invalid(PackageValidationResult::PCKG_TX, "transaction failed");
1252  // Exit early to avoid doing pointless work. Update the failed tx result; the rest are unfinished.
1253  results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state));
1254  return PackageMempoolAcceptResult(package_state, std::move(results));
1255  }
1256  // Make the coins created by this transaction available for subsequent transactions in the
1257  // package to spend. Since we already checked conflicts in the package and we don't allow
1258  // replacements, we don't need to track the coins spent. Note that this logic will need to be
1259  // updated if package replace-by-fee is allowed in the future.
1260  assert(!args.m_allow_replacement);
1261  m_viewmempool.PackageAddTransaction(ws.m_ptx);
1262  }
1263 
1264  // Transactions must meet two minimum feerates: the mempool minimum fee and min relay fee.
1265  // For transactions consisting of exactly one child and its parents, it suffices to use the
1266  // package feerate (total modified fees / total virtual size) to check this requirement.
1267  const auto m_total_vsize = std::accumulate(workspaces.cbegin(), workspaces.cend(), int64_t{0},
1268  [](int64_t sum, auto& ws) { return sum + ws.m_vsize; });
1269  const auto m_total_modified_fees = std::accumulate(workspaces.cbegin(), workspaces.cend(), CAmount{0},
1270  [](CAmount sum, auto& ws) { return sum + ws.m_modified_fees; });
1271  const CFeeRate package_feerate(m_total_modified_fees, m_total_vsize);
1272  TxValidationState placeholder_state;
1273  if (args.m_package_feerates &&
1274  !CheckFeeRate(m_total_vsize, m_total_modified_fees, placeholder_state)) {
1275  package_state.Invalid(PackageValidationResult::PCKG_POLICY, "package-fee-too-low");
1276  return PackageMempoolAcceptResult(package_state, {});
1277  }
1278 
1279  // Apply package mempool ancestor/descendant limits. Skip if there is only one transaction,
1280  // because it's unnecessary. Also, CPFP carve out can increase the limit for individual
1281  // transactions, but this exemption is not extended to packages in CheckPackageLimits().
1282  std::string err_string;
1283  if (txns.size() > 1 && !PackageMempoolChecks(txns, package_state)) {
1284  return PackageMempoolAcceptResult(package_state, std::move(results));
1285  }
1286 
1287  std::vector<uint256> all_package_wtxids;
1288  all_package_wtxids.reserve(workspaces.size());
1289  std::transform(workspaces.cbegin(), workspaces.cend(), std::back_inserter(all_package_wtxids),
1290  [](const auto& ws) { return ws.m_ptx->GetWitnessHash(); });
1291  for (Workspace& ws : workspaces) {
1292  ws.m_package_feerate = package_feerate;
1293  if (!PolicyScriptChecks(args, ws)) {
1294  // Exit early to avoid doing pointless work. Update the failed tx result; the rest are unfinished.
1295  package_state.Invalid(PackageValidationResult::PCKG_TX, "transaction failed");
1296  results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state));
1297  return PackageMempoolAcceptResult(package_state, std::move(results));
1298  }
1299  if (args.m_test_accept) {
1300  const auto effective_feerate = args.m_package_feerates ? ws.m_package_feerate :
1301  CFeeRate{ws.m_modified_fees, static_cast<uint32_t>(ws.m_vsize)};
1302  const auto effective_feerate_wtxids = args.m_package_feerates ? all_package_wtxids :
1303  std::vector<uint256>{ws.m_ptx->GetWitnessHash()};
1304  results.emplace(ws.m_ptx->GetWitnessHash(),
1305  MempoolAcceptResult::Success(std::move(ws.m_replaced_transactions),
1306  ws.m_vsize, ws.m_base_fees, effective_feerate,
1307  effective_feerate_wtxids));
1308  }
1309  }
1310 
1311  if (args.m_test_accept) return PackageMempoolAcceptResult(package_state, std::move(results));
1312 
1313  if (!SubmitPackage(args, workspaces, package_state, results)) {
1314  // PackageValidationState filled in by SubmitPackage().
1315  return PackageMempoolAcceptResult(package_state, std::move(results));
1316  }
1317 
1318  return PackageMempoolAcceptResult(package_state, std::move(results));
1319 }
1320 
1321 PackageMempoolAcceptResult MemPoolAccept::AcceptPackage(const Package& package, ATMPArgs& args)
1322 {
1324  // Used if returning a PackageMempoolAcceptResult directly from this function.
1325  PackageValidationState package_state_quit_early;
1326 
1327  // Check that the package is well-formed. If it isn't, we won't try to validate any of the
1328  // transactions and thus won't return any MempoolAcceptResults, just a package-wide error.
1329 
1330  // Context-free package checks.
1331  if (!CheckPackage(package, package_state_quit_early)) return PackageMempoolAcceptResult(package_state_quit_early, {});
1332 
1333  // All transactions in the package must be a parent of the last transaction. This is just an
1334  // opportunity for us to fail fast on a context-free check without taking the mempool lock.
1335  if (!IsChildWithParents(package)) {
1336  package_state_quit_early.Invalid(PackageValidationResult::PCKG_POLICY, "package-not-child-with-parents");
1337  return PackageMempoolAcceptResult(package_state_quit_early, {});
1338  }
1339 
1340  // IsChildWithParents() guarantees the package is > 1 transactions.
1341  assert(package.size() > 1);
1342  // The package must be 1 child with all of its unconfirmed parents. The package is expected to
1343  // be sorted, so the last transaction is the child.
1344  const auto& child = package.back();
1345  std::unordered_set<uint256, SaltedTxidHasher> unconfirmed_parent_txids;
1346  std::transform(package.cbegin(), package.cend() - 1,
1347  std::inserter(unconfirmed_parent_txids, unconfirmed_parent_txids.end()),
1348  [](const auto& tx) { return tx->GetHash(); });
1349 
1350  // All child inputs must refer to a preceding package transaction or a confirmed UTXO. The only
1351  // way to verify this is to look up the child's inputs in our current coins view (not including
1352  // mempool), and enforce that all parents not present in the package be available at chain tip.
1353  // Since this check can bring new coins into the coins cache, keep track of these coins and
1354  // uncache them if we don't end up submitting this package to the mempool.
1355  const CCoinsViewCache& coins_tip_cache = m_active_chainstate.CoinsTip();
1356  for (const auto& input : child->vin) {
1357  if (!coins_tip_cache.HaveCoinInCache(input.prevout)) {
1358  args.m_coins_to_uncache.push_back(input.prevout);
1359  }
1360  }
1361  // Using the MemPoolAccept m_view cache allows us to look up these same coins faster later.
1362  // This should be connecting directly to CoinsTip, not to m_viewmempool, because we specifically
1363  // require inputs to be confirmed if they aren't in the package.
1364  m_view.SetBackend(m_active_chainstate.CoinsTip());
1365  const auto package_or_confirmed = [this, &unconfirmed_parent_txids](const auto& input) {
1366  return unconfirmed_parent_txids.count(input.prevout.hash) > 0 || m_view.HaveCoin(input.prevout);
1367  };
1368  if (!std::all_of(child->vin.cbegin(), child->vin.cend(), package_or_confirmed)) {
1369  package_state_quit_early.Invalid(PackageValidationResult::PCKG_POLICY, "package-not-child-with-unconfirmed-parents");
1370  return PackageMempoolAcceptResult(package_state_quit_early, {});
1371  }
1372  // Protect against bugs where we pull more inputs from disk that miss being added to
1373  // coins_to_uncache. The backend will be connected again when needed in PreChecks.
1374  m_view.SetBackend(m_dummy);
1375 
1376  LOCK(m_pool.cs);
1377  // Stores final results that won't change
1378  std::map<const uint256, const MempoolAcceptResult> results_final;
1379  // Node operators are free to set their mempool policies however they please, nodes may receive
1380  // transactions in different orders, and malicious counterparties may try to take advantage of
1381  // policy differences to pin or delay propagation of transactions. As such, it's possible for
1382  // some package transaction(s) to already be in the mempool, and we don't want to reject the
1383  // entire package in that case (as that could be a censorship vector). De-duplicate the
1384  // transactions that are already in the mempool, and only call AcceptMultipleTransactions() with
1385  // the new transactions. This ensures we don't double-count transaction counts and sizes when
1386  // checking ancestor/descendant limits, or double-count transaction fees for fee-related policy.
1387  ATMPArgs single_args = ATMPArgs::SingleInPackageAccept(args);
1388  // Results from individual validation. "Nonfinal" because if a transaction fails by itself but
1389  // succeeds later (i.e. when evaluated with a fee-bumping child), the result changes (though not
1390  // reflected in this map). If a transaction fails more than once, we want to return the first
1391  // result, when it was considered on its own. So changes will only be from invalid -> valid.
1392  std::map<uint256, MempoolAcceptResult> individual_results_nonfinal;
1393  bool quit_early{false};
1394  std::vector<CTransactionRef> txns_package_eval;
1395  for (const auto& tx : package) {
1396  const auto& wtxid = tx->GetWitnessHash();
1397  const auto& txid = tx->GetHash();
1398  // There are 3 possibilities: already in mempool, same-txid-diff-wtxid already in mempool,
1399  // or not in mempool. An already confirmed tx is treated as one not in mempool, because all
1400  // we know is that the inputs aren't available.
1401  if (m_pool.exists(GenTxid::Wtxid(wtxid))) {
1402  // Exact transaction already exists in the mempool.
1403  auto iter = m_pool.GetIter(txid);
1404  assert(iter != std::nullopt);
1405  results_final.emplace(wtxid, MempoolAcceptResult::MempoolTx(iter.value()->GetTxSize(), iter.value()->GetFee()));
1406  } else if (m_pool.exists(GenTxid::Txid(txid))) {
1407  // Transaction with the same non-witness data but different witness (same txid,
1408  // different wtxid) already exists in the mempool.
1409  //
1410  // We don't allow replacement transactions right now, so just swap the package
1411  // transaction for the mempool one. Note that we are ignoring the validity of the
1412  // package transaction passed in.
1413  // TODO: allow witness replacement in packages.
1414  auto iter = m_pool.GetIter(txid);
1415  assert(iter != std::nullopt);
1416  // Provide the wtxid of the mempool tx so that the caller can look it up in the mempool.
1417  results_final.emplace(wtxid, MempoolAcceptResult::MempoolTxDifferentWitness(iter.value()->GetTx().GetWitnessHash()));
1418  } else {
1419  // Transaction does not already exist in the mempool.
1420  // Try submitting the transaction on its own.
1421  const auto single_res = AcceptSingleTransaction(tx, single_args);
1422  if (single_res.m_result_type == MempoolAcceptResult::ResultType::VALID) {
1423  // The transaction succeeded on its own and is now in the mempool. Don't include it
1424  // in package validation, because its fees should only be "used" once.
1425  assert(m_pool.exists(GenTxid::Wtxid(wtxid)));
1426  results_final.emplace(wtxid, single_res);
1427  } else if (single_res.m_state.GetResult() != TxValidationResult::TX_MEMPOOL_POLICY &&
1428  single_res.m_state.GetResult() != TxValidationResult::TX_MISSING_INPUTS) {
1429  // Package validation policy only differs from individual policy in its evaluation
1430  // of feerate. For example, if a transaction fails here due to violation of a
1431  // consensus rule, the result will not change when it is submitted as part of a
1432  // package. To minimize the amount of repeated work, unless the transaction fails
1433  // due to feerate or missing inputs (its parent is a previous transaction in the
1434  // package that failed due to feerate), don't run package validation. Note that this
1435  // decision might not make sense if different types of packages are allowed in the
1436  // future. Continue individually validating the rest of the transactions, because
1437  // some of them may still be valid.
1438  quit_early = true;
1439  package_state_quit_early.Invalid(PackageValidationResult::PCKG_TX, "transaction failed");
1440  individual_results_nonfinal.emplace(wtxid, single_res);
1441  } else {
1442  individual_results_nonfinal.emplace(wtxid, single_res);
1443  txns_package_eval.push_back(tx);
1444  }
1445  }
1446  }
1447 
1448  // Quit early because package validation won't change the result or the entire package has
1449  // already been submitted.
1450  if (quit_early || txns_package_eval.empty()) {
1451  for (const auto& [wtxid, mempoolaccept_res] : individual_results_nonfinal) {
1452  Assume(results_final.emplace(wtxid, mempoolaccept_res).second);
1453  Assume(mempoolaccept_res.m_result_type == MempoolAcceptResult::ResultType::INVALID);
1454  }
1455  return PackageMempoolAcceptResult(package_state_quit_early, std::move(results_final));
1456  }
1457  // Validate the (deduplicated) transactions as a package. Note that submission_result has its
1458  // own PackageValidationState; package_state_quit_early is unused past this point.
1459  auto submission_result = AcceptMultipleTransactions(txns_package_eval, args);
1460  // Include already-in-mempool transaction results in the final result.
1461  for (const auto& [wtxid, mempoolaccept_res] : results_final) {
1462  Assume(submission_result.m_tx_results.emplace(wtxid, mempoolaccept_res).second);
1463  Assume(mempoolaccept_res.m_result_type != MempoolAcceptResult::ResultType::INVALID);
1464  }
1465  if (submission_result.m_state.GetResult() == PackageValidationResult::PCKG_TX) {
1466  // Package validation failed because one or more transactions failed. Provide a result for
1467  // each transaction; if AcceptMultipleTransactions() didn't return a result for a tx,
1468  // include the previous individual failure reason.
1469  submission_result.m_tx_results.insert(individual_results_nonfinal.cbegin(),
1470  individual_results_nonfinal.cend());
1471  Assume(submission_result.m_tx_results.size() == package.size());
1472  }
1473  return submission_result;
1474 }
1475 
1476 } // anon namespace
1477 
// Try to add a single transaction to the active chainstate's mempool.
// NOTE(review): this scrape drops several source lines — the first line of the
// signature (before line 1479), an AssertLockHeld (1482), and the opening of
// the `if (result ... != VALID)` whose closing brace survives at 1502.
// Visible code lines are kept byte-identical below.
1479  int64_t accept_time, bool bypass_limits, bool test_accept)
1481 {
// Look up chain parameters and the mempool from the chainstate; the mempool
// must exist for single-transaction acceptance.
1483  const CChainParams& chainparams{active_chainstate.m_chainman.GetParams()};
1484  assert(active_chainstate.GetMempool() != nullptr);
1485  CTxMemPool& pool{*active_chainstate.GetMempool()};
1486 
// Coins pulled into the cache during validation are tracked so they can be
// evicted again if the transaction is rejected (memory-DoS protection).
1487  std::vector<COutPoint> coins_to_uncache;
1488  auto args = MemPoolAccept::ATMPArgs::SingleAccept(chainparams, accept_time, bypass_limits, coins_to_uncache, test_accept);
1489  MempoolAcceptResult result = MemPoolAccept(pool, active_chainstate).AcceptSingleTransaction(tx, args);
1491  // Remove coins that were not present in the coins cache before calling
1492  // AcceptSingleTransaction(); this is to prevent memory DoS in case we receive a large
1493  // number of invalid transactions that attempt to overrun the in-memory coins cache
1494  // (`CCoinsViewCache::cacheCoins`).
1495 
// NOTE(review): the stripped line 1490 presumably guarded this branch on a
// non-VALID result — confirm against the upstream source.
1496  for (const COutPoint& hashTx : coins_to_uncache)
1497  active_chainstate.CoinsTip().Uncache(hashTx);
// Emit a tracepoint recording the rejected txid and the reject reason.
1498  TRACE2(mempool, rejected,
1499  tx->GetHash().data(),
1500  result.m_state.GetRejectReason().c_str()
1501  );
1502  }
1503  // After we've (potentially) uncached entries, ensure our coins cache is still within its size limits
1504  BlockValidationState state_dummy;
1505  active_chainstate.FlushStateToDisk(state_dummy, FlushStateMode::PERIODIC);
1506  return result;
1507 }
1508 
// Validate (and optionally submit) a package of transactions against the
// given chainstate's mempool. NOTE(review): the signature's first line
// (before 1510) and two AssertLockHeld lines (1512, 1519) are stripped from
// this scrape; visible code is byte-identical.
1510  const Package& package, bool test_accept)
1511 {
// The package must be non-empty and contain no null transaction refs.
1513  assert(!package.empty());
1514  assert(std::all_of(package.cbegin(), package.cend(), [](const auto& tx){return tx != nullptr;}));
1515 
1516  std::vector<COutPoint> coins_to_uncache;
1517  const CChainParams& chainparams = active_chainstate.m_chainman.GetParams();
// Run either a dry-run (test accept) or a real child-with-parents package
// submission; the immediately-invoked lambda keeps `result` const-initialized.
1518  auto result = [&]() EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
1520  if (test_accept) {
1521  auto args = MemPoolAccept::ATMPArgs::PackageTestAccept(chainparams, GetTime(), coins_to_uncache);
1522  return MemPoolAccept(pool, active_chainstate).AcceptMultipleTransactions(package, args);
1523  } else {
1524  auto args = MemPoolAccept::ATMPArgs::PackageChildWithParents(chainparams, GetTime(), coins_to_uncache);
1525  return MemPoolAccept(pool, active_chainstate).AcceptPackage(package, args);
1526  }
1527  }();
1528 
1529  // Uncache coins pertaining to transactions that were not submitted to the mempool.
1530  if (test_accept || result.m_state.IsInvalid()) {
1531  for (const COutPoint& hashTx : coins_to_uncache) {
1532  active_chainstate.CoinsTip().Uncache(hashTx);
1533  }
1534  }
1535  // Ensure the coins cache is still within limits.
1536  BlockValidationState state_dummy;
1537  active_chainstate.FlushStateToDisk(state_dummy, FlushStateMode::PERIODIC);
1538  return result;
1539 }
1540 
1541 CAmount GetBlockSubsidy(int nHeight, const Consensus::Params& consensusParams)
1542 {
1543  int halvings = nHeight / consensusParams.nSubsidyHalvingInterval;
1544  // Force block reward to zero when right shift is undefined.
1545  if (halvings >= 64)
1546  return 0;
1547 
1548  CAmount nSubsidy = 50 * COIN;
1549  // Subsidy is cut in half every 210,000 blocks which will occur approximately every 4 years.
1550  nSubsidy >>= halvings;
1551  return nSubsidy;
1552 }
1553 
// CoinsViews constructor body fragment (the signature line before 1555 is
// stripped in this scrape): constructs the on-disk coins DB view from the
// moved-in parameters and wires the error-catching wrapper view on top of it.
1555  : m_dbview{std::move(db_params), std::move(options)},
1556  m_catcherview(&m_dbview) {}
1557 
// Create the in-memory coins cache layered on top of the error-catching
// view. NOTE(review): an interior line (1560, presumably AssertLockHeld) is
// stripped from this scrape.
1558 void CoinsViews::InitCache()
1559 {
1561  m_cacheview = std::make_unique<CCoinsViewCache>(&m_catcherview);
1562 }
1563 
// Chainstate constructor fragment (its first signature line, before 1565, is
// stripped): stores non-owning references to the mempool, block manager and
// chainstate manager, plus the optional snapshot base block hash.
1565  CTxMemPool* mempool,
1566  BlockManager& blockman,
1567  ChainstateManager& chainman,
1568  std::optional<uint256> from_snapshot_blockhash)
1569  : m_mempool(mempool),
1570  m_blockman(blockman),
1571  m_chainman(chainman),
1572  m_from_snapshot_blockhash(from_snapshot_blockhash) {}
1573 
// Initialize the on-disk coins database views for this chainstate.
// NOTE(review): the signature line (before 1575), the conditional opening the
// brace closed at 1582 (presumably a snapshot-chainstate check), and the
// final constructor argument line (1592) are stripped from this scrape.
1575  size_t cache_size_bytes,
1576  bool in_memory,
1577  bool should_wipe,
1578  fs::path leveldb_name)
1579 {
// For snapshot-based chainstates the leveldb directory gets a suffix so it
// does not collide with the validated chainstate's database.
1581  leveldb_name += node::SNAPSHOT_CHAINSTATE_SUFFIX;
1582  }
1583 
1584  m_coins_views = std::make_unique<CoinsViews>(
1585  DBParams{
1586  .path = m_chainman.m_options.datadir / leveldb_name,
1587  .cache_bytes = cache_size_bytes,
1588  .memory_only = in_memory,
1589  .wipe_data = should_wipe,
1590  .obfuscate = true,
1591  .options = m_chainman.m_options.coins_db},
1593 }
1594 
// Record the configured coins-tip cache budget and build the in-memory cache
// on top of the already-initialized coins views. NOTE(review): line 1597
// (presumably AssertLockHeld) is stripped from this scrape.
1595 void Chainstate::InitCoinsCache(size_t cache_size_bytes)
1596 {
1598  assert(m_coins_views != nullptr);
1599  m_coinstip_cache_size_bytes = cache_size_bytes;
1600  m_coins_views->InitCache();
1601 }
1602 
1603 // Note that though this is marked const, we may end up modifying `m_cached_finished_ibd`, which
1604 // is a performance-related implementation detail. This function must be marked
1605 // `const` so that `CValidationInterface` clients (which are given a `const Chainstate*`)
1606 // can call it.
1607 //
// Returns true while the node is still in initial block download (IBD).
// Once it returns false it latches: m_cached_finished_ibd is set and all
// subsequent calls return false without taking cs_main.
// NOTE(review): two guard conditions are stripped from this scrape — line
// 1617 (opening the brace closed at 1619) and line 1622 (opening the brace
// closed at 1624, likely a minimum-chain-work check) — confirm upstream.
1608 bool Chainstate::IsInitialBlockDownload() const
1609 {
1610  // Optimization: pre-test latch before taking the lock.
1611  if (m_cached_finished_ibd.load(std::memory_order_relaxed))
1612  return false;
1613 
1614  LOCK(cs_main);
// Re-check under the lock in case another thread latched meanwhile.
1615  if (m_cached_finished_ibd.load(std::memory_order_relaxed))
1616  return false;
1618  return true;
1619  }
1620  if (m_chain.Tip() == nullptr)
1621  return true;
1623  return true;
1624  }
// Still in IBD if the tip is older than the configured max tip age.
1625  if (m_chain.Tip()->Time() < Now<NodeSeconds>() - m_chainman.m_options.max_tip_age) {
1626  return true;
1627  }
1628  LogPrintf("Leaving InitialBlockDownload (latching to false)\n");
1629  m_cached_finished_ibd.store(true, std::memory_order_relaxed);
1630  return false;
1631 }
1632 
1633 static void AlertNotify(const std::string& strMessage)
1634 {
1635  uiInterface.NotifyAlertChanged();
1636 #if HAVE_SYSTEM
1637  std::string strCmd = gArgs.GetArg("-alertnotify", "");
1638  if (strCmd.empty()) return;
1639 
1640  // Alert text should be plain ascii coming from a trusted source, but to
1641  // be safe we first strip anything not in safeChars, then add single quotes around
1642  // the whole string before passing it to the shell:
1643  std::string singleQuote("'");
1644  std::string safeStatus = SanitizeString(strMessage);
1645  safeStatus = singleQuote+safeStatus+singleQuote;
1646  ReplaceAll(strCmd, "%s", safeStatus);
1647 
1648  std::thread t(runCommand, strCmd);
1649  t.detach(); // thread runs free
1650 #endif
1651 }
1652 
// Body of CheckForkWarningConditions() — warn when a rejected chain with
// substantially more work than our tip is known. NOTE(review): this scrape
// drops the signature (1653), an AssertLockHeld (1655), and the two branch
// bodies at 1665/1667 (presumably setting/clearing the large-work-invalid-
// chain warning flag).
1654 {
1656 
1657  // Before we get past initial download, we cannot reliably alert about forks
1658  // (we assume we don't get stuck on a fork before finishing our initial sync)
1659  if (IsInitialBlockDownload()) {
1660  return;
1661  }
1662 
// Trigger the warning only when the best known invalid chain has at least
// ~6 blocks' worth of work more than our current tip.
1663  if (m_chainman.m_best_invalid && m_chainman.m_best_invalid->nChainWork > m_chain.Tip()->nChainWork + (GetBlockProof(*m_chain.Tip()) * 6)) {
1664  LogPrintf("%s: Warning: Found invalid chain at least ~6 blocks longer than our best chain.\nChain state database corruption likely.\n", __func__);
1666  } else {
1668  }
1669 }
1670 
1671 // Called both upon regular invalid block discovery *and* InvalidateBlock
// Body of InvalidChainFound(CBlockIndex* pindexNew). NOTE(review): this
// scrape drops the signature (1672), an AssertLockHeld (1674), the body of
// the best-header reset branch (1679), and the tail of the second LogPrintf
// plus the CheckForkWarningConditions() call (1689-1690).
1673 {
// Track the most-work invalid chain tip seen so far.
1675  if (!m_chainman.m_best_invalid || pindexNew->nChainWork > m_chainman.m_best_invalid->nChainWork) {
1676  m_chainman.m_best_invalid = pindexNew;
1677  }
// If the invalid block is an ancestor of the current best header, the best
// header must be recomputed (stripped statement at 1679).
1678  if (m_chainman.m_best_header != nullptr && m_chainman.m_best_header->GetAncestor(pindexNew->nHeight) == pindexNew) {
1680  }
1681 
1682  LogPrintf("%s: invalid block=%s height=%d log2_work=%f date=%s\n", __func__,
1683  pindexNew->GetBlockHash().ToString(), pindexNew->nHeight,
1684  log(pindexNew->nChainWork.getdouble())/log(2.0), FormatISO8601DateTime(pindexNew->GetBlockTime()));
1685  CBlockIndex *tip = m_chain.Tip();
1686  assert (tip);
1687  LogPrintf("%s: current best=%s height=%d log2_work=%f date=%s\n", __func__,
1688  tip->GetBlockHash().ToString(), m_chain.Height(), log(tip->nChainWork.getdouble())/log(2.0),
1691 }
1692 
1693 // Same as InvalidChainFound, above, except not called directly from InvalidateBlock,
1694 // which does its own setBlockIndexCandidates management.
// Body of InvalidBlockFound(CBlockIndex* pindex, const BlockValidationState&).
// NOTE(review): the signature (1695), an AssertLockHeld (1697), and the
// guard opening the brace closed at 1704 (presumably excluding
// BLOCK_MUTATED results) are stripped from this scrape.
1696 {
// Mark the block failed, remember it, flag its index entry dirty for
// persistence, and drop it from the set of candidate tips.
1699  pindex->nStatus |= BLOCK_FAILED_VALID;
1700  m_chainman.m_failed_blocks.insert(pindex);
1701  m_blockman.m_dirty_blockindex.insert(pindex);
1702  setBlockIndexCandidates.erase(pindex);
1703  InvalidChainFound(pindex);
1704  }
1705 }
1706 
1707 void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, CTxUndo &txundo, int nHeight)
1708 {
1709  // mark inputs spent
1710  if (!tx.IsCoinBase()) {
1711  txundo.vprevout.reserve(tx.vin.size());
1712  for (const CTxIn &txin : tx.vin) {
1713  txundo.vprevout.emplace_back();
1714  bool is_spent = inputs.SpendCoin(txin.prevout, &txundo.vprevout.back());
1715  assert(is_spent);
1716  }
1717  }
1718  // add outputs
1719  AddCoins(inputs, tx, nHeight);
1720 }
1721 
// Fragment of CScriptCheck::operator()() — the signature (1722) and the
// VerifyScript invocation (1725) are stripped from this scrape. Only the
// scriptSig and witness lookups for input nIn are visible.
1723  const CScript &scriptSig = ptxTo->vin[nIn].scriptSig;
1724  const CScriptWitness *witness = &ptxTo->vin[nIn].scriptWitness;
1726 }
1727 
1730 
// Size the global script-execution cache to at most max_size_bytes; returns
// false if setup fails. NOTE(review): the hasher-initialization statements
// (lines 1734 and 1738-1739, which write the nonce into the salted hasher)
// are stripped from this scrape.
1731 bool InitScriptExecutionCache(size_t max_size_bytes)
1732 {
1733  // Setup the salted hasher
1735  // We want the nonce to be 64 bytes long to force the hasher to process
1736  // this chunk, which makes later hash computations more efficient. We
1737  // just write our 32-byte entropy twice to fill the 64 bytes.
1740 
1741  auto setup_results = g_scriptExecutionCache.setup_bytes(max_size_bytes);
1742  if (!setup_results) return false;
1743 
// Log the granted cache size and element capacity for the operator.
1744  const auto [num_elems, approx_size_bytes] = *setup_results;
1745  LogPrintf("Using %zu MiB out of %zu MiB requested for script execution cache, able to store %zu elements\n",
1746  approx_size_bytes >> 20, max_size_bytes >> 20, num_elems);
1747  return true;
1748 }
1749 
// Check whether all of a transaction's input scripts succeed under the given
// flags, optionally deferring the checks into pvChecks for parallel
// execution. NOTE(review): this scrape drops the doxygen header and first
// signature line (before 1770), the hasher-copy statement at 1786, and the
// conditional at 1820 that gates the non-mandatory-flags retry.
1770  const CCoinsViewCache& inputs, unsigned int flags, bool cacheSigStore,
1771  bool cacheFullScriptStore, PrecomputedTransactionData& txdata,
1772  std::vector<CScriptCheck>* pvChecks)
1773 {
// Coinbase inputs have no scripts to verify.
1774  if (tx.IsCoinBase()) return true;
1775 
1776  if (pvChecks) {
1777  pvChecks->reserve(tx.vin.size());
1778  }
1779 
1780  // First check if script executions have been cached with the same
1781  // flags. Note that this assumes that the inputs provided are
1782  // correct (ie that the transaction hash which is in tx's prevouts
1783  // properly commits to the scriptPubKey in the inputs view of that
1784  // transaction).
1785  uint256 hashCacheEntry;
// Cache key = H(witness hash || flags), using the salted hasher (stripped
// copy at 1786) so entries cannot be predicted externally.
1787  hasher.Write(tx.GetWitnessHash().begin(), 32).Write((unsigned char*)&flags, sizeof(flags)).Finalize(hashCacheEntry.begin());
1788  AssertLockHeld(cs_main); //TODO: Remove this requirement by making CuckooCache not require external locks
1789  if (g_scriptExecutionCache.contains(hashCacheEntry, !cacheFullScriptStore)) {
1790  return true;
1791  }
1792 
// Lazily collect the spent outputs for all inputs so the precomputed
// transaction data (sighash midstates etc.) can be initialized once.
1793  if (!txdata.m_spent_outputs_ready) {
1794  std::vector<CTxOut> spent_outputs;
1795  spent_outputs.reserve(tx.vin.size());
1796 
1797  for (const auto& txin : tx.vin) {
1798  const COutPoint& prevout = txin.prevout;
1799  const Coin& coin = inputs.AccessCoin(prevout);
1800  assert(!coin.IsSpent());
1801  spent_outputs.emplace_back(coin.out);
1802  }
1803  txdata.Init(tx, std::move(spent_outputs));
1804  }
1805  assert(txdata.m_spent_outputs.size() == tx.vin.size());
1806 
1807  for (unsigned int i = 0; i < tx.vin.size(); i++) {
1808 
1809  // We very carefully only pass in things to CScriptCheck which
1810  // are clearly committed to by tx' witness hash. This provides
1811  // a sanity check that our caching is not introducing consensus
1812  // failures through additional data in, eg, the coins being
1813  // spent being checked as a part of CScriptCheck.
1814 
1815  // Verify signature
1816  CScriptCheck check(txdata.m_spent_outputs[i], tx, i, flags, cacheSigStore, &txdata);
1817  if (pvChecks) {
1818  pvChecks->emplace_back(std::move(check));
1819  } else if (!check()) {
1821  // Check whether the failure was caused by a
1822  // non-mandatory script verification check, such as
1823  // non-standard DER encodings or non-null dummy
1824  // arguments; if so, ensure we return NOT_STANDARD
1825  // instead of CONSENSUS to avoid downstream users
1826  // splitting the network between upgraded and
1827  // non-upgraded nodes by banning CONSENSUS-failing
1828  // data providers.
1829  CScriptCheck check2(txdata.m_spent_outputs[i], tx, i,
1830  flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, cacheSigStore, &txdata);
1831  if (check2())
1832  return state.Invalid(TxValidationResult::TX_NOT_STANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(check.GetScriptError())));
1833  }
1834  // MANDATORY flag failures correspond to
1835  // TxValidationResult::TX_CONSENSUS. Because CONSENSUS
1836  // failures are the most serious case of validation
1837  // failures, we may need to consider using
1838  // RECENT_CONSENSUS_CHANGE for any script failure that
1839  // could be due to non-upgraded nodes which we may want to
1840  // support, to avoid splitting the network (but this
1841  // depends on the details of how net_processing handles
1842  // such errors).
1843  return state.Invalid(TxValidationResult::TX_CONSENSUS, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(check.GetScriptError())));
1844  }
1845  }
1846 
1847  if (cacheFullScriptStore && !pvChecks) {
1848  // We executed all of the provided scripts, and were told to
1849  // cache the result. Do so now.
1850  g_scriptExecutionCache.insert(hashCacheEntry);
1851  }
1852 
1853  return true;
1854 }
1855 
// Abort with a fatal error: delegates to the void AbortNode overload
// (defined elsewhere) with the developer and user-facing messages, then
// records the message as an Error on the validation state. Always returns
// false so callers can `return AbortNode(...)` directly.
1856 bool AbortNode(BlockValidationState& state, const std::string& strMessage, const bilingual_str& userMessage)
1857 {
1858  AbortNode(strMessage, userMessage);
1859  return state.Error(strMessage);
1860 }
1861 
1869 int ApplyTxInUndo(Coin&& undo, CCoinsViewCache& view, const COutPoint& out)
1870 {
1871  bool fClean = true;
1872 
1873  if (view.HaveCoin(out)) fClean = false; // overwriting transaction output
1874 
1875  if (undo.nHeight == 0) {
1876  // Missing undo metadata (height and coinbase). Older versions included this
1877  // information only in undo records for the last spend of a transactions'
1878  // outputs. This implies that it must be present for some other output of the same tx.
1879  const Coin& alternate = AccessByTxid(view, out.hash);
1880  if (!alternate.IsSpent()) {
1881  undo.nHeight = alternate.nHeight;
1882  undo.fCoinBase = alternate.fCoinBase;
1883  } else {
1884  return DISCONNECT_FAILED; // adding output for transaction without known metadata
1885  }
1886  }
1887  // If the coin already exists as an unspent coin in the cache, then the
1888  // possible_overwrite parameter to AddCoin must be set to true. We have
1889  // already checked whether an unspent coin exists above using HaveCoin, so
1890  // we don't need to guess. When fClean is false, an unspent coin already
1891  // existed and it is an overwrite.
1892  view.AddCoin(out, std::move(undo), !fClean);
1893 
1894  return fClean ? DISCONNECT_OK : DISCONNECT_UNCLEAN;
1895 }
1896 
// Undo the effects of `block` (assumed to be the view's current best block)
// on the coins view: verify and remove the block's outputs, then restore the
// coins it spent from the undo data. Returns DISCONNECT_OK, _UNCLEAN (data
// inconsistencies observed), or _FAILED. NOTE(review): one interior line
// (1901, presumably AssertLockHeld) is stripped from this scrape.
1899 DisconnectResult Chainstate::DisconnectBlock(const CBlock& block, const CBlockIndex* pindex, CCoinsViewCache& view)
1900 {
1902  bool fClean = true;
1903 
1904  CBlockUndo blockUndo;
1905  if (!UndoReadFromDisk(blockUndo, pindex)) {
1906  error("DisconnectBlock(): failure reading undo data");
1907  return DISCONNECT_FAILED;
1908  }
1909 
// The coinbase has no undo record, hence the +1.
1910  if (blockUndo.vtxundo.size() + 1 != block.vtx.size()) {
1911  error("DisconnectBlock(): block and undo data inconsistent");
1912  return DISCONNECT_FAILED;
1913  }
1914 
1915  // Ignore blocks that contain transactions which are 'overwritten' by later transactions,
1916  // unless those are already completely spent.
1917  // See https://github.com/bitcoin/bitcoin/issues/22596 for additional information.
1918  // Note: the blocks specified here are different than the ones used in ConnectBlock because DisconnectBlock
1919  // unwinds the blocks in reverse. As a result, the inconsistency is not discovered until the earlier
1920  // blocks with the duplicate coinbase transactions are disconnected.
1921  bool fEnforceBIP30 = !((pindex->nHeight==91722 && pindex->GetBlockHash() == uint256S("0x00000000000271a2dc26e7667f8419f2e15416dc6955e5a6c6cdf3f2574dd08e")) ||
1922  (pindex->nHeight==91812 && pindex->GetBlockHash() == uint256S("0x00000000000af0aed4792b1acee3d966af36cf5def14935db8de83d6f9306f2f")));
1923 
1924  // undo transactions in reverse order
1925  for (int i = block.vtx.size() - 1; i >= 0; i--) {
1926  const CTransaction &tx = *(block.vtx[i]);
1927  uint256 hash = tx.GetHash();
1928  bool is_coinbase = tx.IsCoinBase();
1929  bool is_bip30_exception = (is_coinbase && !fEnforceBIP30);
1930 
1931  // Check that all outputs are available and match the outputs in the block itself
1932  // exactly.
1933  for (size_t o = 0; o < tx.vout.size(); o++) {
1934  if (!tx.vout[o].scriptPubKey.IsUnspendable()) {
1935  COutPoint out(hash, o);
1936  Coin coin;
1937  bool is_spent = view.SpendCoin(out, &coin);
1938  if (!is_spent || tx.vout[o] != coin.out || pindex->nHeight != coin.nHeight || is_coinbase != coin.fCoinBase) {
1939  if (!is_bip30_exception) {
1940  fClean = false; // transaction output mismatch
1941  }
1942  }
1943  }
1944  }
1945 
1946  // restore inputs
1947  if (i > 0) { // not coinbases
1948  CTxUndo &txundo = blockUndo.vtxundo[i-1];
1949  if (txundo.vprevout.size() != tx.vin.size()) {
1950  error("DisconnectBlock(): transaction and undo data inconsistent");
1951  return DISCONNECT_FAILED;
1952  }
// Inputs are restored in reverse so a coin spent and re-created within the
// same block unwinds in the correct order.
1953  for (unsigned int j = tx.vin.size(); j > 0;) {
1954  --j;
1955  const COutPoint& out = tx.vin[j].prevout;
1956  int res = ApplyTxInUndo(std::move(txundo.vprevout[j]), view, out);
1957  if (res == DISCONNECT_FAILED) return DISCONNECT_FAILED;
1958  fClean = fClean && res != DISCONNECT_UNCLEAN;
1959  }
1960  // At this point, all of txundo.vprevout should have been moved out.
1961  }
1962  }
1963 
1964  // move best block pointer to prevout block
1965  view.SetBestBlock(pindex->pprev->GetBlockHash());
1966 
1967  return fClean ? DISCONNECT_OK : DISCONNECT_UNCLEAN;
1968 }
1969 
1971 
// Spawn the requested number of worker threads on the file-level script
// check queue (its declaration is stripped from this scrape).
1972 void StartScriptCheckWorkerThreads(int threads_num)
1973 {
1974  scriptcheckqueue.StartWorkerThreads(threads_num);
1975 }
1976 
// Body of StopScriptCheckWorkerThreads() (signature line 1977 is stripped
// from this scrape): stops the script check queue's worker threads.
1978 {
1979  scriptcheckqueue.StopWorkerThreads();
1980 }
1981 
// Threshold-condition checker used to warn about unknown version bits being
// set by miners. NOTE(review): the class head (before 1986, presumably
// deriving from AbstractThresholdConditionChecker), the m_chainman member
// declaration (1988), and one conjunct of Condition() (2002, likely the
// VERSIONBITS top-bits mask test) are stripped from this scrape.
1986 {
1987 private:
// The version bit this checker instance watches.
1989  int m_bit;
1990 
1991 public:
1992  explicit WarningBitsConditionChecker(const ChainstateManager& chainman, int bit) : m_chainman{chainman}, m_bit(bit) {}
1993 
// The warning window is always active; period/threshold mirror the regular
// miner-confirmation parameters.
1994  int64_t BeginTime(const Consensus::Params& params) const override { return 0; }
1995  int64_t EndTime(const Consensus::Params& params) const override { return std::numeric_limits<int64_t>::max(); }
1996  int Period(const Consensus::Params& params) const override { return params.nMinerConfirmationWindow; }
1997  int Threshold(const Consensus::Params& params) const override { return params.nRuleChangeActivationThreshold; }
1998 
// True when the block sets bit m_bit but our own block-version computation
// would not — i.e. an unknown deployment is signaling.
1999  bool Condition(const CBlockIndex* pindex, const Consensus::Params& params) const override
2000  {
2001  return pindex->nHeight >= params.MinBIP9WarningHeight &&
2003  ((pindex->nVersion >> m_bit) & 1) != 0 &&
2004  ((m_chainman.m_versionbitscache.ComputeBlockVersion(pindex->pprev, params) >> m_bit) & 1) == 0;
2005  }
2006 };
2007 
// Per-bit threshold-state caches for the unknown-version-bit warning logic.
2008 static std::array<ThresholdConditionCache, VERSIONBITS_NUM_BITS> warningcache GUARDED_BY(cs_main);
2009 
// Compute the script-verification flags to apply when connecting the given
// block. NOTE(review): this scrape drops the flags initialization (2022,
// presumably P2SH|WITNESS|TAPROOT) and each `flags |= ...` statement inside
// the deployment branches (2030, 2035, 2040, 2045); code lines below are
// byte-identical.
2010 static unsigned int GetBlockScriptFlags(const CBlockIndex& block_index, const ChainstateManager& chainman)
2011 {
2012  const Consensus::Params& consensusparams = chainman.GetConsensus();
2013 
2014  // BIP16 didn't become active until Apr 1 2012 (on mainnet, and
2015  // retroactively applied to testnet)
2016  // However, only one historical block violated the P2SH rules (on both
2017  // mainnet and testnet).
2018  // Similarly, only one historical block violated the TAPROOT rules on
2019  // mainnet.
2020  // For simplicity, always leave P2SH+WITNESS+TAPROOT on except for the two
2021  // violating blocks.
2023  const auto it{consensusparams.script_flag_exceptions.find(*Assert(block_index.phashBlock))};
2024  if (it != consensusparams.script_flag_exceptions.end()) {
2025  flags = it->second;
2026  }
2027 
2028  // Enforce the DERSIG (BIP66) rule
2029  if (DeploymentActiveAt(block_index, chainman, Consensus::DEPLOYMENT_DERSIG)) {
2031  }
2032 
2033  // Enforce CHECKLOCKTIMEVERIFY (BIP65)
2034  if (DeploymentActiveAt(block_index, chainman, Consensus::DEPLOYMENT_CLTV)) {
2036  }
2037 
2038  // Enforce CHECKSEQUENCEVERIFY (BIP112)
2039  if (DeploymentActiveAt(block_index, chainman, Consensus::DEPLOYMENT_CSV)) {
2041  }
2042 
2043  // Enforce BIP147 NULLDUMMY (activated simultaneously with segwit)
2044  if (DeploymentActiveAt(block_index, chainman, Consensus::DEPLOYMENT_SEGWIT)) {
2046  }
2047 
2048  return flags;
2049 }
2050 
2051 
// Cumulative per-phase timings and block counter used by ConnectBlock's
// BCLog::BENCH reporting.
2052 static SteadyClock::duration time_check{};
2053 static SteadyClock::duration time_forks{};
2054 static SteadyClock::duration time_connect{};
2055 static SteadyClock::duration time_verify{};
2056 static SteadyClock::duration time_undo{};
2057 static SteadyClock::duration time_index{};
2058 static SteadyClock::duration time_total{};
2059 static int64_t num_blocks_total = 0;
2060 
2064 bool Chainstate::ConnectBlock(const CBlock& block, BlockValidationState& state, CBlockIndex* pindex,
2065  CCoinsViewCache& view, bool fJustCheck)
2066 {
2068  assert(pindex);
2069 
2070  uint256 block_hash{block.GetHash()};
2071  assert(*pindex->phashBlock == block_hash);
2072  const bool parallel_script_checks{scriptcheckqueue.HasThreads()};
2073 
2074  const auto time_start{SteadyClock::now()};
2075  const CChainParams& params{m_chainman.GetParams()};
2076 
2077  // Check it again in case a previous version let a bad block in
2078  // NOTE: We don't currently (re-)invoke ContextualCheckBlock() or
2079  // ContextualCheckBlockHeader() here. This means that if we add a new
2080  // consensus rule that is enforced in one of those two functions, then we
2081  // may have let in a block that violates the rule prior to updating the
2082  // software, and we would NOT be enforcing the rule here. Fully solving
2083  // upgrade from one software version to the next after a consensus rule
2084  // change is potentially tricky and issue-specific (see NeedsRedownload()
2085  // for one approach that was used for BIP 141 deployment).
2086  // Also, currently the rule against blocks more than 2 hours in the future
2087  // is enforced in ContextualCheckBlockHeader(); we wouldn't want to
2088  // re-enforce that rule here (at least until we make it impossible for
2089  // m_adjusted_time_callback() to go backward).
2090  if (!CheckBlock(block, state, params.GetConsensus(), !fJustCheck, !fJustCheck)) {
2092  // We don't write down blocks to disk if they may have been
2093  // corrupted, so this should be impossible unless we're having hardware
2094  // problems.
2095  return AbortNode(state, "Corrupt block found indicating potential hardware failure; shutting down");
2096  }
2097  return error("%s: Consensus::CheckBlock: %s", __func__, state.ToString());
2098  }
2099 
2100  // verify that the view's current state corresponds to the previous block
2101  uint256 hashPrevBlock = pindex->pprev == nullptr ? uint256() : pindex->pprev->GetBlockHash();
2102  assert(hashPrevBlock == view.GetBestBlock());
2103 
2104  num_blocks_total++;
2105 
2106  // Special case for the genesis block, skipping connection of its transactions
2107  // (its coinbase is unspendable)
2108  if (block_hash == params.GetConsensus().hashGenesisBlock) {
2109  if (!fJustCheck)
2110  view.SetBestBlock(pindex->GetBlockHash());
2111  return true;
2112  }
2113 
2114  bool fScriptChecks = true;
2115  if (!m_chainman.AssumedValidBlock().IsNull()) {
2116  // We've been configured with the hash of a block which has been externally verified to have a valid history.
2117  // A suitable default value is included with the software and updated from time to time. Because validity
2118  // relative to a piece of software is an objective fact these defaults can be easily reviewed.
2119  // This setting doesn't force the selection of any particular chain but makes validating some faster by
2120  // effectively caching the result of part of the verification.
2121  BlockMap::const_iterator it{m_blockman.m_block_index.find(m_chainman.AssumedValidBlock())};
2122  if (it != m_blockman.m_block_index.end()) {
2123  if (it->second.GetAncestor(pindex->nHeight) == pindex &&
2124  m_chainman.m_best_header->GetAncestor(pindex->nHeight) == pindex &&
2126  // This block is a member of the assumed verified chain and an ancestor of the best header.
2127  // Script verification is skipped when connecting blocks under the
2128  // assumevalid block. Assuming the assumevalid block is valid this
2129  // is safe because block merkle hashes are still computed and checked,
2130  // Of course, if an assumed valid block is invalid due to false scriptSigs
2131  // this optimization would allow an invalid chain to be accepted.
2132  // The equivalent time check discourages hash power from extorting the network via DOS attack
2133  // into accepting an invalid block through telling users they must manually set assumevalid.
2134  // Requiring a software change or burying the invalid block, regardless of the setting, makes
2135  // it hard to hide the implication of the demand. This also avoids having release candidates
2136  // that are hardly doing any signature verification at all in testing without having to
2137  // artificially set the default assumed verified block further back.
2138  // The test against the minimum chain work prevents the skipping when denied access to any chain at
2139  // least as good as the expected chain.
2140  fScriptChecks = (GetBlockProofEquivalentTime(*m_chainman.m_best_header, *pindex, *m_chainman.m_best_header, params.GetConsensus()) <= 60 * 60 * 24 * 7 * 2);
2141  }
2142  }
2143  }
2144 
2145  const auto time_1{SteadyClock::now()};
2146  time_check += time_1 - time_start;
2147  LogPrint(BCLog::BENCH, " - Sanity checks: %.2fms [%.2fs (%.2fms/blk)]\n",
2148  Ticks<MillisecondsDouble>(time_1 - time_start),
2149  Ticks<SecondsDouble>(time_check),
2150  Ticks<MillisecondsDouble>(time_check) / num_blocks_total);
2151 
2152  // Do not allow blocks that contain transactions which 'overwrite' older transactions,
2153  // unless those are already completely spent.
2154  // If such overwrites are allowed, coinbases and transactions depending upon those
2155  // can be duplicated to remove the ability to spend the first instance -- even after
2156  // being sent to another address.
2157  // See BIP30, CVE-2012-1909, and http://r6.ca/blog/20120206T005236Z.html for more information.
2158  // This rule was originally applied to all blocks with a timestamp after March 15, 2012, 0:00 UTC.
2159  // Now that the whole chain is irreversibly beyond that time it is applied to all blocks except the
2160  // two in the chain that violate it. This prevents exploiting the issue against nodes during their
2161  // initial block download.
2162  bool fEnforceBIP30 = !IsBIP30Repeat(*pindex);
2163 
2164  // Once BIP34 activated it was not possible to create new duplicate coinbases and thus other than starting
2165  // with the 2 existing duplicate coinbase pairs, not possible to create overwriting txs. But by the
2166  // time BIP34 activated, in each of the existing pairs the duplicate coinbase had overwritten the first
2167  // before the first had been spent. Since those coinbases are sufficiently buried it's no longer possible to create further
2168  // duplicate transactions descending from the known pairs either.
2169  // If we're on the known chain at height greater than where BIP34 activated, we can save the db accesses needed for the BIP30 check.
2170 
2171  // BIP34 requires that a block at height X (block X) has its coinbase
2172  // scriptSig start with a CScriptNum of X (indicated height X). The above
2173  // logic of no longer requiring BIP30 once BIP34 activates is flawed in the
2174  // case that there is a block X before the BIP34 height of 227,931 which has
2175  // an indicated height Y where Y is greater than X. The coinbase for block
2176  // X would also be a valid coinbase for block Y, which could be a BIP30
2177  // violation. An exhaustive search of all mainnet coinbases before the
2178  // BIP34 height which have an indicated height greater than the block height
2179  // reveals many occurrences. The 3 lowest indicated heights found are
2180  // 209,921, 490,897, and 1,983,702 and thus coinbases for blocks at these 3
2181  // heights would be the first opportunity for BIP30 to be violated.
2182 
2183  // The search reveals a great many blocks which have an indicated height
2184  // greater than 1,983,702, so we simply remove the optimization to skip
2185  // BIP30 checking for blocks at height 1,983,702 or higher. Before we reach
2186  // that block in another 25 years or so, we should take advantage of a
2187  // future consensus change to do a new and improved version of BIP34 that
2188  // will actually prevent ever creating any duplicate coinbases in the
2189  // future.
2190  static constexpr int BIP34_IMPLIES_BIP30_LIMIT = 1983702;
2191 
2192  // There is no potential to create a duplicate coinbase at block 209,921
2193  // because this is still before the BIP34 height and so explicit BIP30
2194  // checking is still active.
2195 
2196  // The final case is block 176,684 which has an indicated height of
2197  // 490,897. Unfortunately, this issue was not discovered until about 2 weeks
2198  // before block 490,897 so there was not much opportunity to address this
2199  // case other than to carefully analyze it and determine it would not be a
2200  // problem. Block 490,897 was, in fact, mined with a different coinbase than
2201  // block 176,684, but it is important to note that even if it hadn't been or
2202  // is remined on an alternate fork with a duplicate coinbase, we would still
2203  // not run into a BIP30 violation. This is because the coinbase for 176,684
2204  // is spent in block 185,956 in transaction
2205  // d4f7fbbf92f4a3014a230b2dc70b8058d02eb36ac06b4a0736d9d60eaa9e8781. This
2206  // spending transaction can't be duplicated because it also spends coinbase
2207  // 0328dd85c331237f18e781d692c92de57649529bd5edf1d01036daea32ffde29. This
2208  // coinbase has an indicated height of over 4.2 billion, and wouldn't be
2209  // duplicatable until that height, and it's currently impossible to create a
2210  // chain that long. Nevertheless we may wish to consider a future soft fork
2211  // which retroactively prevents block 490,897 from creating a duplicate
2212  // coinbase. The two historical BIP30 violations often provide a confusing
2213  // edge case when manipulating the UTXO and it would be simpler not to have
2214  // another edge case to deal with.
2215 
2216  // testnet3 has no blocks before the BIP34 height with indicated heights
2217  // post BIP34 before approximately height 486,000,000. After block
2218  // 1,983,702 testnet3 starts doing unnecessary BIP30 checking again.
2219  assert(pindex->pprev);
2220  CBlockIndex* pindexBIP34height = pindex->pprev->GetAncestor(params.GetConsensus().BIP34Height);
2221  //Only continue to enforce if we're below BIP34 activation height or the block hash at that height doesn't correspond.
2222  fEnforceBIP30 = fEnforceBIP30 && (!pindexBIP34height || !(pindexBIP34height->GetBlockHash() == params.GetConsensus().BIP34Hash));
2223 
2224  // TODO: Remove BIP30 checking from block height 1,983,702 on, once we have a
2225  // consensus change that ensures coinbases at those heights cannot
2226  // duplicate earlier coinbases.
2227  if (fEnforceBIP30 || pindex->nHeight >= BIP34_IMPLIES_BIP30_LIMIT) {
2228  for (const auto& tx : block.vtx) {
2229  for (size_t o = 0; o < tx->vout.size(); o++) {
2230  if (view.HaveCoin(COutPoint(tx->GetHash(), o))) {
2231  LogPrintf("ERROR: ConnectBlock(): tried to overwrite transaction\n");
2232  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-BIP30");
2233  }
2234  }
2235  }
2236  }
2237 
2238  // Enforce BIP68 (sequence locks)
2239  int nLockTimeFlags = 0;
2241  nLockTimeFlags |= LOCKTIME_VERIFY_SEQUENCE;
2242  }
2243 
2244  // Get the script flags for this block
2245  unsigned int flags{GetBlockScriptFlags(*pindex, m_chainman)};
2246 
2247  const auto time_2{SteadyClock::now()};
2248  time_forks += time_2 - time_1;
2249  LogPrint(BCLog::BENCH, " - Fork checks: %.2fms [%.2fs (%.2fms/blk)]\n",
2250  Ticks<MillisecondsDouble>(time_2 - time_1),
2251  Ticks<SecondsDouble>(time_forks),
2252  Ticks<MillisecondsDouble>(time_forks) / num_blocks_total);
2253 
2254  CBlockUndo blockundo;
2255 
2256  // Precomputed transaction data pointers must not be invalidated
2257  // until after `control` has run the script checks (potentially
2258  // in multiple threads). Preallocate the vector size so a new allocation
2259  // doesn't invalidate pointers into the vector, and keep txsdata in scope
2260  // for as long as `control`.
2261  CCheckQueueControl<CScriptCheck> control(fScriptChecks && parallel_script_checks ? &scriptcheckqueue : nullptr);
2262  std::vector<PrecomputedTransactionData> txsdata(block.vtx.size());
2263 
2264  std::vector<int> prevheights;
2265  CAmount nFees = 0;
2266  int nInputs = 0;
2267  int64_t nSigOpsCost = 0;
2268  blockundo.vtxundo.reserve(block.vtx.size() - 1);
2269  for (unsigned int i = 0; i < block.vtx.size(); i++)
2270  {
2271  const CTransaction &tx = *(block.vtx[i]);
2272 
2273  nInputs += tx.vin.size();
2274 
2275  if (!tx.IsCoinBase())
2276  {
2277  CAmount txfee = 0;
2278  TxValidationState tx_state;
2279  if (!Consensus::CheckTxInputs(tx, tx_state, view, pindex->nHeight, txfee)) {
2280  // Any transaction validation failure in ConnectBlock is a block consensus failure
2282  tx_state.GetRejectReason(), tx_state.GetDebugMessage());
2283  return error("%s: Consensus::CheckTxInputs: %s, %s", __func__, tx.GetHash().ToString(), state.ToString());
2284  }
2285  nFees += txfee;
2286  if (!MoneyRange(nFees)) {
2287  LogPrintf("ERROR: %s: accumulated fee in the block out of range.\n", __func__);
2288  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-accumulated-fee-outofrange");
2289  }
2290 
2291  // Check that transaction is BIP68 final
2292  // BIP68 lock checks (as opposed to nLockTime checks) must
2293  // be in ConnectBlock because they require the UTXO set
2294  prevheights.resize(tx.vin.size());
2295  for (size_t j = 0; j < tx.vin.size(); j++) {
2296  prevheights[j] = view.AccessCoin(tx.vin[j].prevout).nHeight;
2297  }
2298 
2299  if (!SequenceLocks(tx, nLockTimeFlags, prevheights, *pindex)) {
2300  LogPrintf("ERROR: %s: contains a non-BIP68-final transaction\n", __func__);
2301  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-nonfinal");
2302  }
2303  }
2304 
2305  // GetTransactionSigOpCost counts 3 types of sigops:
2306  // * legacy (always)
2307  // * p2sh (when P2SH enabled in flags and excludes coinbase)
2308  // * witness (when witness enabled in flags and excludes coinbase)
2309  nSigOpsCost += GetTransactionSigOpCost(tx, view, flags);
2310  if (nSigOpsCost > MAX_BLOCK_SIGOPS_COST) {
2311  LogPrintf("ERROR: ConnectBlock(): too many sigops\n");
2312  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-sigops");
2313  }
2314 
2315  if (!tx.IsCoinBase())
2316  {
2317  std::vector<CScriptCheck> vChecks;
2318  bool fCacheResults = fJustCheck; /* Don't cache results if we're actually connecting blocks (still consult the cache, though) */
2319  TxValidationState tx_state;
2320  if (fScriptChecks && !CheckInputScripts(tx, tx_state, view, flags, fCacheResults, fCacheResults, txsdata[i], parallel_script_checks ? &vChecks : nullptr)) {
2321  // Any transaction validation failure in ConnectBlock is a block consensus failure
2323  tx_state.GetRejectReason(), tx_state.GetDebugMessage());
2324  return error("ConnectBlock(): CheckInputScripts on %s failed with %s",
2325  tx.GetHash().ToString(), state.ToString());
2326  }
2327  control.Add(std::move(vChecks));
2328  }
2329 
2330  CTxUndo undoDummy;
2331  if (i > 0) {
2332  blockundo.vtxundo.push_back(CTxUndo());
2333  }
2334  UpdateCoins(tx, view, i == 0 ? undoDummy : blockundo.vtxundo.back(), pindex->nHeight);
2335  }
2336  const auto time_3{SteadyClock::now()};
2337  time_connect += time_3 - time_2;
2338  LogPrint(BCLog::BENCH, " - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs (%.2fms/blk)]\n", (unsigned)block.vtx.size(),
2339  Ticks<MillisecondsDouble>(time_3 - time_2), Ticks<MillisecondsDouble>(time_3 - time_2) / block.vtx.size(),
2340  nInputs <= 1 ? 0 : Ticks<MillisecondsDouble>(time_3 - time_2) / (nInputs - 1),
2341  Ticks<SecondsDouble>(time_connect),
2342  Ticks<MillisecondsDouble>(time_connect) / num_blocks_total);
2343 
2344  CAmount blockReward = nFees + GetBlockSubsidy(pindex->nHeight, params.GetConsensus());
2345  if (block.vtx[0]->GetValueOut() > blockReward) {
2346  LogPrintf("ERROR: ConnectBlock(): coinbase pays too much (actual=%d vs limit=%d)\n", block.vtx[0]->GetValueOut(), blockReward);
2347  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-amount");
2348  }
2349 
2350  if (!control.Wait()) {
2351  LogPrintf("ERROR: %s: CheckQueue failed\n", __func__);
2352  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "block-validation-failed");
2353  }
2354  const auto time_4{SteadyClock::now()};
2355  time_verify += time_4 - time_2;
2356  LogPrint(BCLog::BENCH, " - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs (%.2fms/blk)]\n", nInputs - 1,
2357  Ticks<MillisecondsDouble>(time_4 - time_2),
2358  nInputs <= 1 ? 0 : Ticks<MillisecondsDouble>(time_4 - time_2) / (nInputs - 1),
2359  Ticks<SecondsDouble>(time_verify),
2360  Ticks<MillisecondsDouble>(time_verify) / num_blocks_total);
2361 
2362  if (fJustCheck)
2363  return true;
2364 
2365  if (!m_blockman.WriteUndoDataForBlock(blockundo, state, pindex, params)) {
2366  return false;
2367  }
2368 
2369  const auto time_5{SteadyClock::now()};
2370  time_undo += time_5 - time_4;
2371  LogPrint(BCLog::BENCH, " - Write undo data: %.2fms [%.2fs (%.2fms/blk)]\n",
2372  Ticks<MillisecondsDouble>(time_5 - time_4),
2373  Ticks<SecondsDouble>(time_undo),
2374  Ticks<MillisecondsDouble>(time_undo) / num_blocks_total);
2375 
2376  if (!pindex->IsValid(BLOCK_VALID_SCRIPTS)) {
2378  m_blockman.m_dirty_blockindex.insert(pindex);
2379  }
2380 
2381  // add this block to the view's block chain
2382  view.SetBestBlock(pindex->GetBlockHash());
2383 
2384  const auto time_6{SteadyClock::now()};
2385  time_index += time_6 - time_5;
2386  LogPrint(BCLog::BENCH, " - Index writing: %.2fms [%.2fs (%.2fms/blk)]\n",
2387  Ticks<MillisecondsDouble>(time_6 - time_5),
2388  Ticks<SecondsDouble>(time_index),
2389  Ticks<MillisecondsDouble>(time_index) / num_blocks_total);
2390 
2391  TRACE6(validation, block_connected,
2392  block_hash.data(),
2393  pindex->nHeight,
2394  block.vtx.size(),
2395  nInputs,
2396  nSigOpsCost,
2397  time_5 - time_start // in microseconds (µs)
2398  );
2399 
2400  return true;
2401 }
2402 
CoinsCacheSizeState Chainstate::GetCoinsCacheSizeState()
{
    // Convenience overload: delegates to the two-argument form below using the
    // chainstate's configured cache/mempool budgets.
    // NOTE(review): the argument lines of this call are not visible in this
    // excerpt — confirm against the full source before editing.
    return this->GetCoinsCacheSizeState(
}
2410 
CoinsCacheSizeState Chainstate::GetCoinsCacheSizeState(
    size_t max_coins_cache_size_bytes,
    size_t max_mempool_size_bytes)
{
    // Unused mempool budget may be borrowed by the coins cache: the effective
    // budget is the coins-cache limit plus any mempool slack (never negative).
    const int64_t nMempoolUsage = m_mempool ? m_mempool->DynamicMemoryUsage() : 0;
    int64_t cacheSize = CoinsTip().DynamicMemoryUsage();
    int64_t nTotalSpace =
        max_coins_cache_size_bytes + std::max<int64_t>(int64_t(max_mempool_size_bytes) - nMempoolUsage, 0);

    // "Large" means within 10% or MAX_BLOCK_COINSDB_USAGE_BYTES of the budget,
    // whichever threshold is higher.
    static constexpr int64_t MAX_BLOCK_COINSDB_USAGE_BYTES = 10 * 1024 * 1024; // 10MB
    int64_t large_threshold =
        std::max((9 * nTotalSpace) / 10, nTotalSpace - MAX_BLOCK_COINSDB_USAGE_BYTES);

    if (cacheSize > nTotalSpace) {
        // Cache exceeds the total budget.
        // NOTE(review): the CRITICAL/LARGE return statements appear to be
        // elided from this excerpt — confirm against the full source.
        LogPrintf("Cache size (%s) exceeds total space (%s)\n", cacheSize, nTotalSpace);
    } else if (cacheSize > large_threshold) {
    }
    return CoinsCacheSizeState::OK;
}
2434 
    BlockValidationState &state,
    FlushStateMode mode,
    int nManualPruneHeight)
{
    // NOTE(review): the first line of this function's signature (return type
    // and name) is not visible in this excerpt; the body below matches
    // Chainstate::FlushStateToDisk — confirm against the full source.
    LOCK(cs_main);
    assert(this->CanFlushToDisk());
    std::set<int> setFilesToPrune;
    bool full_flush_completed = false;

    // Snapshot cache stats up-front so the flush log/tracepoint reflects the
    // pre-flush state of the coins cache.
    const size_t coins_count = CoinsTip().GetCacheSize();
    const size_t coins_mem_usage = CoinsTip().DynamicMemoryUsage();

    try {
    {
        bool fFlushForPrune = false;
        bool fDoFullFlush = false;

        CoinsCacheSizeState cache_state = GetCoinsCacheSizeState();
        if (m_blockman.IsPruneMode() && (m_blockman.m_check_for_pruning || nManualPruneHeight > 0) && !fReindex) {
            // make sure we don't prune above any of the prune locks bestblocks
            // pruning is height-based
            int last_prune{m_chain.Height()}; // last height we can prune
            std::optional<std::string> limiting_lock; // prune lock that actually was the limiting factor, only used for logging

            for (const auto& prune_lock : m_blockman.m_prune_locks) {
                if (prune_lock.second.height_first == std::numeric_limits<int>::max()) continue;
                // Remove the buffer and one additional block here to get actual height that is outside of the buffer
                const int lock_height{prune_lock.second.height_first - PRUNE_LOCK_BUFFER - 1};
                last_prune = std::max(1, std::min(last_prune, lock_height));
                if (last_prune == lock_height) {
                    limiting_lock = prune_lock.first;
                }
            }

            if (limiting_lock) {
                LogPrint(BCLog::PRUNE, "%s limited pruning to height %d\n", limiting_lock.value(), last_prune);
            }

            if (nManualPruneHeight > 0) {
                LOG_TIME_MILLIS_WITH_CATEGORY("find files to prune (manual)", BCLog::BENCH);

                m_blockman.FindFilesToPruneManual(setFilesToPrune, std::min(last_prune, nManualPruneHeight), m_chain.Height());
            } else {
                LOG_TIME_MILLIS_WITH_CATEGORY("find files to prune", BCLog::BENCH);

                m_blockman.FindFilesToPrune(setFilesToPrune, m_chainman.GetParams().PruneAfterHeight(), m_chain.Height(), last_prune, IsInitialBlockDownload());
            }
            if (!setFilesToPrune.empty()) {
                fFlushForPrune = true;
                // Record (once) that this node has pruned, so a later restart
                // knows block data may be incomplete.
                if (!m_blockman.m_have_pruned) {
                    m_blockman.m_block_tree_db->WriteFlag("prunedblockfiles", true);
                    m_blockman.m_have_pruned = true;
                }
            }
        }
        const auto nNow{SteadyClock::now()};
        // Avoid writing/flushing immediately after startup.
        if (m_last_write == decltype(m_last_write){}) {
            m_last_write = nNow;
        }
        if (m_last_flush == decltype(m_last_flush){}) {
            m_last_flush = nNow;
        }
        // The cache is large and we're within 10% and 10 MiB of the limit, but we have time now (not in the middle of a block processing).
        bool fCacheLarge = mode == FlushStateMode::PERIODIC && cache_state >= CoinsCacheSizeState::LARGE;
        // The cache is over the limit, we have to write now.
        bool fCacheCritical = mode == FlushStateMode::IF_NEEDED && cache_state >= CoinsCacheSizeState::CRITICAL;
        // It's been a while since we wrote the block index to disk. Do this frequently, so we don't need to redownload after a crash.
        bool fPeriodicWrite = mode == FlushStateMode::PERIODIC && nNow > m_last_write + DATABASE_WRITE_INTERVAL;
        // It's been very long since we flushed the cache. Do this infrequently, to optimize cache usage.
        bool fPeriodicFlush = mode == FlushStateMode::PERIODIC && nNow > m_last_flush + DATABASE_FLUSH_INTERVAL;
        // Combine all conditions that result in a full cache flush.
        fDoFullFlush = (mode == FlushStateMode::ALWAYS) || fCacheLarge || fCacheCritical || fPeriodicFlush || fFlushForPrune;
        // Write blocks and block index to disk.
        if (fDoFullFlush || fPeriodicWrite) {
            // Ensure we can write block index
            // NOTE(review): the disk-space check condition guarding this
            // AbortNode appears to be elided from this excerpt.
            return AbortNode(state, "Disk space is too low!", _("Disk space is too low!"));
            }
            {
                LOG_TIME_MILLIS_WITH_CATEGORY("write block and undo data to disk", BCLog::BENCH);

                // First make sure all block and undo data is flushed to disk.
                // NOTE(review): the flush call itself appears to be elided here.
            }

            // Then update all block file information (which may refer to block and undo files).
            {
                LOG_TIME_MILLIS_WITH_CATEGORY("write block index to disk", BCLog::BENCH);

                if (!m_blockman.WriteBlockIndexDB()) {
                    return AbortNode(state, "Failed to write to block index database");
                }
            }
            // Finally remove any pruned files
            if (fFlushForPrune) {
                LOG_TIME_MILLIS_WITH_CATEGORY("unlink pruned files", BCLog::BENCH);

                UnlinkPrunedFiles(setFilesToPrune);
            }
            m_last_write = nNow;
        }
        // Flush best chain related state. This can only be done if the blocks / block index write was also done.
        if (fDoFullFlush && !CoinsTip().GetBestBlock().IsNull()) {
            LOG_TIME_MILLIS_WITH_CATEGORY(strprintf("write coins cache to disk (%d coins, %.2fkB)",
                coins_count, coins_mem_usage / 1000), BCLog::BENCH);

            // Typical Coin structures on disk are around 48 bytes in size.
            // Pushing a new one to the database can cause it to be written
            // twice (once in the log, and once in the tables). This is already
            // an overestimation, as most will delete an existing entry or
            // overwrite one. Still, use a conservative safety factor of 2.
            if (!CheckDiskSpace(gArgs.GetDataDirNet(), 48 * 2 * 2 * CoinsTip().GetCacheSize())) {
                return AbortNode(state, "Disk space is too low!", _("Disk space is too low!"));
            }
            // Flush the chainstate (which may refer to block index entries).
            if (!CoinsTip().Flush())
                return AbortNode(state, "Failed to write to coin database");
            m_last_flush = nNow;
            full_flush_completed = true;
            TRACE5(utxocache, flush,
                   int64_t{Ticks<std::chrono::microseconds>(SteadyClock::now() - nNow)},
                   (uint32_t)mode,
                   (uint64_t)coins_count,
                   (uint64_t)coins_mem_usage,
                   (bool)fFlushForPrune);
        }
    }
    if (full_flush_completed) {
        // Update best block in wallet (so we can detect restored wallets).
        // NOTE(review): the signal emission appears to be elided from this excerpt.
    }
    } catch (const std::runtime_error& e) {
        return AbortNode(state, std::string("System error while flushing: ") + e.what());
    }
    return true;
}
2575 
{
    // Unconditional flush (FlushStateMode::ALWAYS); failures are logged, not
    // propagated. NOTE(review): the function signature line is not visible in
    // this excerpt (presumably Chainstate::ForceFlushStateToDisk).
    BlockValidationState state;
    if (!this->FlushStateToDisk(state, FlushStateMode::ALWAYS)) {
        LogPrintf("%s: failed to flush state (%s)\n", __func__, state.ToString());
    }
}
2583 
{
    // Flush with mode NONE after requesting a prune; failures are logged, not
    // propagated. NOTE(review): the function signature line and the statement
    // that requests pruning are not visible in this excerpt.
    BlockValidationState state;
    if (!this->FlushStateToDisk(state, FlushStateMode::NONE)) {
        LogPrintf("%s: failed to flush state (%s)\n", __func__, state.ToString());
    }
}
2592 
2593 static void DoWarning(const bilingual_str& warning)
2594 {
2595  static bool fWarned = false;
2596  SetMiscWarning(warning);
2597  if (!fWarned) {
2598  AlertNotify(warning.original);
2599  fWarned = true;
2600  }
2601 }
2602 
2604 static void AppendWarning(bilingual_str& res, const bilingual_str& warn)
2605 {
2606  if (!res.empty()) res += Untranslated(", ");
2607  res += warn;
2608 }
2609 
/** Log the new chain tip: hash, height, version, cumulative work, tx count,
 *  verification progress, coins-cache usage, and any warning messages.
 *  `prefix` distinguishes callers (e.g. background validation). */
static void UpdateTipLog(
    const CCoinsViewCache& coins_tip,
    const CBlockIndex* tip,
    const CChainParams& params,
    const std::string& func_name,
    const std::string& prefix,
    const std::string& warning_messages) EXCLUSIVE_LOCKS_REQUIRED(::cs_main)
{

    // NOTE(review): the argument supplying the date='%s' field appears to be
    // elided from this excerpt — confirm against the full source.
    LogPrintf("%s%s: new best=%s height=%d version=0x%08x log2_work=%f tx=%lu date='%s' progress=%f cache=%.1fMiB(%utxo)%s\n",
        prefix, func_name,
        tip->GetBlockHash().ToString(), tip->nHeight, tip->nVersion,
        log(tip->nChainWork.getdouble()) / log(2.0), (unsigned long)tip->nChainTx,
        GuessVerificationProgress(params.TxData(), tip),
        coins_tip.DynamicMemoryUsage() * (1.0 / (1 << 20)),
        coins_tip.GetCacheSize(),
        !warning_messages.empty() ? strprintf(" warning='%s'", warning_messages) : "");
}
2630 
/** Called whenever the chain tip changes: logs the new tip and, on the active
 *  chainstate, publishes the best block and checks unknown versionbit
 *  deployments for warnings. */
void Chainstate::UpdateTip(const CBlockIndex* pindexNew)
{
    const auto& coins_tip = this->CoinsTip();

    const CChainParams& params{m_chainman.GetParams()};

    // The remainder of the function isn't relevant if we are not acting on
    // the active chainstate, so return if need be.
    if (this != &m_chainman.ActiveChainstate()) {
        // Only log every so often so that we don't bury log messages at the tip.
        constexpr int BACKGROUND_LOG_INTERVAL = 2000;
        if (pindexNew->nHeight % BACKGROUND_LOG_INTERVAL == 0) {
            UpdateTipLog(coins_tip, pindexNew, params, __func__, "[background validation] ", "");
        }
        return;
    }

    // New best block
    // NOTE(review): the mempool-notification statement inside this branch
    // appears to be elided from this excerpt.
    if (m_mempool) {
    }

    // Publish the new best block hash and wake any waiters.
    // NOTE(review): the lock acquisition opening this scope appears to be
    // elided from this excerpt.
    {
        g_best_block = pindexNew->GetBlockHash();
        g_best_block_cv.notify_all();
    }

    bilingual_str warning_messages;
    if (!this->IsInitialBlockDownload()) {
        const CBlockIndex* pindex = pindexNew;
        // Scan each versionbit: warn if an unknown deployment is ACTIVE, and
        // accumulate a message if it is LOCKED_IN.
        for (int bit = 0; bit < VERSIONBITS_NUM_BITS; bit++) {
            // NOTE(review): the declaration of `checker` appears to be elided
            // from this excerpt.
            ThresholdState state = checker.GetStateFor(pindex, params.GetConsensus(), warningcache.at(bit));
            if (state == ThresholdState::ACTIVE || state == ThresholdState::LOCKED_IN) {
                const bilingual_str warning = strprintf(_("Unknown new rules activated (versionbit %i)"), bit);
                if (state == ThresholdState::ACTIVE) {
                    DoWarning(warning);
                } else {
                    AppendWarning(warning_messages, warning);
                }
            }
        }
    }
    UpdateTipLog(coins_tip, pindexNew, params, __func__, "", warning_messages.original);
}
2678 
{
    // Disconnect the current chain tip: read its block from disk, undo it
    // against the UTXO set, move prune locks back, optionally stash its
    // transactions for mempool re-insertion, and update the tip to pprev.
    // NOTE(review): the function signature line is not visible in this
    // excerpt (presumably Chainstate::DisconnectTip).
    CBlockIndex *pindexDelete = m_chain.Tip();
    assert(pindexDelete);
    assert(pindexDelete->pprev);
    // Read block from disk.
    std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
    CBlock& block = *pblock;
    if (!ReadBlockFromDisk(block, pindexDelete, m_chainman.GetConsensus())) {
        return error("DisconnectTip(): Failed to read block");
    }
    // Apply the block atomically to the chain state.
    const auto time_start{SteadyClock::now()};
    {
        CCoinsViewCache view(&CoinsTip());
        assert(view.GetBestBlock() == pindexDelete->GetBlockHash());
        if (DisconnectBlock(block, pindexDelete, view) != DISCONNECT_OK)
            return error("DisconnectTip(): DisconnectBlock %s failed", pindexDelete->GetBlockHash().ToString());
        bool flushed = view.Flush();
        assert(flushed);
    }
    LogPrint(BCLog::BENCH, "- Disconnect block: %.2fms\n",
             Ticks<MillisecondsDouble>(SteadyClock::now() - time_start));

    {
        // Prune locks that began at or after the tip should be moved backward so they get a chance to reorg
        const int max_height_first{pindexDelete->nHeight - 1};
        for (auto& prune_lock : m_blockman.m_prune_locks) {
            if (prune_lock.second.height_first <= max_height_first) continue;

            prune_lock.second.height_first = max_height_first;
            LogPrint(BCLog::PRUNE, "%s prune lock moved back to %d\n", prune_lock.first, max_height_first);
        }
    }

    // Write the chain state to disk, if necessary.
    // NOTE(review): the FlushStateToDisk condition guarding this return
    // appears to be elided from this excerpt.
        return false;
    }

    if (disconnectpool && m_mempool) {
        // Save transactions to re-add to mempool at end of reorg
        for (auto it = block.vtx.rbegin(); it != block.vtx.rend(); ++it) {
            disconnectpool->addTransaction(*it);
        }
        while (disconnectpool->DynamicMemoryUsage() > MAX_DISCONNECTED_TX_POOL_SIZE * 1000) {
            // Drop the earliest entry, and remove its children from the mempool.
            auto it = disconnectpool->queuedTx.get<insertion_order>().begin();
            // NOTE(review): the mempool-removal call for this entry appears to
            // be elided from this excerpt.
            disconnectpool->removeEntry(it);
        }
    }

    m_chain.SetTip(*pindexDelete->pprev);

    UpdateTip(pindexDelete->pprev);
    // Let wallets know transactions went from 1-confirmed to
    // 0-confirmed or conflicted:
    GetMainSignals().BlockDisconnected(pblock, pindexDelete);
    return true;
}
2753 
// Cumulative per-stage timers for ConnectTip(), reported via BCLog::BENCH logs.
static SteadyClock::duration time_read_from_disk_total{};
static SteadyClock::duration time_connect_total{};
static SteadyClock::duration time_flush{};
static SteadyClock::duration time_chainstate{};
static SteadyClock::duration time_post_connect{};
2759 
    // NOTE(review): the struct header (presumably `struct PerBlockConnectTrace {`)
    // is not visible in this excerpt.
    CBlockIndex* pindex = nullptr; // index entry of the connected block (non-owning)
    std::shared_ptr<const CBlock> pblock; // the connected block's data
};
private:
    // Per-block traces; always ends with one empty sentinel entry that the
    // next BlockConnected() call fills in.
    std::vector<PerBlockConnectTrace> blocksConnected;

public:
    // Start with a single empty entry, ready to receive the first block.
    explicit ConnectTrace() : blocksConnected(1) {}
2778 
2779  void BlockConnected(CBlockIndex* pindex, std::shared_ptr<const CBlock> pblock) {
2780  assert(!blocksConnected.back().pindex);
2781  assert(pindex);
2782  assert(pblock);
2783  blocksConnected.back().pindex = pindex;
2784  blocksConnected.back().pblock = std::move(pblock);
2785  blocksConnected.emplace_back();
2786  }
2787 
2788  std::vector<PerBlockConnectTrace>& GetBlocksConnected() {
2789  // We always keep one extra block at the end of our list because
2790  // blocks are added after all the conflicted transactions have
2791  // been filled in. Thus, the last entry should always be an empty
2792  // one waiting for the transactions from the next block. We pop
2793  // the last entry here to make sure the list we return is sane.
2794  assert(!blocksConnected.back().pindex);
2795  blocksConnected.pop_back();
2796  return blocksConnected;
2797  }
2798 };
2799 
/** Connect `pindexNew` as the new chain tip. Reads the block from disk unless
 *  `pblock` is supplied, applies it to the UTXO set, flushes, updates the
 *  mempool and chain, and records the block in `connectTrace`.
 *  Returns false on system error; sets `state` invalid on consensus failure. */
bool Chainstate::ConnectTip(BlockValidationState& state, CBlockIndex* pindexNew, const std::shared_ptr<const CBlock>& pblock, ConnectTrace& connectTrace, DisconnectedBlockTransactions& disconnectpool)
{

    assert(pindexNew->pprev == m_chain.Tip());
    // Read block from disk.
    const auto time_1{SteadyClock::now()};
    std::shared_ptr<const CBlock> pthisBlock;
    if (!pblock) {
        std::shared_ptr<CBlock> pblockNew = std::make_shared<CBlock>();
        if (!ReadBlockFromDisk(*pblockNew, pindexNew, m_chainman.GetConsensus())) {
            return AbortNode(state, "Failed to read block");
        }
        pthisBlock = pblockNew;
    } else {
        LogPrint(BCLog::BENCH, " - Using cached block\n");
        pthisBlock = pblock;
    }
    const CBlock& blockConnecting = *pthisBlock;
    // Apply the block atomically to the chain state.
    const auto time_2{SteadyClock::now()};
    time_read_from_disk_total += time_2 - time_1;
    SteadyClock::time_point time_3;
    LogPrint(BCLog::BENCH, " - Load block from disk: %.2fms [%.2fs (%.2fms/blk)]\n",
             Ticks<MillisecondsDouble>(time_2 - time_1),
             Ticks<SecondsDouble>(time_read_from_disk_total),
             Ticks<MillisecondsDouble>(time_read_from_disk_total) / num_blocks_total);
    {
        CCoinsViewCache view(&CoinsTip());
        bool rv = ConnectBlock(blockConnecting, state, pindexNew, view);
        // Notify observers of the result before acting on a failure.
        GetMainSignals().BlockChecked(blockConnecting, state);
        if (!rv) {
            if (state.IsInvalid())
                InvalidBlockFound(pindexNew, state);
            return error("%s: ConnectBlock %s failed, %s", __func__, pindexNew->GetBlockHash().ToString(), state.ToString());
        }
        time_3 = SteadyClock::now();
        time_connect_total += time_3 - time_2;
        assert(num_blocks_total > 0);
        LogPrint(BCLog::BENCH, "  - Connect total: %.2fms [%.2fs (%.2fms/blk)]\n",
                 Ticks<MillisecondsDouble>(time_3 - time_2),
                 Ticks<SecondsDouble>(time_connect_total),
                 Ticks<MillisecondsDouble>(time_connect_total) / num_blocks_total);
        bool flushed = view.Flush();
        assert(flushed);
    }
    const auto time_4{SteadyClock::now()};
    time_flush += time_4 - time_3;
    LogPrint(BCLog::BENCH, "  - Flush: %.2fms [%.2fs (%.2fms/blk)]\n",
             Ticks<MillisecondsDouble>(time_4 - time_3),
             Ticks<SecondsDouble>(time_flush),
             Ticks<MillisecondsDouble>(time_flush) / num_blocks_total);
    // Write the chain state to disk, if necessary.
    // NOTE(review): the FlushStateToDisk condition guarding this return
    // appears to be elided from this excerpt.
        return false;
    }
    const auto time_5{SteadyClock::now()};
    time_chainstate += time_5 - time_4;
    LogPrint(BCLog::BENCH, "  - Writing chainstate: %.2fms [%.2fs (%.2fms/blk)]\n",
             Ticks<MillisecondsDouble>(time_5 - time_4),
             Ticks<SecondsDouble>(time_chainstate),
             Ticks<MillisecondsDouble>(time_chainstate) / num_blocks_total);
    // Remove conflicting transactions from the mempool.
    if (m_mempool) {
        m_mempool->removeForBlock(blockConnecting.vtx, pindexNew->nHeight);
        disconnectpool.removeForBlock(blockConnecting.vtx);
    }
    // Update m_chain & related variables.
    m_chain.SetTip(*pindexNew);
    UpdateTip(pindexNew);

    const auto time_6{SteadyClock::now()};
    time_post_connect += time_6 - time_5;
    time_total += time_6 - time_1;
    LogPrint(BCLog::BENCH, "  - Connect postprocess: %.2fms [%.2fs (%.2fms/blk)]\n",
             Ticks<MillisecondsDouble>(time_6 - time_5),
             Ticks<SecondsDouble>(time_post_connect),
             Ticks<MillisecondsDouble>(time_post_connect) / num_blocks_total);
    LogPrint(BCLog::BENCH, "- Connect block: %.2fms [%.2fs (%.2fms/blk)]\n",
             Ticks<MillisecondsDouble>(time_6 - time_1),
             Ticks<SecondsDouble>(time_total),
             Ticks<MillisecondsDouble>(time_total) / num_blocks_total);

    // If we are the background validation chainstate, check to see if we are done
    // validating the snapshot (i.e. our tip has reached the snapshot's base block).
    if (this != &m_chainman.ActiveChainstate()) {
        // This call may set `m_disabled`, which is referenced immediately afterwards in
        // ActivateBestChain, so that we stop connecting blocks past the snapshot base.
        m_chainman.MaybeCompleteSnapshotValidation();
    }

    connectTrace.BlockConnected(pindexNew, std::move(pthisBlock));
    return true;
}
2901 
{
    // Walk candidate tips from most-work downward and return the best one
    // whose path from the active chain is fully usable (valid, data present).
    // NOTE(review): the function signature line is not visible in this
    // excerpt (presumably CBlockIndex* Chainstate::FindMostWorkChain()).
    do {
        CBlockIndex *pindexNew = nullptr;

        // Find the best candidate header.
        {
            std::set<CBlockIndex*, CBlockIndexWorkComparator>::reverse_iterator it = setBlockIndexCandidates.rbegin();
            if (it == setBlockIndexCandidates.rend())
                return nullptr;
            pindexNew = *it;
        }

        // Check whether all blocks on the path between the currently active chain and the candidate are valid.
        // Just going until the active chain is an optimization, as we know all blocks in it are valid already.
        CBlockIndex *pindexTest = pindexNew;
        bool fInvalidAncestor = false;
        while (pindexTest && !m_chain.Contains(pindexTest)) {
            assert(pindexTest->HaveTxsDownloaded() || pindexTest->nHeight == 0);

            // Pruned nodes may have entries in setBlockIndexCandidates for
            // which block files have been deleted. Remove those as candidates
            // for the most work chain if we come across them; we can't switch
            // to a chain unless we have all the non-active-chain parent blocks.
            bool fFailedChain = pindexTest->nStatus & BLOCK_FAILED_MASK;
            bool fMissingData = !(pindexTest->nStatus & BLOCK_HAVE_DATA);
            if (fFailedChain || fMissingData) {
                // Candidate chain is not usable (either invalid or missing data)
                if (fFailedChain && (m_chainman.m_best_invalid == nullptr || pindexNew->nChainWork > m_chainman.m_best_invalid->nChainWork)) {
                    m_chainman.m_best_invalid = pindexNew;
                }
                CBlockIndex *pindexFailed = pindexNew;
                // Remove the entire chain from the set.
                while (pindexTest != pindexFailed) {
                    if (fFailedChain) {
                        pindexFailed->nStatus |= BLOCK_FAILED_CHILD;
                    } else if (fMissingData) {
                        // If we're missing data, then add back to m_blocks_unlinked,
                        // so that if the block arrives in the future we can try adding
                        // to setBlockIndexCandidates again.
                        // NOTE(review): the first line of the insert call appears
                        // to be elided from this excerpt.
                        std::make_pair(pindexFailed->pprev, pindexFailed));
                    }
                    setBlockIndexCandidates.erase(pindexFailed);
                    pindexFailed = pindexFailed->pprev;
                }
                setBlockIndexCandidates.erase(pindexTest);
                fInvalidAncestor = true;
                break;
            }
            pindexTest = pindexTest->pprev;
        }
        if (!fInvalidAncestor)
            return pindexNew;
    } while(true);
}
2963 
    // Drop candidate tips with less work than the current tip; they can no
    // longer become the best chain. NOTE(review): the function signature line
    // is not visible in this excerpt (presumably
    // Chainstate::PruneBlockIndexCandidates).
    // Note that we can't delete the current block itself, as we may need to return to it later in case a
    // reorganization to a better block fails.
    std::set<CBlockIndex*, CBlockIndexWorkComparator>::iterator it = setBlockIndexCandidates.begin();
    while (it != setBlockIndexCandidates.end() && setBlockIndexCandidates.value_comp()(*it, m_chain.Tip())) {
        setBlockIndexCandidates.erase(it++);
    }
    // Either the current tip or a successor of it we're working towards is left in setBlockIndexCandidates.
    assert(!setBlockIndexCandidates.empty());
}
2975 
/**
 * Try to make some progress towards making pindexMostWork the active block.
 * Disconnects active blocks not on the new best chain, then connects up to
 * 32 blocks towards pindexMostWork per call (the caller loops).
 *
 * @param state          Filled with failure details on error.
 * @param pindexMostWork Target tip with the most accumulated work.
 * @param pblock         The block tied to pindexMostWork's hash, if any;
 *                       avoids re-reading it from disk when connecting the tip.
 * @param fInvalidFound  Set to true if a connected block proved invalid.
 * @param connectTrace   Accumulates connected blocks for later notifications.
 * @return false on a system error (disk/db); true otherwise, even if a block
 *         was found invalid (that is reported through fInvalidFound).
 * NOTE(review): a few lines (lock assertions, marking the failed block itself,
 * PruneBlockIndexCandidates, fork-warning check) are elided in this source view.
 */
2982 bool Chainstate::ActivateBestChainStep(BlockValidationState& state, CBlockIndex* pindexMostWork, const std::shared_ptr<const CBlock>& pblock, bool& fInvalidFound, ConnectTrace& connectTrace)
2983 {
2986 
2987  const CBlockIndex* pindexOldTip = m_chain.Tip();
2988  const CBlockIndex* pindexFork = m_chain.FindFork(pindexMostWork);
2989 
2990  // Disconnect active blocks which are no longer in the best chain.
2991  bool fBlocksDisconnected = false;
2992  DisconnectedBlockTransactions disconnectpool;
2993  while (m_chain.Tip() && m_chain.Tip() != pindexFork) {
2994  if (!DisconnectTip(state, &disconnectpool)) {
2995  // This is likely a fatal error, but keep the mempool consistent,
2996  // just in case. Only remove from the mempool in this case.
2997  MaybeUpdateMempoolForReorg(disconnectpool, false);
2998 
2999  // If we're unable to disconnect a block during normal operation,
3000  // then that is a failure of our local system -- we should abort
3001  // rather than stay on a less work chain.
3002  AbortNode(state, "Failed to disconnect block; see debug.log for details");
3003  return false;
3004  }
3005  fBlocksDisconnected = true;
3006  }
3007 
3008  // Build list of new blocks to connect (in descending height order).
3009  std::vector<CBlockIndex*> vpindexToConnect;
3010  bool fContinue = true;
3011  int nHeight = pindexFork ? pindexFork->nHeight : -1;
3012  while (fContinue && nHeight != pindexMostWork->nHeight) {
3013  // Don't iterate the entire list of potential improvements toward the best tip, as we likely only need
3014  // a few blocks along the way.
3015  int nTargetHeight = std::min(nHeight + 32, pindexMostWork->nHeight);
3016  vpindexToConnect.clear();
3017  vpindexToConnect.reserve(nTargetHeight - nHeight);
// Walk back from the target's ancestor at nTargetHeight down to the fork point,
// collecting blocks in descending height order (reversed when connecting below).
3018  CBlockIndex* pindexIter = pindexMostWork->GetAncestor(nTargetHeight);
3019  while (pindexIter && pindexIter->nHeight != nHeight) {
3020  vpindexToConnect.push_back(pindexIter);
3021  pindexIter = pindexIter->pprev;
3022  }
3023  nHeight = nTargetHeight;
3024 
3025  // Connect new blocks.
3026  for (CBlockIndex* pindexConnect : reverse_iterate(vpindexToConnect)) {
// Only pass pblock through when connecting the exact target tip; other blocks
// are read from disk inside ConnectTip.
3027  if (!ConnectTip(state, pindexConnect, pindexConnect == pindexMostWork ? pblock : std::shared_ptr<const CBlock>(), connectTrace, disconnectpool)) {
3028  if (state.IsInvalid()) {
3029  // The block violates a consensus rule.
3031  InvalidChainFound(vpindexToConnect.front());
3032  }
// Reset state: an invalid block is not a failure of this function.
3033  state = BlockValidationState();
3034  fInvalidFound = true;
3035  fContinue = false;
3036  break;
3037  } else {
3038  // A system error occurred (disk space, database error, ...).
3039  // Make the mempool consistent with the current tip, just in case
3040  // any observers try to use it before shutdown.
3041  MaybeUpdateMempoolForReorg(disconnectpool, false);
3042  return false;
3043  }
3044  } else {
3046  if (!pindexOldTip || m_chain.Tip()->nChainWork > pindexOldTip->nChainWork) {
3047  // We're in a better position than we were. Return temporarily to release the lock.
3048  fContinue = false;
3049  break;
3050  }
3051  }
3052  }
3053  }
3054 
3055  if (fBlocksDisconnected) {
3056  // If any blocks were disconnected, disconnectpool may be non empty. Add
3057  // any disconnected transactions back to the mempool.
3058  MaybeUpdateMempoolForReorg(disconnectpool, true);
3059  }
// Sanity-check mempool consistency against the (possibly new) tip's UTXO set.
3060  if (m_mempool) m_mempool->check(this->CoinsTip(), this->m_chain.Height() + 1);
3061 
3063 
3064  return true;
3065 }
3066 
3068 {
3072 }
3073 
// Body of NotifyHeaderTip (signature elided in this view). Fires a UI
// notification when the best-known header (m_best_header) has changed since
// the previous call; returns whether a notification was sent. The static
// pindexHeaderOld deduplicates repeat calls with an unchanged best header.
3075  bool fNotify = false;
3076  bool fInitialBlockDownload = false;
3077  static CBlockIndex* pindexHeaderOld = nullptr;
3078  CBlockIndex* pindexHeader = nullptr;
3079  {
3080  LOCK(cs_main);
3081  pindexHeader = chainstate.m_chainman.m_best_header;
3082 
3083  if (pindexHeader != pindexHeaderOld) {
3084  fNotify = true;
3085  fInitialBlockDownload = chainstate.IsInitialBlockDownload();
3086  pindexHeaderOld = pindexHeader;
3087  }
3088  }
3089  // Send block tip changed notifications without cs_main
3090  if (fNotify) {
3091  uiInterface.NotifyHeaderTip(GetSynchronizationState(fInitialBlockDownload), pindexHeader->nHeight, pindexHeader->nTime, false);
3092  }
3093  return fNotify;
3094 }
3095 
// Fragment of LimitValidationInterfaceQueue (signature and the queue-drain
// call are elided in this view). When more than 10 validation-interface
// callbacks are pending, the elided line presumably blocks until the queue
// drains (SyncWithValidationInterfaceQueue) — confirm against full source.
// This bounds memory used by queued notifications during long connects.
3098 
3099  if (GetMainSignals().CallbacksPending() > 10) {
3101  }
3102 }
3103 
/**
 * Make the chain with the most work the active chain, connecting blocks in
 * batches and periodically releasing cs_main so other threads (and the
 * validation callback queue) can make progress.
 *
 * @param state  Filled with failure details on a system error.
 * @param pblock Optional block that may match the most-work tip, avoiding a
 *               disk read when it does.
 * @return false on a system error or when this (background) chainstate is
 *         disabled; true otherwise.
 * NOTE(review): lock assertions and the queue-limit/flush calls are elided
 * in this source view.
 */
3104 bool Chainstate::ActivateBestChain(BlockValidationState& state, std::shared_ptr<const CBlock> pblock)
3105 {
3107 
3108  // Note that while we're often called here from ProcessNewBlock, this is
3109  // far from a guarantee. Things in the P2P/RPC will often end up calling
3110  // us in the middle of ProcessNewBlock - do not assume pblock is set
3111  // sanely for performance or correctness!
3113 
3114  // ABC maintains a fair degree of expensive-to-calculate internal state
3115  // because this function periodically releases cs_main so that it does not lock up other threads for too long
3116  // during large connects - and to allow for e.g. the callback queue to drain
3117  // we use m_chainstate_mutex to enforce mutual exclusion so that only one caller may execute this function at a time
3119 
3120  // Belt-and-suspenders check that we aren't attempting to advance the background
3121  // chainstate past the snapshot base block.
3122  if (WITH_LOCK(::cs_main, return m_disabled)) {
3123  LogPrintf("m_disabled is set - this chainstate should not be in operation. " /* Continued */
3124  "Please report this as a bug. %s\n", PACKAGE_BUGREPORT);
3125  return false;
3126  }
3127 
3128  CBlockIndex *pindexMostWork = nullptr;
3129  CBlockIndex *pindexNewTip = nullptr;
3130  int nStopAtHeight = gArgs.GetIntArg("-stopatheight", DEFAULT_STOPATHEIGHT);
3131  do {
3132  // Block until the validation queue drains. This should largely
3133  // never happen in normal operation, however may happen during
3134  // reindex, causing memory blowup if we run too far ahead.
3135  // Note that if a validationinterface callback ends up calling
3136  // ActivateBestChain this may lead to a deadlock! We should
3137  // probably have a DEBUG_LOCKORDER test for this in the future.
3139 
3140  {
3141  LOCK(cs_main);
3142  // Lock transaction pool for at least as long as it takes for connectTrace to be consumed
3143  LOCK(MempoolMutex());
3144  CBlockIndex* starting_tip = m_chain.Tip();
3145  bool blocks_connected = false;
3146  do {
3147  // We absolutely may not unlock cs_main until we've made forward progress
3148  // (with the exception of shutdown due to hardware issues, low disk space, etc).
3149  ConnectTrace connectTrace; // Destructed before cs_main is unlocked
3150 
// Lazily (re)compute the most-work candidate; cached across inner iterations
// unless an invalid block forced a rescan.
3151  if (pindexMostWork == nullptr) {
3152  pindexMostWork = FindMostWorkChain();
3153  }
3154 
3155  // Whether we have anything to do at all.
3156  if (pindexMostWork == nullptr || pindexMostWork == m_chain.Tip()) {
3157  break;
3158  }
3159 
3160  bool fInvalidFound = false;
3161  std::shared_ptr<const CBlock> nullBlockPtr;
3162  if (!ActivateBestChainStep(state, pindexMostWork, pblock && pblock->GetHash() == pindexMostWork->GetBlockHash() ? pblock : nullBlockPtr, fInvalidFound, connectTrace)) {
3163  // A system error occurred
3164  return false;
3165  }
3166  blocks_connected = true;
3167 
3168  if (fInvalidFound) {
3169  // Wipe cache, we may need another branch now.
3170  pindexMostWork = nullptr;
3171  }
3172  pindexNewTip = m_chain.Tip();
3173 
// Notify subscribers of each connected block, in connection order.
3174  for (const PerBlockConnectTrace& trace : connectTrace.GetBlocksConnected()) {
3175  assert(trace.pblock && trace.pindex);
3176  GetMainSignals().BlockConnected(trace.pblock, trace.pindex);
3177  }
3178 
3179  // This will have been toggled in
3180  // ActivateBestChainStep -> ConnectTip -> MaybeCompleteSnapshotValidation,
3181  // if at all, so we should catch it here.
3182  //
3183  // Break this do-while to ensure we don't advance past the base snapshot.
3184  if (m_disabled) {
3185  break;
3186  }
// Keep going while the tip still has less work than where we started.
3187  } while (!m_chain.Tip() || (starting_tip && CBlockIndexWorkComparator()(m_chain.Tip(), starting_tip)));
3188  if (!blocks_connected) return true;
3189 
3190  const CBlockIndex* pindexFork = m_chain.FindFork(starting_tip);
3191  bool fInitialDownload = IsInitialBlockDownload();
3192 
3193  // Notify external listeners about the new tip.
3194  // Enqueue while holding cs_main to ensure that UpdatedBlockTip is called in the order in which blocks are connected
3195  if (pindexFork != pindexNewTip) {
3196  // Notify ValidationInterface subscribers
3197  GetMainSignals().UpdatedBlockTip(pindexNewTip, pindexFork, fInitialDownload);
3198 
3199  // Always notify the UI if a new block tip was connected
3200  uiInterface.NotifyBlockTip(GetSynchronizationState(fInitialDownload), pindexNewTip);
3201  }
3202  }
3203  // When we reach this point, we switched to a new tip (stored in pindexNewTip).
3204 
// Honor -stopatheight: initiate shutdown once the configured height is reached.
3205  if (nStopAtHeight && pindexNewTip && pindexNewTip->nHeight >= nStopAtHeight) StartShutdown();
3206 
3207  if (WITH_LOCK(::cs_main, return m_disabled)) {
3208  // Background chainstate has reached the snapshot base block, so exit.
3209  break;
3210  }
3211 
3212  // We check shutdown only after giving ActivateBestChainStep a chance to run once so that we
3213  // never shutdown before connecting the genesis block during LoadChainTip(). Previously this
3214  // caused an assert() failure during shutdown in such cases as the UTXO DB flushing checks
3215  // that the best block hash is non-null.
3216  if (ShutdownRequested()) break;
3217  } while (pindexNewTip != pindexMostWork);
3218  CheckBlockIndex();
3219 
3220  // Write changes periodically to disk, after relay.
3222  return false;
3223  }
3224 
3225  return true;
3226 }
3227 
/**
 * Mark a block as "precious": treat it as if it were received earlier than
 * any competing block of equal work, biasing tip selection toward it, then
 * re-run best-chain activation.
 *
 * @param pindex Block to prefer; must already be in the block index.
 * @return result of ActivateBestChain (false only on system error).
 * NOTE(review): the sequence-counter bookkeeping lines are elided in this
 * source view — confirm details against full source.
 */
3228 bool Chainstate::PreciousBlock(BlockValidationState& state, CBlockIndex* pindex)
3229 {
3232  {
3233  LOCK(cs_main);
3234  if (pindex->nChainWork < m_chain.Tip()->nChainWork) {
3235  // Nothing to do, this block is not at the tip.
3236  return true;
3237  }
3239  // The chain has been extended since the last call, reset the counter.
3241  }
// Re-insert below so the (elided) updated sequence id takes effect in ordering.
3243  setBlockIndexCandidates.erase(pindex);
3245  if (nBlockReverseSequenceId > std::numeric_limits<int32_t>::min()) {
3246  // We can't keep reducing the counter if somebody really wants to
3247  // call preciousblock 2**31-1 times on the same set of tips...
3249  }
3250  if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && pindex->HaveTxsDownloaded()) {
3251  setBlockIndexCandidates.insert(pindex);
3253  }
3254  }
3255 
3256  return ActivateBestChain(state, std::shared_ptr<const CBlock>());
3257 }
3258 
/**
 * Mark a block (and all its descendants on the active chain) as invalid,
 * disconnecting active blocks above it and repopulating
 * setBlockIndexCandidates with the best remaining alternatives.
 *
 * @param pindex Block to invalidate; the genesis block is refused.
 * @return false on disconnect failure, interference, or (implicitly) early
 *         shutdown paths; true when the block was marked invalid.
 * NOTE(review): lock assertions and the validation-queue limit call are
 * elided in this source view.
 */
3259 bool Chainstate::InvalidateBlock(BlockValidationState& state, CBlockIndex* pindex)
3260 {
3263 
3264  // Genesis block can't be invalidated
3265  assert(pindex);
3266  if (pindex->nHeight == 0) return false;
3267 
3268  CBlockIndex* to_mark_failed = pindex;
3269  bool pindex_was_in_chain = false;
3270  int disconnected = 0;
3271 
3272  // We do not allow ActivateBestChain() to run while InvalidateBlock() is
3273  // running, as that could cause the tip to change while we disconnect
3274  // blocks.
3276 
3277  // We'll be acquiring and releasing cs_main below, to allow the validation
3278  // callbacks to run. However, we should keep the block index in a
3279  // consistent state as we disconnect blocks -- in particular we need to
3280  // add equal-work blocks to setBlockIndexCandidates as we disconnect.
3281  // To avoid walking the block index repeatedly in search of candidates,
3282  // build a map once so that we can look up candidate blocks by chain
3283  // work as we go.
3284  std::multimap<const arith_uint256, CBlockIndex *> candidate_blocks_by_work;
3285 
3286  {
3287  LOCK(cs_main);
3288  for (auto& entry : m_blockman.m_block_index) {
3289  CBlockIndex* candidate = &entry.second;
3290  // We don't need to put anything in our active chain into the
3291  // multimap, because those candidates will be found and considered
3292  // as we disconnect.
3293  // Instead, consider only non-active-chain blocks that have at
3294  // least as much work as where we expect the new tip to end up.
3295  if (!m_chain.Contains(candidate) &&
3296  !CBlockIndexWorkComparator()(candidate, pindex->pprev) &&
3297  candidate->IsValid(BLOCK_VALID_TRANSACTIONS) &&
3298  candidate->HaveTxsDownloaded()) {
3299  candidate_blocks_by_work.insert(std::make_pair(candidate->nChainWork, candidate));
3300  }
3301  }
3302  }
3303 
3304  // Disconnect (descendants of) pindex, and mark them invalid.
// One iteration per disconnected tip; cs_main is re-acquired each time so
// validation callbacks can run between iterations.
3305  while (true) {
3306  if (ShutdownRequested()) break;
3307 
3308  // Make sure the queue of validation callbacks doesn't grow unboundedly.
3310 
3311  LOCK(cs_main);
3312  // Lock for as long as disconnectpool is in scope to make sure MaybeUpdateMempoolForReorg is
3313  // called after DisconnectTip without unlocking in between
3314  LOCK(MempoolMutex());
3315  if (!m_chain.Contains(pindex)) break;
3316  pindex_was_in_chain = true;
3317  CBlockIndex *invalid_walk_tip = m_chain.Tip();
3318 
3319  // ActivateBestChain considers blocks already in m_chain
3320  // unconditionally valid already, so force disconnect away from it.
3321  DisconnectedBlockTransactions disconnectpool;
3322  bool ret = DisconnectTip(state, &disconnectpool);
3323  // DisconnectTip will add transactions to disconnectpool.
3324  // Adjust the mempool to be consistent with the new tip, adding
3325  // transactions back to the mempool if disconnecting was successful,
3326  // and we're not doing a very deep invalidation (in which case
3327  // keeping the mempool up to date is probably futile anyway).
3328  MaybeUpdateMempoolForReorg(disconnectpool, /* fAddToMempool = */ (++disconnected <= 10) && ret);
3329  if (!ret) return false;
3330  assert(invalid_walk_tip->pprev == m_chain.Tip());
3331 
3332  // We immediately mark the disconnected blocks as invalid.
3333  // This prevents a case where pruned nodes may fail to invalidateblock
3334  // and be left unable to start as they have no tip candidates (as there
3335  // are no blocks that meet the "have data and are not invalid per
3336  // nStatus" criteria for inclusion in setBlockIndexCandidates).
3337  invalid_walk_tip->nStatus |= BLOCK_FAILED_VALID;
3338  m_blockman.m_dirty_blockindex.insert(invalid_walk_tip);
3339  setBlockIndexCandidates.erase(invalid_walk_tip);
3340  setBlockIndexCandidates.insert(invalid_walk_tip->pprev);
3341  if (invalid_walk_tip->pprev == to_mark_failed && (to_mark_failed->nStatus & BLOCK_FAILED_VALID)) {
3342  // We only want to mark the last disconnected block as BLOCK_FAILED_VALID; its children
3343  // need to be BLOCK_FAILED_CHILD instead.
3344  to_mark_failed->nStatus = (to_mark_failed->nStatus ^ BLOCK_FAILED_VALID) | BLOCK_FAILED_CHILD;
3345  m_blockman.m_dirty_blockindex.insert(to_mark_failed);
3346  }
3347 
3348  // Add any equal or more work headers to setBlockIndexCandidates
3349  auto candidate_it = candidate_blocks_by_work.lower_bound(invalid_walk_tip->pprev->nChainWork);
3350  while (candidate_it != candidate_blocks_by_work.end()) {
3351  if (!CBlockIndexWorkComparator()(candidate_it->second, invalid_walk_tip->pprev)) {
3352  setBlockIndexCandidates.insert(candidate_it->second);
3353  candidate_it = candidate_blocks_by_work.erase(candidate_it);
3354  } else {
3355  ++candidate_it;
3356  }
3357  }
3358 
3359  // Track the last disconnected block, so we can correct its BLOCK_FAILED_CHILD status in future
3360  // iterations, or, if it's the last one, call InvalidChainFound on it.
3361  to_mark_failed = invalid_walk_tip;
3362  }
3363 
3364  CheckBlockIndex();
3365 
3366  {
3367  LOCK(cs_main);
3368  if (m_chain.Contains(to_mark_failed)) {
3369  // If the to-be-marked invalid block is in the active chain, something is interfering and we can't proceed.
3370  return false;
3371  }
3372 
3373  // Mark pindex (or the last disconnected block) as invalid, even when it never was in the main chain
3374  to_mark_failed->nStatus |= BLOCK_FAILED_VALID;
3375  m_blockman.m_dirty_blockindex.insert(to_mark_failed);
3376  setBlockIndexCandidates.erase(to_mark_failed);
3377  m_chainman.m_failed_blocks.insert(to_mark_failed);
3378 
3379  // If any new blocks somehow arrived while we were disconnecting
3380  // (above), then the pre-calculation of what should go into
3381  // setBlockIndexCandidates may have missed entries. This would
3382  // technically be an inconsistency in the block index, but if we clean
3383  // it up here, this should be an essentially unobservable error.
3384  // Loop back over all block index entries and add any missing entries
3385  // to setBlockIndexCandidates.
3386  for (auto& [_, block_index] : m_blockman.m_block_index) {
3387  if (block_index.IsValid(BLOCK_VALID_TRANSACTIONS) && block_index.HaveTxsDownloaded() && !setBlockIndexCandidates.value_comp()(&block_index, m_chain.Tip())) {
3388  setBlockIndexCandidates.insert(&block_index);
3389  }
3390  }
3391 
3392  InvalidChainFound(to_mark_failed);
3393  }
3394 
3395  // Only notify about a new block tip if the active chain was modified.
3396  if (pindex_was_in_chain) {
3397  uiInterface.NotifyBlockTip(GetSynchronizationState(IsInitialBlockDownload()), to_mark_failed->pprev);
3398  }
3399  return true;
3400 }
3401 
// Body of Chainstate::ResetBlockFailureFlags (signature elided in this view).
// Clears BLOCK_FAILED_* status from pindex's descendants and all its
// ancestors, restoring eligible blocks to setBlockIndexCandidates so a
// previously-invalidated branch can be reconsidered.
3404 
3405  int nHeight = pindex->nHeight;
3406 
3407  // Remove the invalidity flag from this block and all its descendants.
3408  for (auto& [_, block_index] : m_blockman.m_block_index) {
// GetAncestor(nHeight) == pindex identifies descendants of pindex (and pindex itself).
3409  if (!block_index.IsValid() && block_index.GetAncestor(nHeight) == pindex) {
3410  block_index.nStatus &= ~BLOCK_FAILED_MASK;
3411  m_blockman.m_dirty_blockindex.insert(&block_index);
3412  if (block_index.IsValid(BLOCK_VALID_TRANSACTIONS) && block_index.HaveTxsDownloaded() && setBlockIndexCandidates.value_comp()(m_chain.Tip(), &block_index)) {
3413  setBlockIndexCandidates.insert(&block_index);
3414  }
3415  if (&block_index == m_chainman.m_best_invalid) {
3416  // Reset invalid block marker if it was pointing to one of those.
3417  m_chainman.m_best_invalid = nullptr;
3418  }
3419  m_chainman.m_failed_blocks.erase(&block_index);
3420  }
3421  }
3422 
3423  // Remove the invalidity flag from all ancestors too.
3424  while (pindex != nullptr) {
3425  if (pindex->nStatus & BLOCK_FAILED_MASK) {
3426  pindex->nStatus &= ~BLOCK_FAILED_MASK;
3427  m_blockman.m_dirty_blockindex.insert(pindex);
3428  m_chainman.m_failed_blocks.erase(pindex);
3429  }
3430  pindex = pindex->pprev;
3431  }
3432 }
3433 
/**
 * Record that a block's full transaction data was received and stored at
 * `pos`. If all ancestors also have their transactions, propagate nChainTx
 * and tip-candidacy to this block and any descendants waiting in
 * m_blocks_unlinked; otherwise queue this block as unlinked.
 *
 * NOTE(review): a few lines (lock assertion, the witness-data condition and
 * validity-raise call) are elided in this source view.
 */
3435 void Chainstate::ReceivedBlockTransactions(const CBlock& block, CBlockIndex* pindexNew, const FlatFilePos& pos)
3436 {
3438  pindexNew->nTx = block.vtx.size();
// nChainTx == 0 marks "cumulative count not yet known"; set below once all
// ancestors have their transactions.
3439  pindexNew->nChainTx = 0;
3440  pindexNew->nFile = pos.nFile;
3441  pindexNew->nDataPos = pos.nPos;
3442  pindexNew->nUndoPos = 0;
3443  pindexNew->nStatus |= BLOCK_HAVE_DATA;
3445  pindexNew->nStatus |= BLOCK_OPT_WITNESS;
3446  }
3448  m_blockman.m_dirty_blockindex.insert(pindexNew);
3449 
3450  if (pindexNew->pprev == nullptr || pindexNew->pprev->HaveTxsDownloaded()) {
3451  // If pindexNew is the genesis block or all parents are BLOCK_VALID_TRANSACTIONS.
3452  std::deque<CBlockIndex*> queue;
3453  queue.push_back(pindexNew);
3454 
3455  // Recursively process any descendant blocks that now may be eligible to be connected.
3456  while (!queue.empty()) {
3457  CBlockIndex *pindex = queue.front();
3458  queue.pop_front();
3459  pindex->nChainTx = (pindex->pprev ? pindex->pprev->nChainTx : 0) + pindex->nTx;
3460  pindex->nSequenceId = nBlockSequenceId++;
3461  if (m_chain.Tip() == nullptr || !setBlockIndexCandidates.value_comp()(pindex, m_chain.Tip())) {
3462  setBlockIndexCandidates.insert(pindex);
3463  }
// Move any children that were waiting on this block from m_blocks_unlinked
// onto the processing queue.
3464  std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = m_blockman.m_blocks_unlinked.equal_range(pindex);
3465  while (range.first != range.second) {
3466  std::multimap<CBlockIndex*, CBlockIndex*>::iterator it = range.first;
3467  queue.push_back(it->second);
3468  range.first++;
3469  m_blockman.m_blocks_unlinked.erase(it);
3470  }
3471  }
3472  } else {
3473  if (pindexNew->pprev && pindexNew->pprev->IsValid(BLOCK_VALID_TREE)) {
3474  m_blockman.m_blocks_unlinked.insert(std::make_pair(pindexNew->pprev, pindexNew));
3475  }
3476  }
3477 }
3478 
3479 static bool CheckBlockHeader(const CBlockHeader& block, BlockValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW = true)
3480 {
3481  // Check proof of work matches claimed amount
3482  if (fCheckPOW && !CheckProofOfWork(block.GetHash(), block.nBits, consensusParams))
3483  return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, "high-hash", "proof of work failed");
3484 
3485  return true;
3486 }
3487 
/**
 * Context-independent block validity checks: header PoW, signet solution,
 * merkle root (incl. CVE-2012-2459 malleability), size limits, coinbase
 * placement, per-transaction checks (incl. CVE-2018-17144 duplicate inputs)
 * and legacy sigop count. Results are cached via block.fChecked when the
 * full set of checks ran.
 *
 * NOTE(review): the size-limit condition, the tx-failure state.Invalid call
 * and the sigop-limit condition lines are elided in this source view.
 */
3488 bool CheckBlock(const CBlock& block, BlockValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW, bool fCheckMerkleRoot)
3489 {
3490  // These are checks that are independent of context.
3491 
// Cached positive result from a previous full check.
3492  if (block.fChecked)
3493  return true;
3494 
3495  // Check that the header is valid (particularly PoW). This is mostly
3496  // redundant with the call in AcceptBlockHeader.
3497  if (!CheckBlockHeader(block, state, consensusParams, fCheckPOW))
3498  return false;
3499 
3500  // Signet only: check block solution
3501  if (consensusParams.signet_blocks && fCheckPOW && !CheckSignetBlockSolution(block, consensusParams)) {
3502  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-signet-blksig", "signet block signature validation failure");
3503  }
3504 
3505  // Check the merkle root.
3506  if (fCheckMerkleRoot) {
3507  bool mutated;
3508  uint256 hashMerkleRoot2 = BlockMerkleRoot(block, &mutated);
3509  if (block.hashMerkleRoot != hashMerkleRoot2)
3510  return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-txnmrklroot", "hashMerkleRoot mismatch");
3511 
3512  // Check for merkle tree malleability (CVE-2012-2459): repeating sequences
3513  // of transactions in a block without affecting the merkle root of a block,
3514  // while still invalidating it.
3515  if (mutated)
3516  return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-txns-duplicate", "duplicate transaction");
3517  }
3518 
3519  // All potential-corruption validation must be done before we do any
3520  // transaction validation, as otherwise we may mark the header as invalid
3521  // because we receive the wrong transactions for it.
3522  // Note that witness malleability is checked in ContextualCheckBlock, so no
3523  // checks that use witness data may be performed here.
3524 
3525  // Size limits
3527  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-length", "size limits failed");
3528 
3529  // First transaction must be coinbase, the rest must not be
3530  if (block.vtx.empty() || !block.vtx[0]->IsCoinBase())
3531  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-missing", "first tx is not coinbase");
3532  for (unsigned int i = 1; i < block.vtx.size(); i++)
3533  if (block.vtx[i]->IsCoinBase())
3534  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-multiple", "more than one coinbase");
3535 
3536  // Check transactions
3537  // Must check for duplicate inputs (see CVE-2018-17144)
3538  for (const auto& tx : block.vtx) {
3539  TxValidationState tx_state;
3540  if (!CheckTransaction(*tx, tx_state)) {
3541  // CheckBlock() does context-free validation checks. The only
3542  // possible failures are consensus failures.
3545  strprintf("Transaction check failed (tx hash %s) %s", tx->GetHash().ToString(), tx_state.GetDebugMessage()));
3546  }
3547  }
// Legacy (pre-segwit) sigop accounting; the limit check line is elided here.
3548  unsigned int nSigOps = 0;
3549  for (const auto& tx : block.vtx)
3550  {
3551  nSigOps += GetLegacySigOpCount(*tx);
3552  }
3554  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-sigops", "out-of-bounds SigOpCount");
3555 
// Only cache the result when every check actually ran.
3556  if (fCheckPOW && fCheckMerkleRoot)
3557  block.fChecked = true;
3558 
3559  return true;
3560 }
3561 
3562 void ChainstateManager::UpdateUncommittedBlockStructures(CBlock& block, const CBlockIndex* pindexPrev) const
3563 {
3564  int commitpos = GetWitnessCommitmentIndex(block);
3565  static const std::vector<unsigned char> nonce(32, 0x00);
3566  if (commitpos != NO_WITNESS_COMMITMENT && DeploymentActiveAfter(pindexPrev, *this, Consensus::DEPLOYMENT_SEGWIT) && !block.vtx[0]->HasWitness()) {
3567  CMutableTransaction tx(*block.vtx[0]);
3568  tx.vin[0].scriptWitness.stack.resize(1);
3569  tx.vin[0].scriptWitness.stack[0] = nonce;
3570  block.vtx[0] = MakeTransactionRef(std::move(tx));
3571  }
3572 }
3573 
/**
 * Add a BIP141 witness commitment output to the block's coinbase if one is
 * not already present: an OP_RETURN push of {0xaa,0x21,0xa9,0xed} followed by
 * SHA256^2(witness merkle root || 32 zero bytes). Returns the commitment
 * script bytes (empty if a commitment already existed).
 *
 * NOTE(review): the scriptPubKey sizing line is elided in this source view.
 */
3574 std::vector<unsigned char> ChainstateManager::GenerateCoinbaseCommitment(CBlock& block, const CBlockIndex* pindexPrev) const
3575 {
3576  std::vector<unsigned char> commitment;
3577  int commitpos = GetWitnessCommitmentIndex(block);
// 'ret' doubles as the 32-byte zero witness reserved value hashed into the commitment.
3578  std::vector<unsigned char> ret(32, 0x00);
3579  if (commitpos == NO_WITNESS_COMMITMENT) {
3580  uint256 witnessroot = BlockWitnessMerkleRoot(block, nullptr);
// Commitment = double-SHA256 of (witness merkle root || reserved value).
3581  CHash256().Write(witnessroot).Write(ret).Finalize(witnessroot);
3582  CTxOut out;
3583  out.nValue = 0;
// Script layout: OP_RETURN, 0x24 (36-byte push), 4-byte tag aa21a9ed, 32-byte commitment.
3585  out.scriptPubKey[0] = OP_RETURN;
3586  out.scriptPubKey[1] = 0x24;
3587  out.scriptPubKey[2] = 0xaa;
3588  out.scriptPubKey[3] = 0x21;
3589  out.scriptPubKey[4] = 0xa9;
3590  out.scriptPubKey[5] = 0xed;
3591  memcpy(&out.scriptPubKey[6], witnessroot.begin(), 32);
3592  commitment = std::vector<unsigned char>(out.scriptPubKey.begin(), out.scriptPubKey.end());
3593  CMutableTransaction tx(*block.vtx[0]);
3594  tx.vout.push_back(out);
3595  block.vtx[0] = MakeTransactionRef(std::move(tx));
3596  }
// Ensure the coinbase also carries the witness reserved value.
3597  UpdateUncommittedBlockStructures(block, pindexPrev);
3598  return commitment;
3599 }
3600 
3601 bool HasValidProofOfWork(const std::vector<CBlockHeader>& headers, const Consensus::Params& consensusParams)
3602 {
3603  return std::all_of(headers.cbegin(), headers.cend(),
3604  [&](const auto& header) { return CheckProofOfWork(header.GetHash(), header.nBits, consensusParams);});
3605 }
3606 
3607 arith_uint256 CalculateHeadersWork(const std::vector<CBlockHeader>& headers)
3608 {
3609  arith_uint256 total_work{0};
3610  for (const CBlockHeader& header : headers) {
3611  CBlockIndex dummy(header);
3612  total_work += GetBlockProof(dummy);
3613  }
3614  return total_work;
3615 }
3616 
// Body of ContextualCheckBlockHeader (signature elided in this view): header
// checks that depend on the block's position in the chain — difficulty
// retarget, checkpoint forks, timestamps (median-time-past and future drift)
// and minimum version per active softforks — given pindexPrev and the
// caller-supplied current time `now`.
3627 {
3629  assert(pindexPrev != nullptr);
3630  const int nHeight = pindexPrev->nHeight + 1;
3631 
3632  // Check proof of work
3633  const Consensus::Params& consensusParams = chainman.GetConsensus();
3634  if (block.nBits != GetNextWorkRequired(pindexPrev, &block, consensusParams))
3635  return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, "bad-diffbits", "incorrect proof of work");
3636 
3637  // Check against checkpoints
3638  if (chainman.m_options.checkpoints_enabled) {
3639  // Don't accept any forks from the main chain prior to last checkpoint.
3640  // GetLastCheckpoint finds the last checkpoint in MapCheckpoints that's in our
3641  // BlockIndex().
3642  const CBlockIndex* pcheckpoint = blockman.GetLastCheckpoint(chainman.GetParams().Checkpoints());
3643  if (pcheckpoint && nHeight < pcheckpoint->nHeight) {
3644  LogPrintf("ERROR: %s: forked chain older than last checkpoint (height %d)\n", __func__, nHeight);
3645  return state.Invalid(BlockValidationResult::BLOCK_CHECKPOINT, "bad-fork-prior-to-checkpoint");
3646  }
3647  }
3648 
3649  // Check timestamp against prev
// BIP113-style monotonicity: timestamp must exceed the median of the last 11 blocks.
3650  if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast())
3651  return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, "time-too-old", "block's timestamp is too early");
3652 
3653  // Check timestamp
3654  if (block.Time() > now + std::chrono::seconds{MAX_FUTURE_BLOCK_TIME}) {
3655  return state.Invalid(BlockValidationResult::BLOCK_TIME_FUTURE, "time-too-new", "block timestamp too far in the future");
3656  }
3657 
3658  // Reject blocks with outdated version
// Versions below 2/3/4 are rejected once BIP34/BIP66/BIP65 respectively are active.
3659  if ((block.nVersion < 2 && DeploymentActiveAfter(pindexPrev, chainman, Consensus::DEPLOYMENT_HEIGHTINCB)) ||
3660  (block.nVersion < 3 && DeploymentActiveAfter(pindexPrev, chainman, Consensus::DEPLOYMENT_DERSIG)) ||
3661  (block.nVersion < 4 && DeploymentActiveAfter(pindexPrev, chainman, Consensus::DEPLOYMENT_CLTV))) {
3662  return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, strprintf("bad-version(0x%08x)", block.nVersion),
3663  strprintf("rejected nVersion=0x%08x block", block.nVersion));
3664  }
3665 
3666  return true;
3667 }
3668 
/**
 * Block checks that depend on the chain context (but not the UTXO set):
 * transaction finality under BIP113 median-time-past, BIP34 height-in-coinbase,
 * BIP141 witness commitment validation, and the post-witness block weight
 * limit. Consensus-critical: the order of these checks is deliberate (weight
 * is checked only after the witness commitment is verified — see comment below).
 */
3675 static bool ContextualCheckBlock(const CBlock& block, BlockValidationState& state, const ChainstateManager& chainman, const CBlockIndex* pindexPrev)
3676 {
// pindexPrev == nullptr means this is the genesis block (height 0).
3677  const int nHeight = pindexPrev == nullptr ? 0 : pindexPrev->nHeight + 1;
3678 
3679  // Enforce BIP113 (Median Time Past).
3680  bool enforce_locktime_median_time_past{false};
3681  if (DeploymentActiveAfter(pindexPrev, chainman, Consensus::DEPLOYMENT_CSV)) {
3682  assert(pindexPrev != nullptr);
3683  enforce_locktime_median_time_past = true;
3684  }
3685 
// Pre-BIP113, locktimes compare against the block's own timestamp instead.
3686  const int64_t nLockTimeCutoff{enforce_locktime_median_time_past ?
3687  pindexPrev->GetMedianTimePast() :
3688  block.GetBlockTime()};
3689 
3690  // Check that all transactions are finalized
3691  for (const auto& tx : block.vtx) {
3692  if (!IsFinalTx(*tx, nHeight, nLockTimeCutoff)) {
3693  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-nonfinal", "non-final transaction");
3694  }
3695  }
3696 
3697  // Enforce rule that the coinbase starts with serialized block height
3698  if (DeploymentActiveAfter(pindexPrev, chainman, Consensus::DEPLOYMENT_HEIGHTINCB))
3699  {
// BIP34: scriptSig must begin with the height serialized as a script push.
3700  CScript expect = CScript() << nHeight;
3701  if (block.vtx[0]->vin[0].scriptSig.size() < expect.size() ||
3702  !std::equal(expect.begin(), expect.end(), block.vtx[0]->vin[0].scriptSig.begin())) {
3703  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-height", "block height mismatch in coinbase");
3704  }
3705  }
3706 
3707  // Validation for witness commitments.
3708  // * We compute the witness hash (which is the hash including witnesses) of all the block's transactions, except the
3709  // coinbase (where 0x0000....0000 is used instead).
3710  // * The coinbase scriptWitness is a stack of a single 32-byte vector, containing a witness reserved value (unconstrained).
3711  // * We build a merkle tree with all those witness hashes as leaves (similar to the hashMerkleRoot in the block header).
3712  // * There must be at least one output whose scriptPubKey is a single 36-byte push, the first 4 bytes of which are
3713  // {0xaa, 0x21, 0xa9, 0xed}, and the following 32 bytes are SHA256^2(witness root, witness reserved value). In case there are
3714  // multiple, the last one is used.
3715  bool fHaveWitness = false;
3716  if (DeploymentActiveAfter(pindexPrev, chainman, Consensus::DEPLOYMENT_SEGWIT)) {
3717  int commitpos = GetWitnessCommitmentIndex(block);
3718  if (commitpos != NO_WITNESS_COMMITMENT) {
3719  bool malleated = false;
3720  uint256 hashWitness = BlockWitnessMerkleRoot(block, &malleated);
3721  // The malleation check is ignored; as the transaction tree itself
3722  // already does not permit it, it is impossible to trigger in the
3723  // witness tree.
3724  if (block.vtx[0]->vin[0].scriptWitness.stack.size() != 1 || block.vtx[0]->vin[0].scriptWitness.stack[0].size() != 32) {
3725  return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-witness-nonce-size", strprintf("%s : invalid witness reserved value size", __func__));
3726  }
// Recompute SHA256^2(witness root || reserved value) and compare against the
// 32 bytes at offset 6 of the commitment output's scriptPubKey.
3727  CHash256().Write(hashWitness).Write(block.vtx[0]->vin[0].scriptWitness.stack[0]).Finalize(hashWitness);
3728  if (memcmp(hashWitness.begin(), &block.vtx[0]->vout[commitpos].scriptPubKey[6], 32)) {
3729  return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-witness-merkle-match", strprintf("%s : witness merkle commitment mismatch", __func__));
3730  }
3731  fHaveWitness = true;
3732  }
3733  }
3734 
3735  // No witness data is allowed in blocks that don't commit to witness data, as this would otherwise leave room for spam
3736  if (!fHaveWitness) {
3737  for (const auto& tx : block.vtx) {
3738  if (tx->HasWitness()) {
3739  return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "unexpected-witness", strprintf("%s : unexpected witness data found", __func__));
3740  }
3741  }
3742  }
3743 
3744  // After the coinbase witness reserved value and commitment are verified,
3745  // we can check if the block weight passes (before we've checked the
3746  // coinbase witness, it would be possible for the weight to be too
3747  // large by filling up the coinbase witness, which doesn't change
3748  // the block hash, so we couldn't mark the block as permanently
3749  // failed).
3750  if (GetBlockWeight(block) > MAX_BLOCK_WEIGHT) {
3751  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-weight", strprintf("%s : weight limit failed", __func__));
3752  }
3753 
3754  return true;
3755 }
3756 
// Validate a single block header against consensus rules and record it in the
// block index.
// @param block            header to validate
// @param state            receives the rejection reason on failure
// @param ppindex          if non-null, set to the header's block-index entry
// @param min_pow_checked  caller's attestation that the header chain carries
//                         sufficient anti-DoS proof-of-work; headers without
//                         it are rejected as BLOCK_HEADER_LOW_WORK
// @return true if the header is valid (or already known and not marked failed)
// NOTE(review): this rendered listing omits some original source lines
// (e.g. original lines 3759, 3842, 3859) — in particular the statement that
// declares `pindex` used below is not visible here. Consult the unrendered
// validation.cpp before relying on this text.
3757 bool ChainstateManager::AcceptBlockHeader(const CBlockHeader& block, BlockValidationState& state, CBlockIndex** ppindex, bool min_pow_checked)
3758 {
3760 
3761  // Check for duplicate
3762  uint256 hash = block.GetHash();
3763  BlockMap::iterator miSelf{m_blockman.m_block_index.find(hash)};
3764  if (hash != GetConsensus().hashGenesisBlock) {
3765  if (miSelf != m_blockman.m_block_index.end()) {
3766  // Block header is already known.
3767  CBlockIndex* pindex = &(miSelf->second);
3768  if (ppindex)
3769  *ppindex = pindex;
3770  if (pindex->nStatus & BLOCK_FAILED_MASK) {
3771  LogPrint(BCLog::VALIDATION, "%s: block %s is marked invalid\n", __func__, hash.ToString());
3772  return state.Invalid(BlockValidationResult::BLOCK_CACHED_INVALID, "duplicate");
3773  }
3774  return true;
3775  }
3776 
// Context-free sanity checks (e.g. proof-of-work target encoding).
3777  if (!CheckBlockHeader(block, state, GetConsensus())) {
3778  LogPrint(BCLog::VALIDATION, "%s: Consensus::CheckBlockHeader: %s, %s\n", __func__, hash.ToString(), state.ToString());
3779  return false;
3780  }
3781 
3782  // Get prev block index
3783  CBlockIndex* pindexPrev = nullptr;
3784  BlockMap::iterator mi{m_blockman.m_block_index.find(block.hashPrevBlock)};
3785  if (mi == m_blockman.m_block_index.end()) {
3786  LogPrint(BCLog::VALIDATION, "header %s has prev block not found: %s\n", hash.ToString(), block.hashPrevBlock.ToString());
3787  return state.Invalid(BlockValidationResult::BLOCK_MISSING_PREV, "prev-blk-not-found");
3788  }
3789  pindexPrev = &((*mi).second);
3790  if (pindexPrev->nStatus & BLOCK_FAILED_MASK) {
3791  LogPrint(BCLog::VALIDATION, "header %s has prev block invalid: %s\n", hash.ToString(), block.hashPrevBlock.ToString());
3792  return state.Invalid(BlockValidationResult::BLOCK_INVALID_PREV, "bad-prevblk");
3793  }
// Checks that depend on the predecessor (difficulty, timestamps, version).
3794  if (!ContextualCheckBlockHeader(block, state, m_blockman, *this, pindexPrev, m_options.adjusted_time_callback())) {
3795  LogPrint(BCLog::VALIDATION, "%s: Consensus::ContextualCheckBlockHeader: %s, %s\n", __func__, hash.ToString(), state.ToString())
3796  return false;
3797  }
3798 
3799  /* Determine if this block descends from any block which has been found
3800  * invalid (m_failed_blocks), then mark pindexPrev and any blocks between
3801  * them as failed. For example:
3802  *
3803  * D3
3804  * /
3805  * B2 - C2
3806  * / \
3807  * A D2 - E2 - F2
3808  * \
3809  * B1 - C1 - D1 - E1
3810  *
3811  * In the case that we attempted to reorg from E1 to F2, only to find
3812  * C2 to be invalid, we would mark D2, E2, and F2 as BLOCK_FAILED_CHILD
3813  * but NOT D3 (it was not in any of our candidate sets at the time).
3814  *
3815  * In any case D3 will also be marked as BLOCK_FAILED_CHILD at restart
3816  * in LoadBlockIndex.
3817  */
3818  if (!pindexPrev->IsValid(BLOCK_VALID_SCRIPTS)) {
3819  // The above does not mean "invalid": it checks if the previous block
3820  // hasn't been validated up to BLOCK_VALID_SCRIPTS. This is a performance
3821  // optimization, in the common case of adding a new block to the tip,
3822  // we don't need to iterate over the failed blocks list.
3823  for (const CBlockIndex* failedit : m_failed_blocks) {
3824  if (pindexPrev->GetAncestor(failedit->nHeight) == failedit) {
3825  assert(failedit->nStatus & BLOCK_FAILED_VALID);
3826  CBlockIndex* invalid_walk = pindexPrev;
// Walk back from pindexPrev to the known-invalid ancestor, marking
// every intermediate index as a failed child and scheduling it for
// a block-index write.
3827  while (invalid_walk != failedit) {
3828  invalid_walk->nStatus |= BLOCK_FAILED_CHILD;
3829  m_blockman.m_dirty_blockindex.insert(invalid_walk);
3830  invalid_walk = invalid_walk->pprev;
3831  }
3832  LogPrint(BCLog::VALIDATION, "header %s has prev block invalid: %s\n", hash.ToString(), block.hashPrevBlock.ToString());
3833  return state.Invalid(BlockValidationResult::BLOCK_INVALID_PREV, "bad-prevblk");
3834  }
3835  }
3836  }
3837  }
// Reject headers whose chain work has not been vouched for by the caller.
3838  if (!min_pow_checked) {
3839  LogPrint(BCLog::VALIDATION, "%s: not adding new block header %s, missing anti-dos proof-of-work validation\n", __func__, hash.ToString());
3840  return state.Invalid(BlockValidationResult::BLOCK_HEADER_LOW_WORK, "too-little-chainwork");
3841  }
// NOTE(review): original line 3842 (which produces `pindex` for the new
// header, presumably via the block manager) is omitted by this rendering.
3843 
3844  if (ppindex)
3845  *ppindex = pindex;
3846 
3847  // Since this is the earliest point at which we have determined that a
3848  // header is both new and valid, log here.
3849  //
3850  // These messages are valuable for detecting potential selfish mining behavior;
3851  // if multiple displacing headers are seen near simultaneously across many
3852  // nodes in the network, this might be an indication of selfish mining. Having
3853  // this log by default when not in IBD ensures broad availability of this data
3854  // in case investigation is merited.
3855  const auto msg = strprintf(
3856  "Saw new header hash=%s height=%d", hash.ToString(), pindex->nHeight);
3857 
3858  if (ActiveChainstate().IsInitialBlockDownload()) {
3860  } else {
3861  LogPrintf("%s\n", msg);
3862  }
3863 
3864  return true;
3865 }
3866 
3867 // Exposed wrapper for AcceptBlockHeader
// Accept a batch of headers under a single cs_main lock, stopping at the
// first failure. If `ppindex` is given it is set to the last accepted
// header's index entry. After releasing the lock, notifies the UI of the new
// header tip and, during initial block download, logs an estimated header
// sync progress derived from the tip timestamp and the target block spacing.
3868 bool ChainstateManager::ProcessNewBlockHeaders(const std::vector<CBlockHeader>& headers, bool min_pow_checked, BlockValidationState& state, const CBlockIndex** ppindex)
3869 {
3871  {
3872  LOCK(cs_main);
3873  for (const CBlockHeader& header : headers) {
3874  CBlockIndex *pindex = nullptr; // Use a temp pindex instead of ppindex to avoid a const_cast
3875  bool accepted{AcceptBlockHeader(header, state, &pindex, min_pow_checked)};
// Run the (possibly expensive, usually no-op) self-consistency check after
// every header, even a failed one.
3876  ActiveChainstate().CheckBlockIndex();
3877 
3878  if (!accepted) {
3879  return false;
3880  }
3881  if (ppindex) {
3882  *ppindex = pindex;
3883  }
3884  }
3885  }
3886  if (NotifyHeaderTip(ActiveChainstate())) {
3887  if (ActiveChainstate().IsInitialBlockDownload() && ppindex && *ppindex) {
3888  const CBlockIndex& last_accepted{**ppindex};
// Rough estimate: headers still missing = elapsed wall-clock time since the
// last accepted header, divided by the target spacing between blocks.
3889  const int64_t blocks_left{(GetTime() - last_accepted.GetBlockTime()) / GetConsensus().nPowTargetSpacing};
3890  const double progress{100.0 * last_accepted.nHeight / (last_accepted.nHeight + blocks_left)};
3891  LogPrintf("Synchronizing blockheaders, height: %d (~%.2f%%)\n", last_accepted.nHeight, progress);
3892  }
3893  }
3894  return true;
3895 }
3896 
// Report headers "presync" progress — headers being downloaded before their
// cumulative work has crossed the anti-DoS minimum — to the UI and the log.
// Suppressed entirely once the best known header chain already exceeds
// nMinimumChainWork, and rate-limited to at most 4 updates per second.
// NOTE(review): the `work` parameter is not referenced anywhere in the body
// shown here — confirm against the original file whether an omitted line
// uses it.
3897 void ChainstateManager::ReportHeadersPresync(const arith_uint256& work, int64_t height, int64_t timestamp)
3898 {
3900  const auto& chainstate = ActiveChainstate();
3901  {
3902  LOCK(cs_main);
3903  // Don't report headers presync progress if we already have a post-minchainwork header chain.
3904  // This means we lose reporting for potentially legitimate, but unlikely, deep reorgs, but
3905  // prevent attackers that spam low-work headers from filling our logs.
3906  if (m_best_header->nChainWork >= UintToArith256(GetConsensus().nMinimumChainWork)) return;
3907  // Rate limit headers presync updates to 4 per second, as these are not subject to DoS
3908  // protection.
3909  auto now = std::chrono::steady_clock::now();
3910  if (now < m_last_presync_update + std::chrono::milliseconds{250}) return;
3911  m_last_presync_update = now;
3912  }
// UI notification and progress logging happen outside the cs_main lock.
3913  bool initial_download = chainstate.IsInitialBlockDownload();
3914  uiInterface.NotifyHeaderTip(GetSynchronizationState(initial_download), height, timestamp, /*presync=*/true);
3915  if (initial_download) {
// Same wall-clock-based remaining-headers estimate as in ProcessNewBlockHeaders.
3916  const int64_t blocks_left{(GetTime() - timestamp) / GetConsensus().nPowTargetSpacing};
3917  const double progress{100.0 * height / (height + blocks_left)};
3918  LogPrintf("Pre-synchronizing blockheaders, height: %d (~%.2f%%)\n", height, progress);
3919  }
3920 }
3921 
// Store a full block to disk if it passes header acceptance, the
// requested/unrequested-block policy, and context-free + contextual block
// checks. Does NOT connect the block to the active chain — that is
// ActivateBestChain's job.
// @param pblock          block to accept
// @param state           receives the failure reason on rejection
// @param ppindex         if non-null, receives the block's index entry
// @param fRequested      true if we explicitly asked a peer for this block;
//                        unrequested blocks face extra anti-DoS filtering
// @param dbp             existing on-disk position (reindex), or null
// @param fNewBlock       set true once we commit to writing a new block
// @param min_pow_checked passed through to AcceptBlockHeader (anti-DoS PoW)
// NOTE(review): this rendered listing omits original lines 3928 and 4001
// (the latter sits between ReceivedBlockTransactions and the final
// CheckBlockIndex) — consult the unrendered validation.cpp.
3923 bool Chainstate::AcceptBlock(const std::shared_ptr<const CBlock>& pblock, BlockValidationState& state, CBlockIndex** ppindex, bool fRequested, const FlatFilePos* dbp, bool* fNewBlock, bool min_pow_checked)
3924 {
3925  const CBlock& block = *pblock;
3926 
3927  if (fNewBlock) *fNewBlock = false;
3929 
// If the caller wants the index entry, write it through ppindex directly;
// otherwise use a local dummy so the code below can use `pindex` uniformly.
3930  CBlockIndex *pindexDummy = nullptr;
3931  CBlockIndex *&pindex = ppindex ? *ppindex : pindexDummy;
3932 
3933  bool accepted_header{m_chainman.AcceptBlockHeader(block, state, &pindex, min_pow_checked)};
3934  CheckBlockIndex();
3935 
3936  if (!accepted_header)
3937  return false;
3938 
3939  // Try to process all requested blocks that we don't have, but only
3940  // process an unrequested block if it's new and has enough work to
3941  // advance our tip, and isn't too many blocks ahead.
3942  bool fAlreadyHave = pindex->nStatus & BLOCK_HAVE_DATA;
3943  bool fHasMoreOrSameWork = (m_chain.Tip() ? pindex->nChainWork >= m_chain.Tip()->nChainWork : true);
3944  // Blocks that are too out-of-order needlessly limit the effectiveness of
3945  // pruning, because pruning will not delete block files that contain any
3946  // blocks which are too close in height to the tip. Apply this test
3947  // regardless of whether pruning is enabled; it should generally be safe to
3948  // not process unrequested blocks.
3949  bool fTooFarAhead{pindex->nHeight > m_chain.Height() + int(MIN_BLOCKS_TO_KEEP)};
3950 
3951  // TODO: Decouple this function from the block download logic by removing fRequested
3952  // This requires some new chain data structure to efficiently look up if a
3953  // block is in a chain leading to a candidate for best tip, despite not
3954  // being such a candidate itself.
3955  // Note that this would break the getblockfrompeer RPC
3956 
3957  // TODO: deal better with return value and error conditions for duplicate
3958  // and unrequested blocks.
3959  if (fAlreadyHave) return true;
3960  if (!fRequested) { // If we didn't ask for it:
3961  if (pindex->nTx != 0) return true; // This is a previously-processed block that was pruned
3962  if (!fHasMoreOrSameWork) return true; // Don't process less-work chains
3963  if (fTooFarAhead) return true; // Block height is too high
3964 
3965  // Protect against DoS attacks from low-work chains.
3966  // If our tip is behind, a peer could try to send us
3967  // low-work blocks on a fake chain that we would never
3968  // request; don't process these.
3969  if (pindex->nChainWork < m_chainman.MinimumChainWork()) return true;
3970  }
3971 
3972  const CChainParams& params{m_chainman.GetParams()};
3973 
3974  if (!CheckBlock(block, state, params.GetConsensus()) ||
3975  !ContextualCheckBlock(block, state, m_chainman, pindex->pprev)) {
// BLOCK_MUTATED failures are not recorded against the index entry: the
// header may be fine and only this particular transmission malleated.
3976  if (state.IsInvalid() && state.GetResult() != BlockValidationResult::BLOCK_MUTATED) {
3977  pindex->nStatus |= BLOCK_FAILED_VALID;
3978  m_blockman.m_dirty_blockindex.insert(pindex);
3979  }
3980  return error("%s: %s", __func__, state.ToString());
3981  }
3982 
3983  // Header is valid/has work, merkle tree and segwit merkle tree are good...RELAY NOW
3984  // (but if it does not build on our best tip, let the SendMessages loop relay it)
3985  if (!IsInitialBlockDownload() && m_chain.Tip() == pindex->pprev)
3986  GetMainSignals().NewPoWValidBlock(pindex, pblock);
3987 
3988  // Write block to history file
3989  if (fNewBlock) *fNewBlock = true;
3990  try {
3991  FlatFilePos blockPos{m_blockman.SaveBlockToDisk(block, pindex->nHeight, m_chain, params, dbp)};
3992  if (blockPos.IsNull()) {
3993  state.Error(strprintf("%s: Failed to find position to write new block to disk", __func__));
3994  return false;
3995  }
3996  ReceivedBlockTransactions(block, pindex, blockPos);
3997  } catch (const std::runtime_error& e) {
// Disk-level failure: abort the node rather than mark the block bad.
3998  return AbortNode(state, std::string("System error: ") + e.what());
3999  }
4000 
4002 
4003  CheckBlockIndex();
4004 
4005  return true;
4006 }
4007 
// Top-level entry point for a newly received full block: context-free checks,
// storage via AcceptBlock, then an attempt to make it the new tip via
// ActivateBestChain.
// @param block            block to process
// @param force_processing process even if the block was not requested
// @param min_pow_checked  anti-DoS PoW attestation for the header chain
// @param new_block        if non-null, set true when the block was newly stored
// @return false on any validation or activation failure (reason logged)
4008 bool ChainstateManager::ProcessNewBlock(const std::shared_ptr<const CBlock>& block, bool force_processing, bool min_pow_checked, bool* new_block)
4009 {
4011 
4012  {
4013  CBlockIndex *pindex = nullptr;
4014  if (new_block) *new_block = false;
4015  BlockValidationState state;
4016 
4017  // CheckBlock() does not support multi-threaded block validation because CBlock::fChecked can cause data race.
4018  // Therefore, the following critical section must include the CheckBlock() call as well.
4019  LOCK(cs_main);
4020 
4021  // Skipping AcceptBlock() for CheckBlock() failures means that we will never mark a block as invalid if
4022  // CheckBlock() fails. This is protective against consensus failure if there are any unknown forms of block
4023  // malleability that cause CheckBlock() to fail; see e.g. CVE-2012-2459 and
4024  // https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2019-February/016697.html. Because CheckBlock() is
4025  // not very expensive, the anti-DoS benefits of caching failure (of a definitely-invalid block) are not substantial.
4026  bool ret = CheckBlock(*block, state, GetConsensus());
4027  if (ret) {
4028  // Store to disk
4029  ret = ActiveChainstate().AcceptBlock(block, state, &pindex, force_processing, nullptr, new_block, min_pow_checked);
4030  }
4031  if (!ret) {
// Let subscribers (e.g. the miner / P2P layer) observe the rejection.
4032  GetMainSignals().BlockChecked(*block, state);
4033  return error("%s: AcceptBlock FAILED (%s)", __func__, state.ToString());
4034  }
4035  }
4036 
// Lock released above: tip notification and chain activation run without cs_main held here.
4037  NotifyHeaderTip(ActiveChainstate());
4038 
4039  BlockValidationState state; // Only used to report errors, not invalidity - ignore it
4040  if (!ActiveChainstate().ActivateBestChain(state, block)) {
4041  return error("%s: ActivateBestChain failed (%s)", __func__, state.ToString());
4042  }
4043 
4044  return true;
4045 }
4046 
// NOTE(review): the rendered listing dropped this function's signature line
// (original 4047). From the body — it uses `tx` and `test_accept` and returns
// MempoolAcceptResult — this appears to be ChainstateManager's transaction
// entry point; confirm against the original file.
// Submit a transaction to the active chainstate's mempool (or, when
// test_accept is set, only validate it), then run the mempool consistency
// check against the next block height.
4048 {
4050  Chainstate& active_chainstate = ActiveChainstate();
4051  if (!active_chainstate.GetMempool()) {
// No mempool attached to the active chainstate: report TX_NO_MEMPOOL.
4052  TxValidationState state;
4053  state.Invalid(TxValidationResult::TX_NO_MEMPOOL, "no-mempool");
4054  return MempoolAcceptResult::Failure(state);
4055  }
4056  auto result = AcceptToMemoryPool(active_chainstate, tx, GetTime(), /*bypass_limits=*/ false, test_accept);
4057  active_chainstate.GetMempool()->check(active_chainstate.CoinsTip(), active_chainstate.m_chain.Height() + 1);
4058  return result;
4059 }
4060 
// NOTE(review): the rendered listing dropped the line above this parameter
// list (original 4061), which carries the return type, function name, and the
// leading parameter(s) — the body also uses a `state` not declared in the
// visible parameters. Confirm against the original file.
// Dry-run full validation of `block` as the next block on top of the current
// tip: contextual header checks, CheckBlock, contextual block checks, and a
// ConnectBlock into a throwaway coins view (final argument true), leaving the
// real chain state untouched.
4062  const CChainParams& chainparams,
4063  Chainstate& chainstate,
4064  const CBlock& block,
4065  CBlockIndex* pindexPrev,
4066  const std::function<NodeClock::time_point()>& adjusted_time_callback,
4067  bool fCheckPOW,
4068  bool fCheckMerkleRoot)
4069 {
// Only valid when building directly on the current tip.
4071  assert(pindexPrev && pindexPrev == chainstate.m_chain.Tip());
4072  CCoinsViewCache viewNew(&chainstate.CoinsTip());
4073  uint256 block_hash(block.GetHash());
// Build a temporary index entry so ConnectBlock sees height/prev data.
4074  CBlockIndex indexDummy(block);
4075  indexDummy.pprev = pindexPrev;
4076  indexDummy.nHeight = pindexPrev->nHeight + 1;
4077  indexDummy.phashBlock = &block_hash;
4078 
4079  // NOTE: CheckBlockHeader is called by CheckBlock
4080  if (!ContextualCheckBlockHeader(block, state, chainstate.m_blockman, chainstate.m_chainman, pindexPrev, adjusted_time_callback()))
4081  return error("%s: Consensus::ContextualCheckBlockHeader: %s", __func__, state.ToString());
4082  if (!CheckBlock(block, state, chainparams.GetConsensus(), fCheckPOW, fCheckMerkleRoot))
4083  return error("%s: Consensus::CheckBlock: %s", __func__, state.ToString());
4084  if (!ContextualCheckBlock(block, state, chainstate.m_chainman, pindexPrev))
4085  return error("%s: Consensus::ContextualCheckBlock: %s", __func__, state.ToString());
4086  if (!chainstate.ConnectBlock(block, state, &indexDummy, viewNew, true)) {
4087  return false;
4088  }
4089  assert(state.IsValid());
4090 
4091  return true;
4092 }
4093 
4094 /* This function is called from the RPC code for pruneblockchain */
// Trigger a one-off manual prune down to nManualPruneHeight by flushing the
// chainstate with an explicit prune target. A flush failure is logged but not
// propagated to the caller.
4095 void PruneBlockFilesManual(Chainstate& active_chainstate, int nManualPruneHeight)
4096 {
4097  BlockValidationState state;
4098  if (!active_chainstate.FlushStateToDisk(
4099  state, FlushStateMode::NONE, nManualPruneHeight)) {
4100  LogPrintf("%s: failed to flush state (%s)\n", __func__, state.ToString());
4101  }
4102 }
4103 
// Load persisted mempool contents from `load_path` into this chainstate's
// mempool (via the free-function ::LoadMempool). No-op when no mempool is
// attached. `mockable_fopen_function` allows tests to stub file access.
// NOTE(review): one original source line (4108) is omitted by this rendering.
4104 void Chainstate::LoadMempool(const fs::path& load_path, FopenFn mockable_fopen_function)
4105 {
4106  if (!m_mempool) return;
4107  ::LoadMempool(*m_mempool, load_path, *this, mockable_fopen_function);
4109 }
4110 
// NOTE(review): the signature line above this body (original 4112's
// predecessor) was dropped by the rendering; the body matches a
// "load chain tip" routine on Chainstate — confirm against the original.
// Point m_chain at the block index entry matching the coins database's best
// block hash. Returns true if the tip already matches or was set; false when
// the coins DB's best block is unknown to the block index.
4112 {
4114  const CCoinsViewCache& coins_cache = CoinsTip();
4115  assert(!coins_cache.GetBestBlock().IsNull()); // Never called when the coins view is empty
4116  const CBlockIndex* tip = m_chain.Tip();
4117 
// Fast path: in-memory chain already agrees with the coins DB.
4118  if (tip && tip->GetBlockHash() == coins_cache.GetBestBlock()) {
4119  return true;
4120  }
4121 
4122  // Load pointer to end of best chain
4123  CBlockIndex* pindex = m_blockman.LookupBlockIndex(coins_cache.GetBestBlock());
4124  if (!pindex) {
4125  return false;
4126  }
4127  m_chain.SetTip(*pindex);
4129 
4130  tip = m_chain.Tip();
4131  LogPrintf("Loaded best chain: hashBestChain=%s height=%d date=%s progress=%f\n",
4132  tip->GetBlockHash().ToString(),
4133  m_chain.Height(),
// NOTE(review): the date/progress arguments of this LogPrintf (original lines
// 4134-4135) are omitted by the rendering.
4136  return true;
4137 }
4138 
// NOTE(review): the signature line was dropped by the rendering; this body
// opens the "Verifying blocks…" progress indicator at 0% — presumably the
// constructor of the block-verification RAII helper. Confirm against the
// original file.
4140 {
4141  uiInterface.ShowProgress(_("Verifying blocks…").translated, 0, false);
4142 }
4143 
// NOTE(review): the signature line was dropped by the rendering; this body
// dismisses the progress indicator (100%, empty title) — presumably the
// matching destructor of the helper above. Confirm against the original file.
4145 {
4146  uiInterface.ShowProgress("", 100, false);
4147 }
4148 
// NOTE(review): the line carrying the return type / function name (and
// possibly leading parameters) was dropped by the rendering; the body returns
// VerifyDBResult values, so this is the database verification routine.
// Also note that several error-path return statements (e.g. after the
// "Verification error: ..." LogPrintf calls) are omitted lines in this
// rendering — consult the unrendered validation.cpp.
// Verify the last nCheckDepth blocks of the best chain at increasing levels
// of strictness:
//   level 0: blocks can be read back from disk
//   level 1: CheckBlock passes
//   level 2: undo data can be read back
//   level 3: tip blocks disconnect cleanly in a memory-only coins view
//            (skipped if the cache budget would be exceeded)
//   level 4: the disconnected blocks reconnect via ConnectBlock
4150  Chainstate& chainstate,
4151  const Consensus::Params& consensus_params,
4152  CCoinsView& coinsview,
4153  int nCheckLevel, int nCheckDepth)
4154 {
4156 
// Nothing to verify on an empty or genesis-only chain.
4157  if (chainstate.m_chain.Tip() == nullptr || chainstate.m_chain.Tip()->pprev == nullptr) {
4158  return VerifyDBResult::SUCCESS;
4159  }
4160 
4161  // Verify blocks in the best chain
4162  if (nCheckDepth <= 0 || nCheckDepth > chainstate.m_chain.Height()) {
4163  nCheckDepth = chainstate.m_chain.Height();
4164  }
4165  nCheckLevel = std::max(0, std::min(4, nCheckLevel));
4166  LogPrintf("Verifying last %i blocks at level %i\n", nCheckDepth, nCheckLevel);
4167  CCoinsViewCache coins(&coinsview);
4168  CBlockIndex* pindex;
4169  CBlockIndex* pindexFailure = nullptr;
4170  int nGoodTransactions = 0;
4171  BlockValidationState state;
4172  int reportDone = 0;
4173  bool skipped_no_block_data{false};
4174  bool skipped_l3_checks{false};
4175  LogPrintf("Verification progress: 0%%\n");
4176 
4177  const bool is_snapshot_cs{!chainstate.m_from_snapshot_blockhash};
4178 
// Walk backwards from the tip; levels 0-3 run in this loop.
4179  for (pindex = chainstate.m_chain.Tip(); pindex && pindex->pprev; pindex = pindex->pprev) {
// With level-4 checks enabled, this backward pass accounts for only the
// first 50% of the progress bar; the reconnect pass uses the other half.
4180  const int percentageDone = std::max(1, std::min(99, (int)(((double)(chainstate.m_chain.Height() - pindex->nHeight)) / (double)nCheckDepth * (nCheckLevel >= 4 ? 50 : 100))));
4181  if (reportDone < percentageDone / 10) {
4182  // report every 10% step
4183  LogPrintf("Verification progress: %d%%\n", percentageDone);
4184  reportDone = percentageDone / 10;
4185  }
4186  uiInterface.ShowProgress(_("Verifying blocks…").translated, percentageDone, false);
4187  if (pindex->nHeight <= chainstate.m_chain.Height() - nCheckDepth) {
4188  break;
4189  }
4190  if ((chainstate.m_blockman.IsPruneMode() || is_snapshot_cs) && !(pindex->nStatus & BLOCK_HAVE_DATA)) {
4191  // If pruning or running under an assumeutxo snapshot, only go
4192  // back as far as we have data.
4193  LogPrintf("VerifyDB(): block verification stopping at height %d (no data). This could be due to pruning or use of an assumeutxo snapshot.\n", pindex->nHeight);
4194  skipped_no_block_data = true;
4195  break;
4196  }
4197  CBlock block;
4198  // check level 0: read from disk
4199  if (!ReadBlockFromDisk(block, pindex, consensus_params)) {
4200  LogPrintf("Verification error: ReadBlockFromDisk failed at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
4202  }
4203  // check level 1: verify block validity
4204  if (nCheckLevel >= 1 && !CheckBlock(block, state, consensus_params)) {
4205  LogPrintf("Verification error: found bad block at %d, hash=%s (%s)\n",
4206  pindex->nHeight, pindex->GetBlockHash().ToString(), state.ToString());
4208  }
4209  // check level 2: verify undo validity
4210  if (nCheckLevel >= 2 && pindex) {
4211  CBlockUndo undo;
4212  if (!pindex->GetUndoPos().IsNull()) {
4213  if (!UndoReadFromDisk(undo, pindex)) {
4214  LogPrintf("Verification error: found bad undo data at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
4216  }
4217  }
4218  }
4219  // check level 3: check for inconsistencies during memory-only disconnect of tip blocks
4220  size_t curr_coins_usage = coins.DynamicMemoryUsage() + chainstate.CoinsTip().DynamicMemoryUsage();
4221 
4222  if (nCheckLevel >= 3) {
4223  if (curr_coins_usage <= chainstate.m_coinstip_cache_size_bytes) {
4224  assert(coins.GetBestBlock() == pindex->GetBlockHash());
4225  DisconnectResult res = chainstate.DisconnectBlock(block, pindex, coins);
4226  if (res == DISCONNECT_FAILED) {
4227  LogPrintf("Verification error: irrecoverable inconsistency in block data at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
4229  }
// An unclean disconnect taints all transactions seen so far; remember
// the deepest such block for the summary error below.
4230  if (res == DISCONNECT_UNCLEAN) {
4231  nGoodTransactions = 0;
4232  pindexFailure = pindex;
4233  } else {
4234  nGoodTransactions += block.vtx.size();
4235  }
4236  } else {
4237  skipped_l3_checks = true;
4238  }
4239  }
4241  }
4242  if (pindexFailure) {
4243  LogPrintf("Verification error: coin database inconsistencies found (last %i blocks, %i good transactions before that)\n", chainstate.m_chain.Height() - pindexFailure->nHeight + 1, nGoodTransactions);
4245  }
4246  if (skipped_l3_checks) {
4247  LogPrintf("Skipped verification of level >=3 (insufficient database cache size). Consider increasing -dbcache.\n");
4248  }
4249 
4250  // store block count as we move pindex at check level >= 4
4251  int block_count = chainstate.m_chain.Height() - pindex->nHeight;
4252 
4253  // check level 4: try reconnecting blocks
4254  if (nCheckLevel >= 4 && !skipped_l3_checks) {
// Walk forward again from where the backward pass stopped, reconnecting
// each block into the same memory-only coins view.
4255  while (pindex != chainstate.m_chain.Tip()) {
4256  const int percentageDone = std::max(1, std::min(99, 100 - (int)(((double)(chainstate.m_chain.Height() - pindex->nHeight)) / (double)nCheckDepth * 50)));
4257  if (reportDone < percentageDone / 10) {
4258  // report every 10% step
4259  LogPrintf("Verification progress: %d%%\n", percentageDone);
4260  reportDone = percentageDone / 10;
4261  }
4262  uiInterface.ShowProgress(_("Verifying blocks…").translated, percentageDone, false);
4263  pindex = chainstate.m_chain.Next(pindex);
4264  CBlock block;
4265  if (!ReadBlockFromDisk(block, pindex, consensus_params)) {
4266  LogPrintf("Verification error: ReadBlockFromDisk failed at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
4268  }
4269  if (!chainstate.ConnectBlock(block, state, pindex, coins)) {
4270  LogPrintf("Verification error: found unconnectable block at %d, hash=%s (%s)\n", pindex->nHeight, pindex->GetBlockHash().ToString(), state.ToString());
4272  }
4274  }
4275  }
4276 
4277  LogPrintf("Verification: No coin database inconsistencies in last %i blocks (%i transactions)\n", block_count, nGoodTransactions);
4278 
// NOTE(review): the return statements inside these two branches (original
// lines 4280 and 4283) are omitted by the rendering.
4279  if (skipped_l3_checks) {
4281  }
4282  if (skipped_no_block_data) {
4284  }
4285  return VerifyDBResult::SUCCESS;
4286 }
4287 
// NOTE(review): the signature line was dropped by the rendering; from the
// body this takes a block index entry (`pindex`) and a coins view (`inputs`)
// and re-applies the block's UTXO effects — confirm against the original.
// Re-apply one block's UTXO-set changes to `inputs` while replaying an
// interrupted flush: spend each non-coinbase input and re-add each output.
4290 {
4292  // TODO: merge with ConnectBlock
4293  CBlock block;
4294  if (!ReadBlockFromDisk(block, pindex, m_chainman.GetConsensus())) {
4295  return error("ReplayBlock(): ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
4296  }
4297 
4298  for (const CTransactionRef& tx : block.vtx) {
4299  if (!tx->IsCoinBase()) {
4300  for (const CTxIn &txin : tx->vin) {
4301  inputs.SpendCoin(txin.prevout);
4302  }
4303  }
4304  // Pass check = true as every addition may be an overwrite.
4305  AddCoins(inputs, *tx, pindex->nHeight, true);
4306  }
4307  return true;
4308 }
4309 
// NOTE(review): the signature line was dropped by the rendering; the body
// operates on this->CoinsDB()/DisconnectBlock, i.e. a Chainstate replay
// routine — confirm against the original file.
// Recover the coins database from an interrupted flush. GetHeadBlocks()
// returns [new tip, old tip] when a flush was cut short; we disconnect down
// the old branch to the fork point, then roll forward to the new tip, and
// finally flush the reconciled view.
4311 {
4312  LOCK(cs_main);
4313 
4314  CCoinsView& db = this->CoinsDB();
4315  CCoinsViewCache cache(&db);
4316 
4317  std::vector<uint256> hashHeads = db.GetHeadBlocks();
4318  if (hashHeads.empty()) return true; // We're already in a consistent state.
4319  if (hashHeads.size() != 2) return error("ReplayBlocks(): unknown inconsistent state");
4320 
4321  uiInterface.ShowProgress(_("Replaying blocks…").translated, 0, false);
4322  LogPrintf("Replaying blocks\n");
4323 
4324  const CBlockIndex* pindexOld = nullptr; // Old tip during the interrupted flush.
4325  const CBlockIndex* pindexNew; // New tip during the interrupted flush.
4326  const CBlockIndex* pindexFork = nullptr; // Latest block common to both the old and the new tip.
4327 
4328  if (m_blockman.m_block_index.count(hashHeads[0]) == 0) {
4329  return error("ReplayBlocks(): reorganization to unknown block requested");
4330  }
4331  pindexNew = &(m_blockman.m_block_index[hashHeads[0]]);
4332 
4333  if (!hashHeads[1].IsNull()) { // The old tip is allowed to be 0, indicating it's the first flush.
4334  if (m_blockman.m_block_index.count(hashHeads[1]) == 0) {
4335  return error("ReplayBlocks(): reorganization from unknown block requested");
4336  }
4337  pindexOld = &(m_blockman.m_block_index[hashHeads[1]]);
4338  pindexFork = LastCommonAncestor(pindexOld, pindexNew);
4339  assert(pindexFork != nullptr);
4340  }
4341 
4342  // Rollback along the old branch.
4343  while (pindexOld != pindexFork) {
4344  if (pindexOld->nHeight > 0) { // Never disconnect the genesis block.
4345  CBlock block;
4346  if (!ReadBlockFromDisk(block, pindexOld, m_chainman.GetConsensus())) {
4347  return error("RollbackBlock(): ReadBlockFromDisk() failed at %d, hash=%s", pindexOld->nHeight, pindexOld->GetBlockHash().ToString());
4348  }
4349  LogPrintf("Rolling back %s (%i)\n", pindexOld->GetBlockHash().ToString(), pindexOld->nHeight);
4350  DisconnectResult res = DisconnectBlock(block, pindexOld, cache);
4351  if (res == DISCONNECT_FAILED) {
4352  return error("RollbackBlock(): DisconnectBlock failed at %d, hash=%s", pindexOld->nHeight, pindexOld->GetBlockHash().ToString());
4353  }
4354  // If DISCONNECT_UNCLEAN is returned, it means a non-existing UTXO was deleted, or an existing UTXO was
4355  // overwritten. It corresponds to cases where the block-to-be-disconnect never had all its operations
4356  // applied to the UTXO set. However, as both writing a UTXO and deleting a UTXO are idempotent operations,
4357  // the result is still a version of the UTXO set with the effects of that block undone.
4358  }
4359  pindexOld = pindexOld->pprev;
4360  }
4361 
4362  // Roll forward from the forking point to the new tip.
4363  int nForkHeight = pindexFork ? pindexFork->nHeight : 0;
4364  for (int nHeight = nForkHeight + 1; nHeight <= pindexNew->nHeight; ++nHeight) {
4365  const CBlockIndex& pindex{*Assert(pindexNew->GetAncestor(nHeight))};
4366 
4367  LogPrintf("Rolling forward %s (%i)\n", pindex.GetBlockHash().ToString(), nHeight);
4368  uiInterface.ShowProgress(_("Replaying blocks…").translated, (int) ((nHeight - nForkHeight) * 100.0 / (pindexNew->nHeight - nForkHeight)) , false);
4369  if (!RollforwardBlock(&pindex, cache)) return false;
4370  }
4371 
// Commit the reconciled view so the coins DB is consistent again.
4372  cache.SetBestBlock(pindexNew->GetBlockHash());
4373  cache.Flush();
4374  uiInterface.ShowProgress("", 100, false);
4375  return true;
4376 }
4377 
// NOTE(review): the signature line was dropped by the rendering; the body
// walks this chainstate's tip checking witness validation status — confirm
// the exact name/constness against the original file.
// Returns true when any block in the segwit-active portion of the current
// chain was connected without witness data (no BLOCK_OPT_WITNESS flag),
// meaning those blocks must be re-downloaded and re-validated.
4379 {
4381 
4382  // At and above m_params.SegwitHeight, segwit consensus rules must be validated
4383  CBlockIndex* block{m_chain.Tip()};
4384 
// Walk back only while segwit is active at the block's height; earlier
// blocks carry no witness commitment to validate.
4385  while (block != nullptr && DeploymentActiveAt(*block, m_chainman, Consensus::DEPLOYMENT_SEGWIT)) {
4386  if (!(block->nStatus & BLOCK_OPT_WITNESS)) {
4387  // block is insufficiently validated for a segwit client
4388  return true;
4389  }
4390  block = block->pprev;
4391  }
4392 
4393  return false;
4394 }
4395 
// Reset this chainstate's in-memory chain-selection state: restart the
// block-sequence counter and drop all best-tip candidates.
4396 void Chainstate::UnloadBlockIndex()
4397 {
4399  nBlockSequenceId = 1;
4400  setBlockIndexCandidates.clear();
4401 }
4402 
// NOTE(review): the signature line was dropped by the rendering (this body
// belongs to the chainstate manager — it uses GetAll(), m_best_header,
// m_best_invalid); confirm against the original file. The sort comparator
// argument on the std::sort line (original 4416) is also an omitted line.
// Load the block index from disk (unless reindexing), then rebuild derived
// in-memory state: each chainstate's best-tip candidate set (respecting
// assumed-valid boundaries), the best invalid block, and the best header.
// Returns false on DB failure or shutdown request; sets up for fresh-DB
// initialization when the index is empty.
4404 {
4406  // Load block index from databases
4407  bool needs_init = fReindex;
4408  if (!fReindex) {
4409  bool ret = m_blockman.LoadBlockIndexDB(GetConsensus());
4410  if (!ret) return false;
4411 
4412  m_blockman.ScanAndUnlinkAlreadyPrunedFiles();
4413 
4414  std::vector<CBlockIndex*> vSortedByHeight{m_blockman.GetAllBlockIndices()};
4415  std::sort(vSortedByHeight.begin(), vSortedByHeight.end(),
4417 
4418  // Find start of assumed-valid region.
4419  int first_assumed_valid_height = std::numeric_limits<int>::max();
4420 
4421  for (const CBlockIndex* block : vSortedByHeight) {
4422  if (block->IsAssumedValid()) {
4423  auto chainstates = GetAll();
4424 
4425  // If we encounter an assumed-valid block index entry, ensure that we have
4426  // one chainstate that tolerates assumed-valid entries and another that does
4427  // not (i.e. the background validation chainstate), since assumed-valid
4428  // entries should always be pending validation by a fully-validated chainstate.
4429  auto any_chain = [&](auto fnc) { return std::any_of(chainstates.cbegin(), chainstates.cend(), fnc); };
4430  assert(any_chain([](auto chainstate) { return chainstate->reliesOnAssumedValid(); }));
4431  assert(any_chain([](auto chainstate) { return !chainstate->reliesOnAssumedValid(); }));
4432 
4433  first_assumed_valid_height = block->nHeight;
4434  LogPrintf("Saw first assumedvalid block at height %d (%s)\n",
4435  first_assumed_valid_height, block->ToString());
4436  break;
4437  }
4438  }
4439 
4440  for (CBlockIndex* pindex : vSortedByHeight) {
4441  if (ShutdownRequested()) return false;
4442  if (pindex->IsAssumedValid() ||
4443  (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) &&
4444  (pindex->HaveTxsDownloaded() || pindex->pprev == nullptr))) {
4445 
4446  // Fill each chainstate's block candidate set. Only add assumed-valid
4447  // blocks to the tip candidate set if the chainstate is allowed to rely on
4448  // assumed-valid blocks.
4449  //
4450  // If all setBlockIndexCandidates contained the assumed-valid blocks, the
4451  // background chainstate's ActivateBestChain() call would add assumed-valid
4452  // blocks to the chain (based on how FindMostWorkChain() works). Obviously
4453  // we don't want this since the purpose of the background validation chain
4454  // is to validate assued-valid blocks.
4455  //
4456  // Note: This is considering all blocks whose height is greater or equal to
4457  // the first assumed-valid block to be assumed-valid blocks, and excluding
4458  // them from the background chainstate's setBlockIndexCandidates set. This
4459  // does mean that some blocks which are not technically assumed-valid
4460  // (later blocks on a fork beginning before the first assumed-valid block)
4461  // might not get added to the background chainstate, but this is ok,
4462  // because they will still be attached to the active chainstate if they
4463  // actually contain more work.
4464  //
4465  // Instead of this height-based approach, an earlier attempt was made at
4466  // detecting "holistically" whether the block index under consideration
4467  // relied on an assumed-valid ancestor, but this proved to be too slow to
4468  // be practical.
4469  for (Chainstate* chainstate : GetAll()) {
4470  if (chainstate->reliesOnAssumedValid() ||
4471  pindex->nHeight < first_assumed_valid_height) {
4472  chainstate->setBlockIndexCandidates.insert(pindex);
4473  }
4474  }
4475  }
// Track the most-work invalid block and the most-work valid header seen.
4476  if (pindex->nStatus & BLOCK_FAILED_MASK && (!m_best_invalid || pindex->nChainWork > m_best_invalid->nChainWork)) {
4477  m_best_invalid = pindex;
4478  }
4479  if (pindex->IsValid(BLOCK_VALID_TREE) && (m_best_header == nullptr || CBlockIndexWorkComparator()(m_best_header, pindex)))
4480  m_best_header = pindex;
4481  }
4482 
4483  needs_init = m_blockman.m_block_index.empty();
4484  }
4485 
4486  if (needs_init) {
4487  // Everything here is for *new* reindex/DBs. Thus, though
4488  // LoadBlockIndexDB may have set fReindex if we shut down
4489  // mid-reindex previously, we don't check fReindex and
4490  // instead only check it prior to LoadBlockIndexDB to set
4491  // needs_init.
4492 
4493  LogPrintf("Initializing databases...\n")
4494  }
4495  return true;
4496 }
4497 
// NOTE(review): the signature line was dropped by the rendering; the body
// writes the chain parameters' genesis block to disk if the block index does
// not yet contain it. Also note that the statement producing `pindex` used by
// ReceivedBlockTransactions (original line 4517) is an omitted line — consult
// the unrendered validation.cpp.
// Ensure the genesis block is stored on disk and registered in the block
// index. Returns true if already present or successfully written.
4499 {
4500  LOCK(cs_main);
4501 
4502  const CChainParams& params{m_chainman.GetParams()};
4503 
4504  // Check whether we're already initialized by checking for genesis in
4505  // m_blockman.m_block_index. Note that we can't use m_chain here, since it is
4506  // set based on the coins db, not the block index db, which is the only
4507  // thing loaded at this point.
4508  if (m_blockman.m_block_index.count(params.GenesisBlock().GetHash()))
4509  return true;
4510 
4511  try {
4512  const CBlock& block = params.GenesisBlock();
4513  FlatFilePos blockPos{m_blockman.SaveBlockToDisk(block, 0, m_chain, params, nullptr)};
4514  if (blockPos.IsNull()) {
4515  return error("%s: writing genesis block to disk failed", __func__);
4516  }
4518  ReceivedBlockTransactions(block, pindex, blockPos);
4519  } catch (const std::runtime_error& e) {
4520  return error("%s: failed to write genesis block: %s", __func__, e.what());
4521  }
4522 
4523  return true;
4524 }
4525 
// Import blocks from an external block file (used by -reindex and -loadblock).
// Scans the file for the network magic, deserializes each block found, accepts
// it (or remembers it in blocks_with_unknown_parent if its parent is not yet
// known), and retries deferred children once their parent arrives.
// @param fileIn                      file to read from; ownership is taken and it is closed on destruction
// @param dbp                         disk position of the file (set for -reindex), updated per block
// @param blocks_with_unknown_parent  map of parent-hash -> disk position for out-of-order blocks (set for -reindex)
4527  FILE* fileIn,
4528  FlatFilePos* dbp,
4529  std::multimap<uint256, FlatFilePos>* blocks_with_unknown_parent)
4530 {
4532 
4533  // Either both should be specified (-reindex), or neither (-loadblock).
4534  assert(!dbp == !blocks_with_unknown_parent);
4535 
4536  const auto start{SteadyClock::now()};
4537  const CChainParams& params{m_chainman.GetParams()};
4538 
4539  int nLoaded = 0;
4540  try {
4541  // This takes over fileIn and calls fclose() on it in the CBufferedFile destructor
// NOTE(review): the construction of `blkdat` (a buffered reader over fileIn)
// is on a line elided in this view -- confirm against the full source.
4543  // nRewind indicates where to resume scanning in case something goes wrong,
4544  // such as a block fails to deserialize.
4545  uint64_t nRewind = blkdat.GetPos();
4546  while (!blkdat.eof()) {
4547  if (ShutdownRequested()) return;
4548 
4549  blkdat.SetPos(nRewind);
4550  nRewind++; // start one byte further next time, in case of failure
4551  blkdat.SetLimit(); // remove former limit
4552  unsigned int nSize = 0;
4553  try {
4554  // locate a header
4555  unsigned char buf[CMessageHeader::MESSAGE_START_SIZE];
4556  blkdat.FindByte(params.MessageStart()[0]);
4557  nRewind = blkdat.GetPos() + 1;
4558  blkdat >> buf;
4559  if (memcmp(buf, params.MessageStart(), CMessageHeader::MESSAGE_START_SIZE)) {
4560  continue;
4561  }
4562  // read size
4563  blkdat >> nSize;
// Reject sizes smaller than a bare block header (80 bytes) or above the
// serialized-block consensus limit.
4564  if (nSize < 80 || nSize > MAX_BLOCK_SERIALIZED_SIZE)
4565  continue;
4566  } catch (const std::exception&) {
4567  // no valid block header found; don't complain
4568  // (this happens at the end of every blk.dat file)
4569  break;
4570  }
4571  try {
4572  // read block header
4573  const uint64_t nBlockPos{blkdat.GetPos()};
4574  if (dbp)
4575  dbp->nPos = nBlockPos;
4576  blkdat.SetLimit(nBlockPos + nSize);
4577  CBlockHeader header;
4578  blkdat >> header;
4579  const uint256 hash{header.GetHash()};
4580  // Skip the rest of this block (this may read from disk into memory); position to the marker before the
4581  // next block, but it's still possible to rewind to the start of the current block (without a disk read).
4582  nRewind = nBlockPos + nSize;
4583  blkdat.SkipTo(nRewind);
4584  {
4585  LOCK(cs_main);
4586  // detect out of order blocks, and store them for later
4587  if (hash != params.GetConsensus().hashGenesisBlock && !m_blockman.LookupBlockIndex(header.hashPrevBlock)) {
4588  LogPrint(BCLog::REINDEX, "%s: Out of order block %s, parent %s not known\n", __func__, hash.ToString(),
4589  header.hashPrevBlock.ToString());
4590  if (dbp && blocks_with_unknown_parent) {
4591  blocks_with_unknown_parent->emplace(header.hashPrevBlock, *dbp);
4592  }
4593  continue;
4594  }
4595 
4596  // process in case the block isn't known yet
4597  const CBlockIndex* pindex = m_blockman.LookupBlockIndex(hash);
4598  if (!pindex || (pindex->nStatus & BLOCK_HAVE_DATA) == 0) {
4599  // This block can be processed immediately; rewind to its start, read and deserialize it.
4600  blkdat.SetPos(nBlockPos);
4601  std::shared_ptr<CBlock> pblock{std::make_shared<CBlock>()};
4602  blkdat >> *pblock;
4603  nRewind = blkdat.GetPos();
4604 
4605  BlockValidationState state;
4606  if (AcceptBlock(pblock, state, nullptr, true, dbp, nullptr, true)) {
4607  nLoaded++;
4608  }
// A hard validation error (as opposed to a merely invalid block)
// aborts the whole import.
4609  if (state.IsError()) {
4610  break;
4611  }
4612  } else if (hash != params.GetConsensus().hashGenesisBlock && pindex->nHeight % 1000 == 0) {
4613  LogPrint(BCLog::REINDEX, "Block Import: already had block %s at height %d\n", hash.ToString(), pindex->nHeight);
4614  }
4615  }
4616 
4617  // Activate the genesis block so normal node progress can continue
4618  if (hash == params.GetConsensus().hashGenesisBlock) {
4619  BlockValidationState state;
4620  if (!ActivateBestChain(state, nullptr)) {
4621  break;
4622  }
4623  }
4624 
4625  NotifyHeaderTip(*this);
4626 
4627  if (!blocks_with_unknown_parent) continue;
4628 
4629  // Recursively process earlier encountered successors of this block
4630  std::deque<uint256> queue;
4631  queue.push_back(hash);
4632  while (!queue.empty()) {
4633  uint256 head = queue.front();
4634  queue.pop_front();
4635  auto range = blocks_with_unknown_parent->equal_range(head);
4636  while (range.first != range.second) {
4637  std::multimap<uint256, FlatFilePos>::iterator it = range.first;
4638  std::shared_ptr<CBlock> pblockrecursive = std::make_shared<CBlock>();
4639  if (ReadBlockFromDisk(*pblockrecursive, it->second, params.GetConsensus())) {
4640  LogPrint(BCLog::REINDEX, "%s: Processing out of order child %s of %s\n", __func__, pblockrecursive->GetHash().ToString(),
4641  head.ToString());
4642  LOCK(cs_main);
4643  BlockValidationState dummy;
4644  if (AcceptBlock(pblockrecursive, dummy, nullptr, true, &it->second, nullptr, true)) {
4645  nLoaded++;
4646  queue.push_back(pblockrecursive->GetHash());
4647  }
4648  }
// Advance the iterator before erasing the element it points at.
4649  range.first++;
4650  blocks_with_unknown_parent->erase(it);
4651  NotifyHeaderTip(*this);
4652  }
4653  }
4654  } catch (const std::exception& e) {
4655  // historical bugs added extra data to the block files that does not deserialize cleanly.
4656  // commonly this data is between readable blocks, but it does not really matter. such data is not fatal to the import process.
4657  // the code that reads the block files deals with invalid data by simply ignoring it.
4658  // it continues to search for the next {4 byte magic message start bytes + 4 byte length + block} that does deserialize cleanly
4659  // and passes all of the other block validation checks dealing with POW and the merkle root, etc...
4660  // we merely note with this informational log message when unexpected data is encountered.
4661  // we could also be experiencing a storage system read error, or a read of a previous bad write. these are possible, but
4662  // less likely scenarios. we don't have enough information to tell a difference here.
4663  // the reindex process is not the place to attempt to clean and/or compact the block files. if so desired, a studious node operator
4664  // may use knowledge of the fact that the block files are not entirely pristine in order to prepare a set of pristine, and
4665  // perhaps ordered, block files for later reindexing.
4666  LogPrint(BCLog::REINDEX, "%s: unexpected data at file offset 0x%x - %s. continuing\n", __func__, (nRewind - 1), e.what());
4667  }
4668  }
4669  } catch (const std::runtime_error& e) {
4670  AbortNode(std::string("System error: ") + e.what());
4671  }
4672  LogPrintf("Loaded %i blocks from external file in %dms\n", nLoaded, Ticks<std::chrono::milliseconds>(SteadyClock::now() - start));
4673 }
4674 
// Exhaustive consistency check of the in-memory block tree: walks every entry of
// m_blockman.m_block_index depth-first and asserts the documented invariants
// between nStatus validity flags, nTx/nChainTx, setBlockIndexCandidates and
// m_blocks_unlinked. Debug/diagnostic only -- every failure is an assert.
// NOTE(review): the function signature and the early-out guard condition (the
// line preceding the bare `return;` below) are elided in this view -- presumably
// a "should we check?" flag test; confirm against the full source.
4676 {
4678  return;
4679  }
4680 
4681  LOCK(cs_main);
4682 
4683  // During a reindex, we read the genesis block and call CheckBlockIndex before ActivateBestChain,
4684  // so we have the genesis block in m_blockman.m_block_index but no active chain. (A few of the
4685  // tests when iterating the block tree require that m_chain has been initialized.)
4686  if (m_chain.Height() < 0) {
4687  assert(m_blockman.m_block_index.size() <= 1);
4688  return;
4689  }
4690 
4691  // Build forward-pointing map of the entire block tree.
4692  std::multimap<CBlockIndex*,CBlockIndex*> forward;
4693  for (auto& [_, block_index] : m_blockman.m_block_index) {
4694  forward.emplace(block_index.pprev, &block_index);
4695  }
4696 
4697  assert(forward.size() == m_blockman.m_block_index.size());
4698 
4699  std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeGenesis = forward.equal_range(nullptr);
4700  CBlockIndex *pindex = rangeGenesis.first->second;
4701  rangeGenesis.first++;
4702  assert(rangeGenesis.first == rangeGenesis.second); // There is only one index entry with parent nullptr.
4703 
4704  // Iterate over the entire block tree, using depth-first search.
4705  // Along the way, remember whether there are blocks on the path from genesis
4706  // block being explored which are the first to have certain properties.
4707  size_t nNodes = 0;
4708  int nHeight = 0;
4709  CBlockIndex* pindexFirstInvalid = nullptr; // Oldest ancestor of pindex which is invalid.
4710  CBlockIndex* pindexFirstMissing = nullptr; // Oldest ancestor of pindex which does not have BLOCK_HAVE_DATA.
4711  CBlockIndex* pindexFirstNeverProcessed = nullptr; // Oldest ancestor of pindex for which nTx == 0.
4712  CBlockIndex* pindexFirstNotTreeValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_TREE (regardless of being valid or not).
4713  CBlockIndex* pindexFirstNotTransactionsValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS (regardless of being valid or not).
4714  CBlockIndex* pindexFirstNotChainValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN (regardless of being valid or not).
4715  CBlockIndex* pindexFirstNotScriptsValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_SCRIPTS (regardless of being valid or not).
4716  while (pindex != nullptr) {
4717  nNodes++;
4718  if (pindexFirstInvalid == nullptr && pindex->nStatus & BLOCK_FAILED_VALID) pindexFirstInvalid = pindex;
4719  // Assumed-valid index entries will not have data since we haven't downloaded the
4720  // full block yet.
4721  if (pindexFirstMissing == nullptr && !(pindex->nStatus & BLOCK_HAVE_DATA) && !pindex->IsAssumedValid()) {
4722  pindexFirstMissing = pindex;
4723  }
4724  if (pindexFirstNeverProcessed == nullptr && pindex->nTx == 0) pindexFirstNeverProcessed = pindex;
4725  if (pindex->pprev != nullptr && pindexFirstNotTreeValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TREE) pindexFirstNotTreeValid = pindex;
4726 
4727  if (pindex->pprev != nullptr && !pindex->IsAssumedValid()) {
4728  // Skip validity flag checks for BLOCK_ASSUMED_VALID index entries, since these
4729  // *_VALID_MASK flags will not be present for index entries we are temporarily assuming
4730  // valid.
4731  if (pindexFirstNotTransactionsValid == nullptr &&
4732  (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TRANSACTIONS) {
4733  pindexFirstNotTransactionsValid = pindex;
4734  }
4735 
4736  if (pindexFirstNotChainValid == nullptr &&
4737  (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_CHAIN) {
4738  pindexFirstNotChainValid = pindex;
4739  }
4740 
4741  if (pindexFirstNotScriptsValid == nullptr &&
4742  (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_SCRIPTS) {
4743  pindexFirstNotScriptsValid = pindex;
4744  }
4745  }
4746 
4747  // Begin: actual consistency checks.
4748  if (pindex->pprev == nullptr) {
4749  // Genesis block checks.
4750  assert(pindex->GetBlockHash() == m_chainman.GetConsensus().hashGenesisBlock); // Genesis block's hash must match.
4751  assert(pindex == m_chain.Genesis()); // The current active chain's genesis block must be this block.
4752  }
4753  if (!pindex->HaveTxsDownloaded()) assert(pindex->nSequenceId <= 0); // nSequenceId can't be set positive for blocks that aren't linked (negative is used for preciousblock)
4754  // VALID_TRANSACTIONS is equivalent to nTx > 0 for all nodes (whether or not pruning has occurred).
4755  // HAVE_DATA is only equivalent to nTx > 0 (or VALID_TRANSACTIONS) if no pruning has occurred.
4756  // Unless these indexes are assumed valid and pending block download on a
4757  // background chainstate.
4758  if (!m_blockman.m_have_pruned && !pindex->IsAssumedValid()) {
4759  // If we've never pruned, then HAVE_DATA should be equivalent to nTx > 0
4760  assert(!(pindex->nStatus & BLOCK_HAVE_DATA) == (pindex->nTx == 0));
4761  assert(pindexFirstMissing == pindexFirstNeverProcessed);
4762  } else {
4763  // If we have pruned, then we can only say that HAVE_DATA implies nTx > 0
4764  if (pindex->nStatus & BLOCK_HAVE_DATA) assert(pindex->nTx > 0);
4765  }
4766  if (pindex->nStatus & BLOCK_HAVE_UNDO) assert(pindex->nStatus & BLOCK_HAVE_DATA);
4767  if (pindex->IsAssumedValid()) {
4768  // Assumed-valid blocks should have some nTx value.
4769  assert(pindex->nTx > 0);
4770  // Assumed-valid blocks should connect to the main chain.
4771  assert((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TREE);
4772  } else {
4773  // Otherwise there should only be an nTx value if we have
4774  // actually seen a block's transactions.
4775  assert(((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TRANSACTIONS) == (pindex->nTx > 0)); // This is pruning-independent.
4776  }
4777  // All parents having had data (at some point) is equivalent to all parents being VALID_TRANSACTIONS, which is equivalent to HaveTxsDownloaded().
4778  assert((pindexFirstNeverProcessed == nullptr) == pindex->HaveTxsDownloaded());
4779  assert((pindexFirstNotTransactionsValid == nullptr) == pindex->HaveTxsDownloaded());
4780  assert(pindex->nHeight == nHeight); // nHeight must be consistent.
4781  assert(pindex->pprev == nullptr || pindex->nChainWork >= pindex->pprev->nChainWork); // For every block except the genesis block, the chainwork must be larger than the parent's.
4782  assert(nHeight < 2 || (pindex->pskip && (pindex->pskip->nHeight < nHeight))); // The pskip pointer must point back for all but the first 2 blocks.
4783  assert(pindexFirstNotTreeValid == nullptr); // All m_blockman.m_block_index entries must at least be TREE valid
4784  if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TREE) assert(pindexFirstNotTreeValid == nullptr); // TREE valid implies all parents are TREE valid
4785  if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_CHAIN) assert(pindexFirstNotChainValid == nullptr); // CHAIN valid implies all parents are CHAIN valid
4786  if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_SCRIPTS) assert(pindexFirstNotScriptsValid == nullptr); // SCRIPTS valid implies all parents are SCRIPTS valid
4787  if (pindexFirstInvalid == nullptr) {
4788  // Checks for not-invalid blocks.
4789  assert((pindex->nStatus & BLOCK_FAILED_MASK) == 0); // The failed mask cannot be set for blocks without invalid parents.
4790  }
4791  if (!CBlockIndexWorkComparator()(pindex, m_chain.Tip()) && pindexFirstNeverProcessed == nullptr) {
4792  if (pindexFirstInvalid == nullptr) {
4793  const bool is_active = this == &m_chainman.ActiveChainstate();
4794 
4795  // If this block sorts at least as good as the current tip and
4796  // is valid and we have all data for its parents, it must be in
4797  // setBlockIndexCandidates. m_chain.Tip() must also be there
4798  // even if some data has been pruned.
4799  //
4800  // Don't perform this check for the background chainstate since
4801  // its setBlockIndexCandidates shouldn't have some entries (i.e. those past the
4802  // snapshot block) which do exist in the block index for the active chainstate.
4803  if (is_active && (pindexFirstMissing == nullptr || pindex == m_chain.Tip())) {
4804  assert(setBlockIndexCandidates.count(pindex));
4805  }
4806  // If some parent is missing, then it could be that this block was in
4807  // setBlockIndexCandidates but had to be removed because of the missing data.
4808  // In this case it must be in m_blocks_unlinked -- see test below.
4809  }
4810  } else { // If this block sorts worse than the current tip or some ancestor's block has never been seen, it cannot be in setBlockIndexCandidates.
4811  assert(setBlockIndexCandidates.count(pindex) == 0);
4812  }
4813  // Check whether this block is in m_blocks_unlinked.
4814  std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeUnlinked = m_blockman.m_blocks_unlinked.equal_range(pindex->pprev);
4815  bool foundInUnlinked = false;
4816  while (rangeUnlinked.first != rangeUnlinked.second) {
4817  assert(rangeUnlinked.first->first == pindex->pprev);
4818  if (rangeUnlinked.first->second == pindex) {
4819  foundInUnlinked = true;
4820  break;
4821  }
4822  rangeUnlinked.first++;
4823  }
4824  if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed != nullptr && pindexFirstInvalid == nullptr) {
4825  // If this block has block data available, some parent was never received, and has no invalid parents, it must be in m_blocks_unlinked.
4826  assert(foundInUnlinked);
4827  }
4828  if (!(pindex->nStatus & BLOCK_HAVE_DATA)) assert(!foundInUnlinked); // Can't be in m_blocks_unlinked if we don't HAVE_DATA
4829  if (pindexFirstMissing == nullptr) assert(!foundInUnlinked); // We aren't missing data for any parent -- cannot be in m_blocks_unlinked.
4830  if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed == nullptr && pindexFirstMissing != nullptr) {
4831  // We HAVE_DATA for this block, have received data for all parents at some point, but we're currently missing data for some parent.
4832  assert(m_blockman.m_have_pruned); // We must have pruned.
4833  // This block may have entered m_blocks_unlinked if:
4834  // - it has a descendant that at some point had more work than the
4835  // tip, and
4836  // - we tried switching to that descendant but were missing
4837  // data for some intermediate block between m_chain and the
4838  // tip.
4839  // So if this block is itself better than m_chain.Tip() and it wasn't in
4840  // setBlockIndexCandidates, then it must be in m_blocks_unlinked.
4841  if (!CBlockIndexWorkComparator()(pindex, m_chain.Tip()) && setBlockIndexCandidates.count(pindex) == 0) {
4842  if (pindexFirstInvalid == nullptr) {
4843  assert(foundInUnlinked);
4844  }
4845  }
4846  }
4847  // assert(pindex->GetBlockHash() == pindex->GetBlockHeader().GetHash()); // Perhaps too slow
4848  // End: actual consistency checks.
4849 
4850  // Try descending into the first subnode.
4851  std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> range = forward.equal_range(pindex);
4852  if (range.first != range.second) {
4853  // A subnode was found.
4854  pindex = range.first->second;
4855  nHeight++;
4856  continue;
4857  }
4858  // This is a leaf node.
4859  // Move upwards until we reach a node of which we have not yet visited the last child.
4860  while (pindex) {
4861  // We are going to either move to a parent or a sibling of pindex.
4862  // If pindex was the first with a certain property, unset the corresponding variable.
4863  if (pindex == pindexFirstInvalid) pindexFirstInvalid = nullptr;
4864  if (pindex == pindexFirstMissing) pindexFirstMissing = nullptr;
4865  if (pindex == pindexFirstNeverProcessed) pindexFirstNeverProcessed = nullptr;
4866  if (pindex == pindexFirstNotTreeValid) pindexFirstNotTreeValid = nullptr;
4867  if (pindex == pindexFirstNotTransactionsValid) pindexFirstNotTransactionsValid = nullptr;
4868  if (pindex == pindexFirstNotChainValid) pindexFirstNotChainValid = nullptr;
4869  if (pindex == pindexFirstNotScriptsValid) pindexFirstNotScriptsValid = nullptr;
4870  // Find our parent.
4871  CBlockIndex* pindexPar = pindex->pprev;
4872  // Find which child we just visited.
4873  std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangePar = forward.equal_range(pindexPar);
4874  while (rangePar.first->second != pindex) {
4875  assert(rangePar.first != rangePar.second); // Our parent must have at least the node we're coming from as child.
4876  rangePar.first++;
4877  }
4878  // Proceed to the next one.
4879  rangePar.first++;
4880  if (rangePar.first != rangePar.second) {
4881  // Move to the sibling.
4882  pindex = rangePar.first->second;
4883  break;
4884  } else {
4885  // Move up further.
4886  pindex = pindexPar;
4887  nHeight--;
4888  continue;
4889  }
4890  }
4891  }
4892 
4893  // Check that we actually traversed the entire map.
4894  assert(nNodes == forward.size());
4895 }
4896 
4897 std::string Chainstate::ToString()
4898 {
4900  CBlockIndex* tip = m_chain.Tip();
4901  return strprintf("Chainstate [%s] @ height %d (%s)",
4902  m_from_snapshot_blockhash ? "snapshot" : "ibd",
4903  tip ? tip->nHeight : -1, tip ? tip->GetBlockHash().ToString() : "null");
4904 }
4905 
// Resize the coins caches (in-memory coins tip cache and coinsdb cache) to the
// given byte sizes, flushing state to disk when the tip cache shrank.
// @param coinstip_size  new size (bytes) for the in-memory coins cache
// @param coinsdb_size   new size (bytes) for the on-disk coins database cache
// @return true on success (including the no-op case), result of the flush otherwise
4906 bool Chainstate::ResizeCoinsCaches(size_t coinstip_size, size_t coinsdb_size)
4907 {
4909  if (coinstip_size == m_coinstip_cache_size_bytes &&
4910  coinsdb_size == m_coinsdb_cache_size_bytes) {
4911  // Cache sizes are unchanged, no need to continue.
4912  return true;
4913  }
4914  size_t old_coinstip_size = m_coinstip_cache_size_bytes;
4915  m_coinstip_cache_size_bytes = coinstip_size;
4916  m_coinsdb_cache_size_bytes = coinsdb_size;
4917  CoinsDB().ResizeCache(coinsdb_size);
4918 
4919  LogPrintf("[%s] resized coinsdb cache to %.1f MiB\n",
4920  this->ToString(), coinsdb_size * (1.0 / 1024 / 1024));
4921  LogPrintf("[%s] resized coinstip cache to %.1f MiB\n",
4922  this->ToString(), coinstip_size * (1.0 / 1024 / 1024));
4923 
4924  BlockValidationState state;
4925  bool ret;
4926 
// NOTE(review): the assignments to `ret` (FlushStateToDisk calls with
// different flush modes, and a cache reallocation in the shrink branch)
// are on lines elided in this view -- confirm against the full source.
4927  if (coinstip_size > old_coinstip_size) {
4928  // Likely no need to flush if cache sizes have grown.
4930  } else {
4931  // Otherwise, flush state to disk and deallocate the in-memory coins map.
4934  }
4935  return ret;
4936 }
4937 
4940 double GuessVerificationProgress(const ChainTxData& data, const CBlockIndex *pindex) {
4941  if (pindex == nullptr)
4942  return 0.0;
4943 
4944  int64_t nNow = time(nullptr);
4945 
4946  double fTxTotal;
4947 
4948  if (pindex->nChainTx <= data.nTxCount) {
4949  fTxTotal = data.nTxCount + (nNow - data.nTime) * data.dTxRate;
4950  } else {
4951  fTxTotal = pindex->nChainTx + (nNow - pindex->GetBlockTime()) * data.dTxRate;
4952  }
4953 
4954  return std::min<double>(pindex->nChainTx / fTxTotal, 1.0);
4955 }
4956 
4957 std::optional<uint256> ChainstateManager::SnapshotBlockhash() const
4958 {
4959  LOCK(::cs_main);
4960  if (m_active_chainstate && m_active_chainstate->m_from_snapshot_blockhash) {
4961  // If a snapshot chainstate exists, it will always be our active.
4962  return m_active_chainstate->m_from_snapshot_blockhash;
4963  }
4964  return std::nullopt;
4965 }
4966 
4967 std::vector<Chainstate*> ChainstateManager::GetAll()
4968 {
4969  LOCK(::cs_main);
4970  std::vector<Chainstate*> out;
4971 
4972  for (Chainstate* cs : {m_ibd_chainstate.get(), m_snapshot_chainstate.get()}) {
4973  if (this->IsUsable(cs)) out.push_back(cs);
4974  }
4975 
4976  return out;
4977 }
4978 
4979 Chainstate& ChainstateManager::InitializeChainstate(CTxMemPool* mempool)
4980 {
4982  assert(!m_ibd_chainstate);
4983  assert(!m_active_chainstate);
4984 
4985  m_ibd_chainstate = std::make_unique<Chainstate>(mempool, m_blockman, *this);
4986  m_active_chainstate = m_ibd_chainstate.get();
4987  return *m_active_chainstate;
4988 }
4989 
// Look up the hard-coded assumeutxo data (expected UTXO-set hash etc.) for the
// given snapshot base height on this chain; returns nullptr if the height has
// no assumeutxo entry.
// NOTE(review): the first line of the signature (return type and function name)
// is elided in this view -- confirm against the full source.
4991  const int height, const CChainParams& chainparams)
4992 {
4993  const MapAssumeutxo& valid_assumeutxos_map = chainparams.Assumeutxo();
4994  const auto assumeutxo_found = valid_assumeutxos_map.find(height);
4995 
4996  if (assumeutxo_found != valid_assumeutxos_map.end()) {
4997  return &assumeutxo_found->second;
4998  }
4999  return nullptr;
5000 }
5001 
// Remove a chainstate's leveldb directory (and, for snapshot chainstates, its
// base-blockhash marker file) from disk.
// @param db_path      path of the leveldb chainstate directory to delete
// @param is_snapshot  whether this is a snapshot chainstate (has a blockhash file)
// @return true iff leveldb's DestroyDB succeeded and the directory is gone
5002 static bool DeleteCoinsDBFromDisk(const fs::path db_path, bool is_snapshot)
5004 {
5006 
5007  if (is_snapshot) {
5008  fs::path base_blockhash_path = db_path / node::SNAPSHOT_BLOCKHASH_FILENAME;
5009 
5010  if (fs::exists(base_blockhash_path)) {
5011  bool removed = fs::remove(base_blockhash_path);
5012  if (!removed) {
5013  LogPrintf("[snapshot] failed to remove file %s\n",
5014  fs::PathToString(base_blockhash_path));
5015  }
5016  } else {
// NOTE(review): the continuation of this LogPrintf (its argument and closing
// parenthesis, presumably fs::PathToString(node::SNAPSHOT_BLOCKHASH_FILENAME))
// is on a line elided in this view -- confirm against the full source.
5017  LogPrintf("[snapshot] snapshot chainstate dir being removed lacks %s file\n",
5019  }
5020  }
5021 
5022  std::string path_str = fs::PathToString(db_path);
5023  LogPrintf("Removing leveldb dir at %s\n", path_str);
5024 
5025  // We have to destruct the leveldb::DB before this call in order to release the db
5026  // lock, otherwise `DestroyDB` will fail. See `leveldb::~DBImpl()`.
5027  const bool destroyed = dbwrapper::DestroyDB(path_str, {}).ok();
5028 
5029  if (!destroyed) {
5030  LogPrintf("error: leveldb DestroyDB call failed on %s\n", path_str);
5031  }
5032 
5033  // Datadir should be removed from filesystem; otherwise initialization may detect
5034  // it on subsequent startups and get confused.
5035  //
5036  // If the base_blockhash_path removal above fails in the case of snapshot
5037  // chainstates, this will return false since leveldb won't remove a non-empty
5038  // directory.
5039  return destroyed && !fs::exists(db_path);
5040 }
5041 
5043  AutoFile& coins_file,
5044  const SnapshotMetadata& metadata,
5045  bool in_memory)
5046 {
5047  uint256 base_blockhash = metadata.m_base_blockhash;
5048 
5049  if (this->SnapshotBlockhash()) {
5050  LogPrintf("[snapshot] can't activate a snapshot-based chainstate more than once\n");
5051  return false;
5052  }
5053 
5054  int64_t current_coinsdb_cache_size{0};
5055  int64_t current_coinstip_cache_size{0};
5056 
5057  // Cache percentages to allocate to each chainstate.
5058  //
5059  // These particular percentages don't matter so much since they will only be
5060  // relevant during snapshot activation; caches are rebalanced at the conclusion of
5061  // this function. We want to give (essentially) all available cache capacity to the
5062  // snapshot to aid the bulk load later in this function.
5063  static constexpr double IBD_CACHE_PERC = 0.01;
5064  static constexpr double SNAPSHOT_CACHE_PERC = 0.99;
5065 
5066  {
5067  LOCK(::cs_main);
5068  // Resize the coins caches to ensure we're not exceeding memory limits.
5069  //
5070  // Allocate the majority of the cache to the incoming snapshot chainstate, since
5071  // (optimistically) getting to its tip will be the top priority. We'll need to call
5072  // `MaybeRebalanceCaches()` once we're done with this function to ensure
5073  // the right allocation (including the possibility that no snapshot was activated
5074  // and that we should restore the active chainstate caches to their original size).
5075  //
5076  current_coinsdb_cache_size = this->ActiveChainstate().m_coinsdb_cache_size_bytes;
5077  current_coinstip_cache_size = this->ActiveChainstate().m_coinstip_cache_size_bytes;
5078 
5079  // Temporarily resize the active coins cache to make room for the newly-created
5080  // snapshot chain.
5081  this->ActiveChainstate().ResizeCoinsCaches(
5082  static_cast<size_t>(current_coinstip_cache_size * IBD_CACHE_PERC),
5083  static_cast<size_t>(current_coinsdb_cache_size * IBD_CACHE_PERC));
5084  }
5085 
5086  auto snapshot_chainstate = WITH_LOCK(::cs_main,
5087  return std::make_unique<Chainstate>(
5088  /*mempool=*/nullptr, m_blockman, *this, base_blockhash));
5089 
5090  {
5091  LOCK(::cs_main);
5092  snapshot_chainstate->InitCoinsDB(
5093  static_cast<size_t>(current_coinsdb_cache_size * SNAPSHOT_CACHE_PERC),
5094  in_memory, false, "chainstate");
5095  snapshot_chainstate->InitCoinsCache(
5096  static_cast<size_t>(current_coinstip_cache_size * SNAPSHOT_CACHE_PERC));