Bitcoin ABC 0.26.3 — P2P Digital Currency
validation.cpp — Doxygen-generated source listing.
(Navigation header from the documentation site; not part of the compilable
source. Line numbers embedded below are from the original file, and several
original lines were dropped by the extraction.)
1 // Copyright (c) 2009-2010 Satoshi Nakamoto
2 // Copyright (c) 2009-2018 The Bitcoin Core developers
3 // Copyright (c) 2017-2020 The Bitcoin developers
4 // Distributed under the MIT software license, see the accompanying
5 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
6 
7 #include <validation.h>
8 
9 #include <arith_uint256.h>
10 #include <avalanche/avalanche.h>
11 #include <avalanche/processor.h>
12 #include <blockvalidity.h>
13 #include <chainparams.h>
14 #include <checkpoints.h>
15 #include <checkqueue.h>
16 #include <config.h>
17 #include <consensus/activation.h>
18 #include <consensus/amount.h>
19 #include <consensus/merkle.h>
20 #include <consensus/tx_check.h>
21 #include <consensus/tx_verify.h>
22 #include <consensus/validation.h>
23 #include <deploymentstatus.h>
24 #include <hash.h>
25 #include <index/blockfilterindex.h>
26 #include <logging.h>
27 #include <logging/timer.h>
28 #include <minerfund.h>
29 #include <node/blockstorage.h>
30 #include <node/coinstats.h>
31 #include <node/ui_interface.h>
32 #include <node/utxo_snapshot.h>
33 #include <policy/mempool.h>
34 #include <policy/policy.h>
35 #include <policy/settings.h>
36 #include <pow/pow.h>
37 #include <primitives/block.h>
38 #include <primitives/transaction.h>
39 #include <random.h>
40 #include <reverse_iterator.h>
41 #include <script/script.h>
42 #include <script/scriptcache.h>
43 #include <script/sigcache.h>
44 #include <shutdown.h>
45 #include <timedata.h>
46 #include <tinyformat.h>
47 #include <txdb.h>
48 #include <txmempool.h>
49 #include <undo.h>
50 #include <util/check.h> // For NDEBUG compile time check
51 #include <util/strencodings.h>
52 #include <util/system.h>
53 #include <util/trace.h>
54 #include <util/translation.h>
55 #include <validationinterface.h>
56 #include <warnings.h>
57 
58 #include <boost/algorithm/string/replace.hpp>
59 
60 #include <algorithm>
61 #include <numeric>
62 #include <optional>
63 #include <string>
64 #include <thread>
65 
67 using node::BlockManager;
68 using node::BlockMap;
69 using node::CCoinsStats;
71 using node::fImporting;
72 using node::fPruneMode;
73 using node::fReindex;
74 using node::GetUTXOStats;
75 using node::nPruneTarget;
82 
// Unit-conversion factors used when logging elapsed times
// (seconds * MICRO -> microsecond scale, seconds * MILLI -> millisecond scale).
83 #define MICRO 0.000001
84 #define MILLI 0.001
85 
// Periodic database write / full flush cadence. NOTE(review): the original
// doc comments for these constants (lines 86 and 88) were dropped by this
// extraction — presumably "time to wait between writing blocks/chainstate to
// disk" and "time to wait between flushing chainstate to disk"; confirm
// against upstream validation.cpp.
87 static constexpr std::chrono::hours DATABASE_WRITE_INTERVAL{1};
89 static constexpr std::chrono::hours DATABASE_FLUSH_INTERVAL{24};
// Human-readable description of each -checklevel value; each level is
// cumulative (includes all checks of the lower levels, per the last entry).
// NOTE(review): presumably rendered into the -checklevel help text — confirm
// against the caller.
90 const std::vector<std::string> CHECKLEVEL_DOC{
91  "level 0 reads the blocks from disk",
92  "level 1 verifies block validity",
93  "level 2 verifies undo data",
94  "level 3 checks disconnection of tip blocks",
95  "level 4 tries to reconnect the blocks",
96  "each level includes the checks of the previous levels",
97 };
98 
112 
// Condition variable presumably signalled when the chain tip ("best block")
// changes. NOTE(review): the companion declarations (original lines 113/115,
// likely the mutex and best-block variable it guards) were dropped by this
// extraction — confirm against upstream.
114 std::condition_variable g_best_block_cv;
// Policy switch: when true, non-standard transactions are rejected from the
// mempool (see its use in PreChecks below).
116 bool fRequireStandard = true;
// When true, run expensive block-index consistency checks; off by default.
117 bool fCheckBlockIndex = false;
120 
123 
125 
// Constructor member-initializer list: takes the excessive block size from
// the config and enables both proof-of-work and merkle-root checking by
// default. NOTE(review): the constructor signature itself (original line 126,
// presumably BlockValidationOptions::BlockValidationOptions(const Config &))
// was dropped by this extraction — confirm against upstream.
127  : excessiveBlockSize(config.GetMaxBlockSize()), checkPoW(true),
128  checkMerkleRoot(true) {}
129 
// Walk a block locator and return the most recent block it shares with the
// active chain; falls back to the genesis block if nothing connects.
// NOTE(review): the function signature (original lines 131-132) was dropped
// by this extraction — given the m_blockman/m_chain member accesses this is
// presumably CChainState::FindForkInGlobalIndex(const CBlockLocator &) const;
// confirm against upstream.
130 const CBlockIndex *
133 
134  // Find the latest block common to locator and chain - we expect that
135  // locator.vHave is sorted descending by height.
136  for (const BlockHash &hash : locator.vHave) {
137  const CBlockIndex *pindex{m_blockman.LookupBlockIndex(hash)};
138  if (pindex) {
139  if (m_chain.Contains(pindex)) {
140  return pindex;
141  }
// A known block that is not on our chain: if its ancestor at our tip
// height *is* our tip, the locator entry is a descendant of our tip.
142  if (pindex->GetAncestor(m_chain.Height()) == m_chain.Tip()) {
143  return m_chain.Tip();
144  }
145  }
146  }
// No locator entry connects to the active chain; genesis is always common.
147  return m_chain.Genesis();
148 }
149 
150 static uint32_t GetNextBlockScriptFlags(const Consensus::Params &params,
151  const CBlockIndex *pindex);
152 
// Check whether tx's relative lock-times (BIP68 sequence locks) would be
// satisfied in a block mined directly on top of `tip`. Input coins are read
// from `coins_view`; inputs that are still in the mempool (MEMPOOL_HEIGHT)
// are assumed to confirm at height tip+1. When useExistingLockPoints is set,
// previously computed LockPoints in *lp are trusted instead of recomputing;
// otherwise *lp (if non-null) is filled for later reuse. Returns false via
// error() when an input coin cannot be found.
153 bool CheckSequenceLocksAtTip(CBlockIndex *tip, const CCoinsView &coins_view,
154  const CTransaction &tx, LockPoints *lp,
155  bool useExistingLockPoints) {
156  assert(tip != nullptr);
157 
// Synthesize a CBlockIndex for the hypothetical next block (height tip+1)
// so Evaluate/CalculateSequenceLocks see the same heights ConnectBlock would.
158  CBlockIndex index;
159  index.pprev = tip;
160  // CheckSequenceLocksAtTip() uses active_chainstate.m_chain.Height()+1 to
161  // evaluate height based locks because when SequenceLocks() is called within
162  // ConnectBlock(), the height of the block *being* evaluated is what is
163  // used. Thus if we want to know if a transaction can be part of the *next*
164  // block, we need to use one more than active_chainstate.m_chain.Height()
165  index.nHeight = tip->nHeight + 1;
166 
167  std::pair<int, int64_t> lockPair;
168  if (useExistingLockPoints) {
169  assert(lp);
170  lockPair.first = lp->height;
171  lockPair.second = lp->time;
172  } else {
// Gather the confirmation height of every input so the lock calculation
// can convert relative locks into absolute height/time constraints.
173  std::vector<int> prevheights;
174  prevheights.resize(tx.vin.size());
175  for (size_t txinIndex = 0; txinIndex < tx.vin.size(); txinIndex++) {
176  const CTxIn &txin = tx.vin[txinIndex];
177  Coin coin;
178  if (!coins_view.GetCoin(txin.prevout, coin)) {
179  return error("%s: Missing input", __func__);
180  }
181  if (coin.GetHeight() == MEMPOOL_HEIGHT) {
182  // Assume all mempool transaction confirm in the next block
183  prevheights[txinIndex] = tip->nHeight + 1;
184  } else {
185  prevheights[txinIndex] = coin.GetHeight();
186  }
187  }
// NOTE(review): original line 188 — the opening of the call whose trailing
// arguments appear on the next line, presumably
// "lockPair = CalculateSequenceLocks(tx, flags, prevheights, index);" —
// was dropped by this extraction; confirm against upstream validation.cpp.
189  prevheights, index);
190  if (lp) {
191  lp->height = lockPair.first;
192  lp->time = lockPair.second;
193  // Also store the hash of the block with the highest height of all
194  // the blocks which have sequence locked prevouts. This hash needs
195  // to still be on the chain for these LockPoint calculations to be
196  // valid.
197  // Note: It is impossible to correctly calculate a maxInputBlock if
198  // any of the sequence locked inputs depend on unconfirmed txs,
199  // except in the special case where the relative lock time/height is
200  // 0, which is equivalent to no sequence lock. Since we assume input
201  // height of tip+1 for mempool txs and test the resulting lockPair
202  // from CalculateSequenceLocks against tip+1. We know
203  // EvaluateSequenceLocks will fail if there was a non-zero sequence
204  // lock on a mempool input, so we can use the return value of
205  // CheckSequenceLocksAtTip to indicate the LockPoints validity.
206  int maxInputHeight = 0;
207  for (const int height : prevheights) {
208  // Can ignore mempool inputs since we'll fail if they had
209  // non-zero locks.
210  if (height != tip->nHeight + 1) {
211  maxInputHeight = std::max(maxInputHeight, height);
212  }
213  }
214  lp->maxInputBlock = tip->GetAncestor(maxInputHeight);
215  }
216  }
217  return EvaluateSequenceLocks(index, lockPair);
218 }
219 
220 // Command-line argument "-replayprotectionactivationtime=<timestamp>" will
221 // cause the node to switch to replay protected SigHash ForkID value when the
222 // median timestamp of the previous 11 blocks is greater than or equal to
223 // <timestamp>. Defaults to the pre-defined timestamp when not set.
// NOTE(review): the signature line (original line 224) was dropped by this
// extraction — presumably
// "static bool IsReplayProtectionEnabled(const Consensus::Params &params,"
// given the overload below forwards to it; confirm against upstream.
225  int64_t nMedianTimePast) {
226  return nMedianTimePast >= gArgs.GetIntArg("-replayprotectionactivationtime",
227  params.wellingtonActivationTime);
228 }
229 
// Overload taking the previous block: replay protection is considered
// inactive with no predecessor (genesis / null tip), otherwise delegates to
// the timestamp overload above using pindexPrev's median time past.
// NOTE(review): the signature line (original line 230) was dropped by this
// extraction — presumably
// "static bool IsReplayProtectionEnabled(const Consensus::Params &params,";
// confirm against upstream.
231  const CBlockIndex *pindexPrev) {
232  if (pindexPrev == nullptr) {
233  return false;
234  }
235 
236  return IsReplayProtectionEnabled(params, pindexPrev->GetMedianTimePast());
237 }
238 
// Re-validate a mempool-bound transaction's input scripts against the given
// (consensus) flags and cache the results. First sanity-checks, via asserts,
// that every spent coin is backed either by a mempool transaction's output or
// by an unspent coin in the chainstate UTXO set (coins_tip).
// NOTE(review): the function's opening lines (original lines 239-244 — doc
// comment, return type and name, presumably
// "static bool CheckInputsFromMempoolAndCache(") and lines 248-249 (the
// trailing "const CCoinsViewCache &coins_tip" parameter and lock annotations)
// were dropped by this extraction; confirm against upstream validation.cpp.
245  const CTransaction &tx, TxValidationState &state,
246  const CCoinsViewCache &view, const CTxMemPool &pool, const uint32_t flags,
247  PrecomputedTransactionData &txdata, int &nSigChecksOut,
250  AssertLockHeld(pool.cs);
251 
252  assert(!tx.IsCoinBase());
253  for (const CTxIn &txin : tx.vin) {
254  const Coin &coin = view.AccessCoin(txin.prevout);
255 
256  // This coin was checked in PreChecks and MemPoolAccept
257  // has been holding cs_main since then.
258  Assume(!coin.IsSpent());
259  if (coin.IsSpent()) {
260  return false;
261  }
262 
263  // If the Coin is available, there are 2 possibilities:
264  // it is available in our current ChainstateActive UTXO set,
265  // or it's a UTXO provided by a transaction in our mempool.
266  // Ensure the scriptPubKeys in Coins from CoinsView are correct.
267  const CTransactionRef &txFrom = pool.get(txin.prevout.GetTxId());
268  if (txFrom) {
269  assert(txFrom->GetId() == txin.prevout.GetTxId());
270  assert(txFrom->vout.size() > txin.prevout.GetN());
271  assert(txFrom->vout[txin.prevout.GetN()] == coin.GetTxOut());
272  } else {
273  const Coin &coinFromUTXOSet = coins_tip.AccessCoin(txin.prevout);
274  assert(!coinFromUTXOSet.IsSpent());
275  assert(coinFromUTXOSet.GetTxOut() == coin.GetTxOut());
276  }
277  }
278 
279  // Call CheckInputScripts() to cache signature and script validity against
280  // current tip consensus rules.
281  return CheckInputScripts(tx, state, view, flags, /*sigCacheStore=*/true,
282  /*scriptCacheStore=*/true, txdata, nSigChecksOut);
283 }
284 
285 namespace {
286 
// Encapsulates one mempool-acceptance attempt against a given mempool and
// chainstate pair. The pipeline (see the member declarations below) is:
// PreChecks -> [PackageMempoolChecks] -> ConsensusScriptChecks ->
// Finalize/SubmitPackage, driven by the public Accept* entry points.
// NOTE(review): several lines of this class were dropped by the extraction
// (constructor default arguments at original lines 296/299/302, doc comments
// at 320-324/327/340-343/354/373-395 and inside Workspace, and lock
// annotations) — confirm details against upstream validation.cpp.
287 class MemPoolAccept {
288 public:
// Constructor: binds the mempool/chainstate and snapshots the configured
// ancestor/descendant package limits from gArgs. m_view initially backs onto
// m_dummy; PreChecks temporarily switches it to m_viewmempool.
// NOTE(review): the default-value arguments of -limitancestorsize and
// -limitdescendant{count,size} (original lines 296/299/302) are missing here.
289  MemPoolAccept(CTxMemPool &mempool, CChainState &active_chainstate)
290  : m_pool(mempool), m_view(&m_dummy),
291  m_viewmempool(&active_chainstate.CoinsTip(), m_pool),
292  m_active_chainstate(active_chainstate),
293  m_limit_ancestors(
294  gArgs.GetIntArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT)),
295  m_limit_ancestor_size(gArgs.GetIntArg("-limitancestorsize",
297  1000),
298  m_limit_descendants(gArgs.GetIntArg("-limitdescendantcount",
300  m_limit_descendant_size(
301  gArgs.GetIntArg("-limitdescendantsize",
303  1000) {}
304 
305  // We put the arguments we're handed into a struct, so we can pass them
306  // around easier.
307  struct ATMPArgs {
308  const Config &m_config;
309  const int64_t m_accept_time;
310  const bool m_bypass_limits;
311  /*
312  * Return any outpoints which were not previously present in the coins
313  * cache, but were added as a result of validating the tx for mempool
314  * acceptance. This allows the caller to optionally remove the cache
315  * additions if the associated transaction ends up being rejected by
316  * the mempool.
317  */
318  std::vector<COutPoint> &m_coins_to_uncache;
319  const bool m_test_accept;
// True when validating as part of a package submission (changes when the
// mempool is trimmed — see Finalize/SubmitPackage). NOTE(review): original
// doc comment (lines 320-324) dropped by the extraction.
325  const bool m_package_submission;
326 
// Factory for single-transaction acceptance arguments.
328  static ATMPArgs SingleAccept(const Config &config, int64_t accept_time,
329  bool bypass_limits,
330  std::vector<COutPoint> &coins_to_uncache,
331  bool test_accept) {
332  return ATMPArgs{config,
333  accept_time,
334  bypass_limits,
335  coins_to_uncache,
336  test_accept,
337  /*m_package_submission=*/false};
338  }
339 
// Factory for package validation in test-accept mode (nothing is submitted).
344  static ATMPArgs
345  PackageTestAccept(const Config &config, int64_t accept_time,
346  std::vector<COutPoint> &coins_to_uncache) {
347  return ATMPArgs{config, accept_time,
348  /*m_bypass_limits=*/false, coins_to_uncache,
349  /*m_test_accept=*/true,
350  // not submitting to mempool
351  /*m_package_submission=*/false};
352  }
353 
// Factory for real (submitting) child-with-parents package acceptance.
355  static ATMPArgs
356  PackageChildWithParents(const Config &config, int64_t accept_time,
357  std::vector<COutPoint> &coins_to_uncache) {
358  return ATMPArgs{config,
359  accept_time,
360  /*m_bypass_limits=*/false,
361  coins_to_uncache,
362  /*m_test_accept=*/false,
363  /*m_package_submission=*/true};
364  }
365  // No default ctor to avoid exposing details to clients and allowing the
366  // possibility of mixing up the order of the arguments. Use static
367  // functions above instead.
368  ATMPArgs() = delete;
369  };
370 
371  // Single transaction acceptance
372  MempoolAcceptResult AcceptSingleTransaction(const CTransactionRef &ptx,
373  ATMPArgs &args)
375 
// NOTE(review): the return type of this declaration (presumably
// PackageMempoolAcceptResult, original lines 376-382) was dropped by the
// extraction, as were the lock annotations following each declaration.
383  AcceptMultipleTransactions(const std::vector<CTransactionRef> &txns,
384  ATMPArgs &args)
386 
392  PackageMempoolAcceptResult AcceptPackage(const Package &package,
393  ATMPArgs &args)
395 
396 private:
397  // All the intermediate state that gets passed between the various levels
398  // of checking a given transaction.
399  struct Workspace {
400  Workspace(const CTransactionRef &ptx,
401  const uint32_t next_block_script_verify_flags)
402  : m_ptx(ptx),
403  m_next_block_script_verify_flags(next_block_script_verify_flags) {
404  }
// In-mempool ancestors of this tx, computed in PreChecks (and recomputed
// in SubmitPackage before addUnchecked).
406  CTxMemPool::setEntries m_ancestors;
// The mempool entry built in PreChecks; consumed by Finalize.
412  std::unique_ptr<CTxMemPoolEntry> m_entry;
413 
// Virtual size of the entry (set in PreChecks from GetTxVirtualSize()).
418  int64_t m_vsize;
// Fees as computed by Consensus::CheckTxInputs.
423  Amount m_base_fees;
424 
// Base fees plus any PrioritiseTransaction delta (see PreChecks).
429  Amount m_modified_fees;
430 
431  const CTransactionRef &m_ptx;
// Validation result/failure reason for this transaction.
432  TxValidationState m_state;
// Cached sighash midstate data reused between standard and consensus
// script checks.
438  PrecomputedTransactionData m_precomputed_txdata;
439 
440  // ABC specific flags that are used in both PreChecks and
441  // ConsensusScriptChecks
442  const uint32_t m_next_block_script_verify_flags;
443  int m_sig_checks_standard;
444  };
445 
446  // Run the policy checks on a given transaction, excluding any script
447  // checks. Looks up inputs, calculates feerate, considers replacement,
448  // evaluates package limits, etc. As this function can be invoked for "free"
449  // by a peer, only tests that are fast should be done here (to avoid CPU
450  // DoS).
451  bool PreChecks(ATMPArgs &args, Workspace &ws)
452  EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
453 
454  // Enforce package mempool ancestor/descendant limits (distinct from
455  // individual ancestor/descendant limits done in PreChecks).
456  bool PackageMempoolChecks(const std::vector<CTransactionRef> &txns,
457  PackageValidationState &package_state)
458  EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
459 
460  // Re-run the script checks, using consensus flags, and try to cache the
461  // result in the scriptcache. This should be done after
462  // PolicyScriptChecks(). This requires that all inputs either be in our
463  // utxo set or in the mempool.
464  bool ConsensusScriptChecks(const ATMPArgs &args, Workspace &ws)
465  EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
466 
467  // Try to add the transaction to the mempool, removing any conflicts first.
468  // Returns true if the transaction is in the mempool after any size
469  // limiting is performed, false otherwise.
470  bool Finalize(const ATMPArgs &args, Workspace &ws)
471  EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
472 
473  // Submit all transactions to the mempool and call ConsensusScriptChecks to
474  // add to the script cache - should only be called after successful
475  // validation of all transactions in the package.
476  // The package may end up partially-submitted after size limiting;
477  // returns true if all transactions are successfully added to the mempool,
478  // false otherwise.
479  bool SubmitPackage(const ATMPArgs &args, std::vector<Workspace> &workspaces,
480  PackageValidationState &package_state,
481  std::map<const TxId, const MempoolAcceptResult> &results)
482  EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
483 
484 private:
485  CTxMemPool &m_pool;
486  CCoinsViewCache m_view;
487  CCoinsViewMemPool m_viewmempool;
488  CCoinsView m_dummy;
489 
490  CChainState &m_active_chainstate;
491 
492  // The package limits in effect at the time of invocation.
493  const size_t m_limit_ancestors;
494  const size_t m_limit_ancestor_size;
495  // These may be modified while evaluating a transaction (eg to account for
496  // in-mempool conflicts; see below).
497  size_t m_limit_descendants;
498  size_t m_limit_descendant_size;
499 };
500 
// Context-sensitive policy checks for one transaction, before any consensus
// script execution: regular-tx form, standardness, finality for the next
// block, mempool duplicates/conflicts, input availability, BIP68 sequence
// locks, fees (relay minimum and dynamic mempool minimum) and in-mempool
// ancestor limits. Builds ws.m_entry and fills ws.m_state on failure.
// NOTE(review): this extraction dropped several original lines — the cs_main
// AssertLockHeld (502) and the opening
// "return state.Invalid(TxValidationResult::..." line of multiple rejection
// sites (532 is the "if (!ContextualCheckTransactionForCurrentBlock(" line;
// 537, 544, 553, 578, 585, 592, 615, 631, 657, 697 are rejection openings).
// Compare with upstream validation.cpp before relying on exact text.
501 bool MemPoolAccept::PreChecks(ATMPArgs &args, Workspace &ws) {
503  AssertLockHeld(m_pool.cs);
504  const CTransactionRef &ptx = ws.m_ptx;
505  const CTransaction &tx = *ws.m_ptx;
506  const TxId &txid = ws.m_ptx->GetId();
507 
508  // Copy/alias what we need out of args
509  const int64_t nAcceptTime = args.m_accept_time;
510  const bool bypass_limits = args.m_bypass_limits;
511  std::vector<COutPoint> &coins_to_uncache = args.m_coins_to_uncache;
512 
513  // Alias what we need out of ws
514  TxValidationState &state = ws.m_state;
515  std::unique_ptr<CTxMemPoolEntry> &entry = ws.m_entry;
516  // Coinbase is only valid in a block, not as a loose transaction.
517  if (!CheckRegularTransaction(tx, state)) {
518  // state filled in by CheckRegularTransaction.
519  return false;
520  }
521 
522  // Rather not work on nonstandard transactions (unless -testnet)
523  std::string reason;
524  if (fRequireStandard && !IsStandardTx(tx, reason)) {
525  return state.Invalid(TxValidationResult::TX_NOT_STANDARD, reason);
526  }
527 
528  // Only accept nLockTime-using transactions that can be mined in the next
529  // block; we don't want our mempool filled up with transactions that can't
530  // be mined yet.
531  TxValidationState ctxState;
533  m_active_chainstate.m_chain.Tip(),
534  args.m_config.GetChainParams().GetConsensus(), tx, ctxState)) {
535  // We copy the state from a dummy to ensure we don't increase the
536  // ban score of peer for transaction that could be valid in the future.
538  ctxState.GetRejectReason(),
539  ctxState.GetDebugMessage());
540  }
541 
542  // Is it already in the memory pool?
543  if (m_pool.exists(txid)) {
545  "txn-already-in-mempool");
546  }
547 
548  // Check for conflicts with in-memory transactions
549  for (const CTxIn &txin : tx.vin) {
550  auto itConflicting = m_pool.mapNextTx.find(txin.prevout);
551  if (itConflicting != m_pool.mapNextTx.end()) {
552  // Disable replacement feature for good
554  "txn-mempool-conflict");
555  }
556  }
557 
558  LockPoints lp;
// Temporarily back m_view with the mempool-aware view so input lookups can
// resolve coins created by in-mempool parents.
559  m_view.SetBackend(m_viewmempool);
560 
561  const CCoinsViewCache &coins_cache = m_active_chainstate.CoinsTip();
562  // Do all inputs exist?
563  for (const CTxIn &txin : tx.vin) {
564  if (!coins_cache.HaveCoinInCache(txin.prevout)) {
565  coins_to_uncache.push_back(txin.prevout);
566  }
567 
568  // Note: this call may add txin.prevout to the coins cache
569  // (coins_cache.cacheCoins) by way of FetchCoin(). It should be
570  // removed later (via coins_to_uncache) if this tx turns out to be
571  // invalid.
572  if (!m_view.HaveCoin(txin.prevout)) {
573  // Are inputs missing because we already have the tx?
574  for (size_t out = 0; out < tx.vout.size(); out++) {
575  // Optimistically just do efficient check of cache for
576  // outputs.
577  if (coins_cache.HaveCoinInCache(COutPoint(txid, out))) {
579  "txn-already-known");
580  }
581  }
582 
583  // Otherwise assume this might be an orphan tx for which we just
584  // haven't seen parents yet.
586  "bad-txns-inputs-missingorspent");
587  }
588  }
589 
590  // Are the actual inputs available?
591  if (!m_view.HaveInputs(tx)) {
593  "bad-txns-inputs-spent");
594  }
595 
596  // Bring the best block into scope.
597  m_view.GetBestBlock();
598 
599  // we have all inputs cached now, so switch back to dummy (to protect
600  // against bugs where we pull more inputs from disk that miss being
601  // added to coins_to_uncache)
602  m_view.SetBackend(m_dummy);
603 
604  assert(m_active_chainstate.m_blockman.LookupBlockIndex(
605  m_view.GetBestBlock()) == m_active_chainstate.m_chain.Tip());
606 
607  // Only accept BIP68 sequence locked transactions that can be mined in
608  // the next block; we don't want our mempool filled up with transactions
609  // that can't be mined yet.
610  // Pass in m_view which has all of the relevant inputs cached. Note that,
611  // since m_view's backend was removed, it no longer pulls coins from the
612  // mempool.
613  if (!CheckSequenceLocksAtTip(m_active_chainstate.m_chain.Tip(), m_view, tx,
614  &lp)) {
616  "non-BIP68-final");
617  }
618 
619  // The mempool holds txs for the next block, so pass height+1 to
620  // CheckTxInputs
621  if (!Consensus::CheckTxInputs(tx, state, m_view,
622  m_active_chainstate.m_chain.Height() + 1,
623  ws.m_base_fees)) {
624  // state filled in by CheckTxInputs
625  return false;
626  }
627 
628  // Check for non-standard pay-to-script-hash in inputs
629  if (fRequireStandard &&
630  !AreInputsStandard(tx, m_view, ws.m_next_block_script_verify_flags)) {
632  "bad-txns-nonstandard-inputs");
633  }
634 
635  // ws.m_modified_fees includes any fee deltas from PrioritiseTransaction
636  ws.m_modified_fees = ws.m_base_fees;
637  m_pool.ApplyDelta(txid, ws.m_modified_fees);
638 
639  // Keep track of transactions that spend a coinbase, which we re-scan
640  // during reorgs to ensure COINBASE_MATURITY is still met.
641  bool fSpendsCoinbase = false;
642  for (const CTxIn &txin : tx.vin) {
643  const Coin &coin = m_view.AccessCoin(txin.prevout);
644  if (coin.IsCoinBase()) {
645  fSpendsCoinbase = true;
646  break;
647  }
648  }
649 
650  unsigned int nSize = tx.GetTotalSize();
651 
652  // No transactions are allowed below minRelayTxFee except from disconnected
653  // blocks.
654  // Do not change this to use virtualsize without coordinating a network
655  // policy upgrade.
656  if (!bypass_limits && ws.m_modified_fees < minRelayTxFee.GetFee(nSize)) {
658  "min relay fee not met",
659  strprintf("%d < %d", ws.m_modified_fees,
660  ::minRelayTxFee.GetFee(nSize)));
661  }
662 
663  // Validate input scripts against standard script flags.
664  const uint32_t scriptVerifyFlags =
665  ws.m_next_block_script_verify_flags | STANDARD_SCRIPT_VERIFY_FLAGS;
666  ws.m_precomputed_txdata = PrecomputedTransactionData{tx};
667  if (!CheckInputScripts(tx, state, m_view, scriptVerifyFlags, true, false,
668  ws.m_precomputed_txdata, ws.m_sig_checks_standard)) {
669  // State filled in by CheckInputScripts
670  return false;
671  }
672 
673  entry.reset(new CTxMemPoolEntry(
674  ptx, ws.m_base_fees, nAcceptTime, m_active_chainstate.m_chain.Height(),
675  fSpendsCoinbase, ws.m_sig_checks_standard, lp));
676 
677  ws.m_vsize = entry->GetTxVirtualSize();
678 
// Dynamic mempool minimum fee: rises as the pool approaches -maxmempool
// (megabytes, hence the * 1000000).
679  Amount mempoolRejectFee =
680  m_pool
681  .GetMinFee(
682  gArgs.GetIntArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) *
683  1000000)
684  .GetFee(ws.m_vsize);
685  if (!bypass_limits && mempoolRejectFee > Amount::zero() &&
686  ws.m_modified_fees < mempoolRejectFee) {
687  return state.Invalid(
688  TxValidationResult::TX_MEMPOOL_POLICY, "mempool min fee not met",
689  strprintf("%d < %d", ws.m_modified_fees, mempoolRejectFee));
690  }
691 
692  // Calculate in-mempool ancestors, up to a limit.
693  std::string errString;
694  if (!m_pool.CalculateMemPoolAncestors(
695  *entry, ws.m_ancestors, m_limit_ancestors, m_limit_ancestor_size,
696  m_limit_descendants, m_limit_descendant_size, errString)) {
698  "too-long-mempool-chain", errString);
699  }
700  return true;
701 }
702 
// Package-wide ancestor/descendant limit enforcement: asserts none of the
// package transactions are already in the mempool, then delegates to
// CTxMemPool::CheckPackageLimits. Failure is recorded as a package-level
// PCKG_POLICY error, not per-transaction.
// NOTE(review): original line 706 (presumably AssertLockHeld(cs_main)) was
// dropped by this extraction.
703 bool MemPoolAccept::PackageMempoolChecks(
704  const std::vector<CTransactionRef> &txns,
705  PackageValidationState &package_state) {
707  AssertLockHeld(m_pool.cs);
708 
709  // CheckPackageLimits expects the package transactions to not already be in
710  // the mempool.
711  assert(std::all_of(txns.cbegin(), txns.cend(), [this](const auto &tx) {
712  return !m_pool.exists(tx->GetId());
713  }));
714 
715  std::string err_string;
716  if (!m_pool.CheckPackageLimits(txns, m_limit_ancestors,
717  m_limit_ancestor_size, m_limit_descendants,
718  m_limit_descendant_size, err_string)) {
719  // This is a package-wide error, separate from an individual transaction
720  // error.
721  return package_state.Invalid(PackageValidationResult::PCKG_POLICY,
722  "package-mempool-limits", err_string);
723  }
724  return true;
725 }
726 
// Second script pass using the next block's *consensus* flags (PreChecks used
// standard flags). Also verifies the sigchecks count agrees between the two
// passes so mining templates built from the mempool stay consensus-valid.
// NOTE(review): original lines 728 (presumably AssertLockHeld(cs_main)) and
// 746 (the "if (!CheckInputsFromMempoolAndCache(" call opening) were dropped
// by this extraction.
727 bool MemPoolAccept::ConsensusScriptChecks(const ATMPArgs &args, Workspace &ws) {
729  AssertLockHeld(m_pool.cs);
730  const CTransaction &tx = *ws.m_ptx;
731  const TxId &txid = tx.GetId();
732  TxValidationState &state = ws.m_state;
733 
734  // Check again against the next block's script verification flags
735  // to cache our script execution flags.
736  //
737  // This is also useful in case of bugs in the standard flags that cause
738  // transactions to pass as valid when they're actually invalid. For
739  // instance the STRICTENC flag was incorrectly allowing certain CHECKSIG
740  // NOT scripts to pass, even though they were invalid.
741  //
742  // There is a similar check in CreateNewBlock() to prevent creating
743  // invalid blocks (using TestBlockValidity), however allowing such
744  // transactions into the mempool can be exploited as a DoS attack.
745  int nSigChecksConsensus;
747  tx, state, m_view, m_pool, ws.m_next_block_script_verify_flags,
748  ws.m_precomputed_txdata, nSigChecksConsensus,
749  m_active_chainstate.CoinsTip())) {
750  // This can occur under some circumstances, if the node receives an
751  // unrequested tx which is invalid due to new consensus rules not
752  // being activated yet (during IBD).
753  LogPrintf("BUG! PLEASE REPORT THIS! CheckInputScripts failed against "
754  "latest-block but not STANDARD flags %s, %s\n",
755  txid.ToString(), state.ToString());
756  return Assume(false);
757  }
758 
759  if (ws.m_sig_checks_standard != nSigChecksConsensus) {
760  // We can't accept this transaction as we've used the standard count
761  // for the mempool/mining, but the consensus count will be enforced
762  // in validation (we don't want to produce bad block templates).
763  return error(
764  "%s: BUG! PLEASE REPORT THIS! SigChecks count differed between "
765  "standard and consensus flags in %s",
766  __func__, txid.ToString());
767  }
768  return true;
769 }
770 
// Insert the prepared entry into the mempool and (outside of package
// submission / bypass-limits mode) trim the pool to its configured size,
// failing with "mempool full" if this transaction was evicted by the trim.
// NOTE(review): original lines 772 (presumably AssertLockHeld(cs_main)) and
// 795 (the "return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY,"
// opening) were dropped by this extraction.
771 bool MemPoolAccept::Finalize(const ATMPArgs &args, Workspace &ws) {
773  AssertLockHeld(m_pool.cs);
774  const TxId &txid = ws.m_ptx->GetId();
775  TxValidationState &state = ws.m_state;
776  const bool bypass_limits = args.m_bypass_limits;
777 
778  std::unique_ptr<CTxMemPoolEntry> &entry = ws.m_entry;
779 
780  // Store transaction in memory.
781  m_pool.addUnchecked(*entry, ws.m_ancestors);
782 
783  // Trim mempool and check if tx was trimmed.
784  // If we are validating a package, don't trim here because we could evict a
785  // previous transaction in the package. LimitMempoolSize() should be called
786  // at the very end to make sure the mempool is still within limits and
787  // package submission happens atomically.
788  if (!args.m_package_submission && !bypass_limits) {
789  m_pool.LimitSize(
790  m_active_chainstate.CoinsTip(),
791  gArgs.GetIntArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000,
792  std::chrono::hours{
793  gArgs.GetIntArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY)});
794  if (!m_pool.exists(txid)) {
796  "mempool full");
797  }
798  }
799  return true;
800 }
801 
// Submit all already-validated package transactions: for each workspace run
// ConsensusScriptChecks, recompute ancestors, and Finalize (without per-tx
// trimming), then trim the mempool once at the end. Partial submission is
// possible; returns true only if every transaction remains in the mempool.
// NOTE(review): original lines 806 (presumably AssertLockHeld(cs_main)) and
// 875 (the "GetMainSignals().TransactionAddedToMempool(" opening matching the
// trailing arguments below) were dropped by this extraction.
802 bool MemPoolAccept::SubmitPackage(
803  const ATMPArgs &args, std::vector<Workspace> &workspaces,
804  PackageValidationState &package_state,
805  std::map<const TxId, const MempoolAcceptResult> &results) {
807  AssertLockHeld(m_pool.cs);
808  // Sanity check: none of the transactions should be in the mempool.
809  assert(std::all_of(
810  workspaces.cbegin(), workspaces.cend(),
811  [this](const auto &ws) { return !m_pool.exists(ws.m_ptx->GetId()); }));
812 
813  bool all_submitted = true;
814  // ConsensusScriptChecks adds to the script cache and is therefore
815  // consensus-critical; CheckInputsFromMempoolAndCache asserts that
816  // transactions only spend coins available from the mempool or UTXO set.
817  // Submit each transaction to the mempool immediately after calling
818  // ConsensusScriptChecks to make the outputs available for subsequent
819  // transactions.
820  for (Workspace &ws : workspaces) {
821  if (!ConsensusScriptChecks(args, ws)) {
822  results.emplace(ws.m_ptx->GetId(),
823  MempoolAcceptResult::Failure(ws.m_state));
824  // Since PreChecks() passed, this should never fail.
825  all_submitted = Assume(false);
826  }
827 
828  // Re-calculate mempool ancestors to call addUnchecked(). They may have
829  // changed since the last calculation done in PreChecks, since package
830  // ancestors have already been submitted.
831  std::string unused_err_string;
832  if (!m_pool.CalculateMemPoolAncestors(
833  *ws.m_entry, ws.m_ancestors, m_limit_ancestors,
834  m_limit_ancestor_size, m_limit_descendants,
835  m_limit_descendant_size, unused_err_string)) {
836  results.emplace(ws.m_ptx->GetId(),
837  MempoolAcceptResult::Failure(ws.m_state));
838  // Since PreChecks() and PackageMempoolChecks() both enforce limits,
839  // this should never fail.
840  all_submitted = Assume(false);
841  }
842  // If we call LimitMempoolSize() for each individual Finalize(), the
843  // mempool will not take the transaction's descendant feerate into
844  // account because it hasn't seen them yet. Also, we risk evicting a
845  // transaction that a subsequent package transaction depends on.
846  // Instead, allow the mempool to temporarily bypass limits, the maximum
847  // package size) while submitting transactions individually and then
848  // trim at the very end.
849  if (!Finalize(args, ws)) {
850  results.emplace(ws.m_ptx->GetId(),
851  MempoolAcceptResult::Failure(ws.m_state));
852  // Since LimitMempoolSize() won't be called, this should never fail.
853  all_submitted = Assume(false);
854  }
855  }
856 
857  // It may or may not be the case that all the transactions made it into the
858  // mempool. Regardless, make sure we haven't exceeded max mempool size.
859  m_pool.LimitSize(
860  m_active_chainstate.CoinsTip(),
861  gArgs.GetIntArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000,
862  std::chrono::hours{
863  gArgs.GetIntArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY)});
864  if (!all_submitted) {
865  return false;
866  }
867 
868  // Find the txids of the transactions that made it into the mempool. Allow
869  // partial submission, but don't report success unless they all made it into
870  // the mempool.
871  for (Workspace &ws : workspaces) {
872  if (m_pool.exists(ws.m_ptx->GetId())) {
873  results.emplace(ws.m_ptx->GetId(), MempoolAcceptResult::Success(
874  ws.m_vsize, ws.m_base_fees));
876  ws.m_ptx, m_pool.GetAndIncrementSequence());
877  } else {
878  all_submitted = false;
879  ws.m_state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY,
880  "mempool full");
881  results.emplace(ws.m_ptx->GetId(),
882  MempoolAcceptResult::Failure(ws.m_state));
883  }
884  }
885  return all_submitted;
886 }
887 
// Full single-transaction acceptance pipeline: PreChecks ->
// ConsensusScriptChecks -> (early return on test_accept) -> Finalize,
// followed by the TransactionAddedToMempool notification.
// NOTE(review): original lines 888 (the return type, presumably
// "MempoolAcceptResult"), 891 (presumably AssertLockHeld(cs_main)) and 920
// (the "GetMainSignals().TransactionAddedToMempool(" opening matching the
// trailing arguments below) were dropped by this extraction.
889 MemPoolAccept::AcceptSingleTransaction(const CTransactionRef &ptx,
890  ATMPArgs &args) {
892  // mempool "read lock" (held through
893  // GetMainSignals().TransactionAddedToMempool())
894  LOCK(m_pool.cs);
895 
896  Workspace ws(ptx, GetNextBlockScriptFlags(
897  args.m_config.GetChainParams().GetConsensus(),
898  m_active_chainstate.m_chain.Tip()));
899 
900  // Perform the inexpensive checks first and avoid hashing and signature
901  // verification unless those checks pass, to mitigate CPU exhaustion
902  // denial-of-service attacks.
903  if (!PreChecks(args, ws)) {
904  return MempoolAcceptResult::Failure(ws.m_state);
905  }
906 
907  if (!ConsensusScriptChecks(args, ws)) {
908  return MempoolAcceptResult::Failure(ws.m_state);
909  }
910 
911  // Tx was accepted, but not added
912  if (args.m_test_accept) {
913  return MempoolAcceptResult::Success(ws.m_vsize, ws.m_base_fees);
914  }
915 
916  if (!Finalize(args, ws)) {
917  return MempoolAcceptResult::Failure(ws.m_state);
918  }
919 
921  ptx, m_pool.GetAndIncrementSequence());
922 
923  return MempoolAcceptResult::Success(ws.m_vsize, ws.m_base_fees);
924 }
925 
// Package acceptance: context-free CheckPackage, then PreChecks for every
// transaction (fail-fast), package-wide mempool limit checks, and — unless
// test_accept — atomic-ish submission via SubmitPackage.
// NOTE(review): original lines 928 (presumably AssertLockHeld(cs_main)), 955
// and 990 (openings of the package_state.Invalid(PCKG_TX, ...) calls whose
// trailing string arguments appear below) were dropped by this extraction.
926 PackageMempoolAcceptResult MemPoolAccept::AcceptMultipleTransactions(
927  const std::vector<CTransactionRef> &txns, ATMPArgs &args) {
929 
930  // These context-free package limits can be done before taking the mempool
931  // lock.
932  PackageValidationState package_state;
933  if (!CheckPackage(txns, package_state)) {
934  return PackageMempoolAcceptResult(package_state, {});
935  }
936 
937  std::vector<Workspace> workspaces{};
938  workspaces.reserve(txns.size());
939  std::transform(txns.cbegin(), txns.cend(), std::back_inserter(workspaces),
940  [&args, this](const auto &tx) {
941  return Workspace(
942  tx,
943  GetNextBlockScriptFlags(
944  args.m_config.GetChainParams().GetConsensus(),
945  m_active_chainstate.m_chain.Tip()));
946  });
947  std::map<const TxId, const MempoolAcceptResult> results;
948 
949  LOCK(m_pool.cs);
950 
951  // Do all PreChecks first and fail fast to avoid running expensive script
952  // checks when unnecessary.
953  for (Workspace &ws : workspaces) {
954  if (!PreChecks(args, ws)) {
956  "transaction failed");
957  // Exit early to avoid doing pointless work. Update the failed tx
958  // result; the rest are unfinished.
959  results.emplace(ws.m_ptx->GetId(),
960  MempoolAcceptResult::Failure(ws.m_state));
961  return PackageMempoolAcceptResult(package_state,
962  std::move(results));
963  }
964  // Make the coins created by this transaction available for subsequent
965  // transactions in the package to spend.
966  m_viewmempool.PackageAddTransaction(ws.m_ptx);
967  if (args.m_test_accept) {
968  // When test_accept=true, transactions that pass PreChecks
969  // are valid because there are no further mempool checks (passing
970  // PreChecks implies passing ConsensusScriptChecks).
971  results.emplace(ws.m_ptx->GetId(), MempoolAcceptResult::Success(
972  ws.m_vsize, ws.m_base_fees));
973  }
974  }
975 
976  // Apply package mempool ancestor/descendant limits. Skip if there is only
977  // one transaction, because it's unnecessary. Also, CPFP carve out can
978  // increase the limit for individual transactions, but this exemption is
979  // not extended to packages in CheckPackageLimits().
980  std::string err_string;
981  if (txns.size() > 1 && !PackageMempoolChecks(txns, package_state)) {
982  return PackageMempoolAcceptResult(package_state, std::move(results));
983  }
984 
985  if (args.m_test_accept) {
986  return PackageMempoolAcceptResult(package_state, std::move(results));
987  }
988 
989  if (!SubmitPackage(args, workspaces, package_state, results)) {
991  "submission failed");
992  return PackageMempoolAcceptResult(package_state, std::move(results));
993  }
994 
995  return PackageMempoolAcceptResult(package_state, std::move(results));
996 }
997 
/**
 * Accept a child-with-unconfirmed-parents package. Performs context-free
 * checks, verifies the package topology, de-duplicates transactions already
 * in the mempool, and delegates the remainder to
 * AcceptMultipleTransactions().
 *
 * NOTE(review): a few source lines are missing from this extract (a lock
 * assertion after the signature and the first line of two Invalid(...)
 * calls); the dangling fragments are kept verbatim below.
 */
PackageMempoolAcceptResult MemPoolAccept::AcceptPackage(const Package &package,
                                                        ATMPArgs &args) {
    PackageValidationState package_state;

    // Check that the package is well-formed. If it isn't, we won't try to
    // validate any of the transactions and thus won't return any
    // MempoolAcceptResults, just a package-wide error.

    // Context-free package checks.
    if (!CheckPackage(package, package_state)) {
        return PackageMempoolAcceptResult(package_state, {});
    }

    // All transactions in the package must be a parent of the last transaction.
    // This is just an opportunity for us to fail fast on a context-free check
    // without taking the mempool lock.
    if (!IsChildWithParents(package)) {
        // NOTE(review): first line of this Invalid(...) call is missing from
        // this extract.
                       "package-not-child-with-parents");
        return PackageMempoolAcceptResult(package_state, {});
    }

    // IsChildWithParents() guarantees the package is > 1 transactions.
    assert(package.size() > 1);
    // The package must be 1 child with all of its unconfirmed parents. The
    // package is expected to be sorted, so the last transaction is the child.
    const auto &child = package.back();
    std::unordered_set<TxId, SaltedTxIdHasher> unconfirmed_parent_txids;
    std::transform(
        package.cbegin(), package.cend() - 1,
        std::inserter(unconfirmed_parent_txids, unconfirmed_parent_txids.end()),
        [](const auto &tx) { return tx->GetId(); });

    // All child inputs must refer to a preceding package transaction or a
    // confirmed UTXO. The only way to verify this is to look up the child's
    // inputs in our current coins view (not including mempool), and enforce
    // that all parents not present in the package be available at chain tip.
    // Since this check can bring new coins into the coins cache, keep track of
    // these coins and uncache them if we don't end up submitting this package
    // to the mempool.
    const CCoinsViewCache &coins_tip_cache = m_active_chainstate.CoinsTip();
    for (const auto &input : child->vin) {
        if (!coins_tip_cache.HaveCoinInCache(input.prevout)) {
            args.m_coins_to_uncache.push_back(input.prevout);
        }
    }
    // Using the MemPoolAccept m_view cache allows us to look up these same
    // coins faster later. This should be connecting directly to CoinsTip, not
    // to m_viewmempool, because we specifically require inputs to be confirmed
    // if they aren't in the package.
    m_view.SetBackend(m_active_chainstate.CoinsTip());
    const auto package_or_confirmed = [this, &unconfirmed_parent_txids](
                                          const auto &input) {
        return unconfirmed_parent_txids.count(input.prevout.GetTxId()) > 0 ||
               m_view.HaveCoin(input.prevout);
    };
    if (!std::all_of(child->vin.cbegin(), child->vin.cend(),
                     package_or_confirmed)) {
        // NOTE(review): first line of this Invalid(...) call is missing from
        // this extract.
                       "package-not-child-with-unconfirmed-parents");
        return PackageMempoolAcceptResult(package_state, {});
    }
    // Protect against bugs where we pull more inputs from disk that miss being
    // added to coins_to_uncache. The backend will be connected again when
    // needed in PreChecks.
    m_view.SetBackend(m_dummy);

    LOCK(m_pool.cs);
    std::map<const TxId, const MempoolAcceptResult> results;
    // Node operators are free to set their mempool policies however they
    // please, nodes may receive transactions in different orders, and malicious
    // counterparties may try to take advantage of policy differences to pin or
    // delay propagation of transactions. As such, it's possible for some
    // package transaction(s) to already be in the mempool, and we don't want to
    // reject the entire package in that case (as that could be a censorship
    // vector). De-duplicate the transactions that are already in the mempool,
    // and only call AcceptMultipleTransactions() with the new transactions.
    // This ensures we don't double-count transaction counts and sizes when
    // checking ancestor/descendant limits, or double-count transaction fees for
    // fee-related policy.
    std::vector<CTransactionRef> txns_new;
    for (const auto &tx : package) {
        const auto &txid = tx->GetId();
        // An already confirmed tx is treated as one not in mempool, because all
        // we know is that the inputs aren't available.
        if (m_pool.exists(txid)) {
            // Exact transaction already exists in the mempool.
            auto iter = m_pool.GetIter(txid);
            assert(iter != std::nullopt);
            results.emplace(
                txid, MempoolAcceptResult::MempoolTx(iter.value()->GetTxSize(),
                                                     iter.value()->GetFee()));
        } else {
            // Transaction does not already exist in the mempool.
            txns_new.push_back(tx);
        }
    }

    // Nothing to do if the entire package has already been submitted.
    if (txns_new.empty()) {
        return PackageMempoolAcceptResult(package_state, std::move(results));
    }
    // Validate the (deduplicated) transactions as a package.
    auto submission_result = AcceptMultipleTransactions(txns_new, args);
    // Include already-in-mempool transaction results in the final result.
    for (const auto &[txid, mempoolaccept_res] : results) {
        submission_result.m_tx_results.emplace(txid, mempoolaccept_res);
    }
    return submission_result;
}
1109 } // namespace
1110 
// NOTE(review): the return-type/function-name line of this definition is
// missing from this extract. From the call to AcceptSingleTransaction and
// the "ATMPW" comment below, this appears to be AcceptToMemoryPool — confirm
// against the full source.
    CChainState &active_chainstate,
    const CTransactionRef &tx,
    int64_t accept_time, bool bypass_limits,
    bool test_accept) {
    // A mempool must be attached to the active chainstate here.
    assert(active_chainstate.GetMempool() != nullptr);
    CTxMemPool &pool{*active_chainstate.GetMempool()};

    // Coins pulled into the cache during validation are tracked so they can
    // be evicted again below.
    std::vector<COutPoint> coins_to_uncache;
    auto args = MemPoolAccept::ATMPArgs::SingleAccept(
        config, accept_time, bypass_limits, coins_to_uncache, test_accept);
    const MempoolAcceptResult result = MemPoolAccept(pool, active_chainstate)
                                           .AcceptSingleTransaction(tx, args);
    // NOTE(review): the condition guarding this block is missing from this
    // extract (presumably a check that the result is not a success).
    // Remove coins that were not present in the coins cache before calling
    // ATMPW; this is to prevent memory DoS in case we receive a large
    // number of invalid transactions that attempt to overrun the in-memory
    // coins cache
    // (`CCoinsViewCache::cacheCoins`).

        for (const COutPoint &outpoint : coins_to_uncache) {
            active_chainstate.CoinsTip().Uncache(outpoint);
        }
    }

    // After we've (potentially) uncached entries, ensure our coins cache is
    // still within its size limits
    BlockValidationState stateDummy;
    active_chainstate.FlushStateToDisk(stateDummy, FlushStateMode::PERIODIC);
    return result;
}
1143 
/**
 * Validate (and possibly submit) a package to the mempool.
 * With test_accept the package is only validated; otherwise the
 * child-with-parents acceptance path is used. Coins brought into the cache
 * for failed/dry-run validation are uncached afterwards.
 *
 * NOTE(review): the return-type line of this definition and two lock
 * assertions are missing from this extract.
 */
ProcessNewPackage(const Config &config, CChainState &active_chainstate,
                  CTxMemPool &pool, const Package &package, bool test_accept) {
    // The package must be non-empty and contain no null transactions.
    assert(!package.empty());
    assert(std::all_of(package.cbegin(), package.cend(),
                       [](const auto &tx) { return tx != nullptr; }));

    std::vector<COutPoint> coins_to_uncache;
    const auto result = [&]() EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
        if (test_accept) {
            auto args = MemPoolAccept::ATMPArgs::PackageTestAccept(
                config, GetTime(), coins_to_uncache);
            return MemPoolAccept(pool, active_chainstate)
                .AcceptMultipleTransactions(package, args);
        } else {
            auto args = MemPoolAccept::ATMPArgs::PackageChildWithParents(
                config, GetTime(), coins_to_uncache);
            return MemPoolAccept(pool, active_chainstate)
                .AcceptPackage(package, args);
        }
    }();

    // Uncache coins pertaining to transactions that were not submitted to the
    // mempool.
    if (test_accept || result.m_state.IsInvalid()) {
        for (const COutPoint &hashTx : coins_to_uncache) {
            active_chainstate.CoinsTip().Uncache(hashTx);
        }
    }
    // Ensure the coins cache is still within limits.
    BlockValidationState state_dummy;
    active_chainstate.FlushStateToDisk(state_dummy, FlushStateMode::PERIODIC);
    return result;
}
1180 
1181 Amount GetBlockSubsidy(int nHeight, const Consensus::Params &consensusParams) {
1182  int halvings = nHeight / consensusParams.nSubsidyHalvingInterval;
1183  // Force block reward to zero when right shift is undefined.
1184  if (halvings >= 64) {
1185  return Amount::zero();
1186  }
1187 
1188  Amount nSubsidy = 50 * COIN;
1189  // Subsidy is cut in half every 210,000 blocks which will occur
1190  // approximately every 4 years.
1191  return ((nSubsidy / SATOSHI) >> halvings) * SATOSHI;
1192 }
1193 
/**
 * Construct the on-disk coins database (stored in the node's data directory
 * under ldb_name) together with the error-catcher view wrapping it. The
 * in-memory cache layer on top is created separately by InitCache().
 */
CoinsViews::CoinsViews(std::string ldb_name, size_t cache_size_bytes,
                       bool in_memory, bool should_wipe)
    : m_dbview(gArgs.GetDataDirNet() / ldb_name, cache_size_bytes, in_memory,
               should_wipe),
      m_catcherview(&m_dbview) {}
1199 
// Create the in-memory cache layer on top of the error-catcher view.
// NOTE(review): one line (likely a lock assertion) is missing from this
// extract between the signature and the body.
void CoinsViews::InitCache() {
    m_cacheview = std::make_unique<CCoinsViewCache>(&m_catcherview);
}
1204 
// Constructor initializer list for CChainState: wires up the mempool, block
// manager and chainstate manager references, captures global chain params,
// and records the snapshot base block hash (if this chainstate was loaded
// from a UTXO snapshot).
// NOTE(review): the first signature line (constructor name and leading
// parameters, e.g. the mempool and blockman) is missing from this extract.
    ChainstateManager &chainman,
    std::optional<BlockHash> from_snapshot_blockhash)
    : m_mempool(mempool), m_blockman(blockman), m_params(::Params()),
      m_chainman(chainman), m_from_snapshot_blockhash(from_snapshot_blockhash) {
}
1211 
/**
 * Create the leveldb-backed coins views for this chainstate. When the
 * chainstate was built from a UTXO snapshot, the database name is suffixed
 * with the snapshot block hash so it does not collide with the validated
 * chainstate's database.
 *
 * NOTE(review): the condition line opening the suffixing block (presumably
 * `if (m_from_snapshot_blockhash) {`) is missing from this extract.
 */
void CChainState::InitCoinsDB(size_t cache_size_bytes, bool in_memory,
                              bool should_wipe, std::string leveldb_name) {
        leveldb_name += "_" + m_from_snapshot_blockhash->ToString();
    }
    m_coins_views = std::make_unique<CoinsViews>(leveldb_name, cache_size_bytes,
                                                 in_memory, should_wipe);
}
1220 
// Record the configured coinstip cache size and build the in-memory cache.
// Requires InitCoinsDB() to have been called first (asserted below).
// NOTE(review): one line (likely a lock assertion) is missing from this
// extract between the signature and the body.
void CChainState::InitCoinsCache(size_t cache_size_bytes) {
    assert(m_coins_views != nullptr);
    m_coinstip_cache_size_bytes = cache_size_bytes;
    m_coins_views->InitCache();
}
1227 
// Note that though this is marked const, we may end up modifying
// `m_cached_finished_ibd`, which is a performance-related implementation
// detail. This function must be marked `const` so that `CValidationInterface`
// clients (which are given a `const CChainState*`) can call it.
//
// NOTE(review): the signature line of this definition (IsInitialBlockDownload
// per the log message below) is missing from this extract, as is one of the
// early-return conditions (its dangling body is kept verbatim).
    // Optimization: pre-test latch before taking the lock.
    if (m_cached_finished_ibd.load(std::memory_order_relaxed)) {
        return false;
    }

    LOCK(cs_main);
    // Re-check under the lock: another thread may have latched meanwhile.
    if (m_cached_finished_ibd.load(std::memory_order_relaxed)) {
        return false;
    }
    // Still in IBD while importing or reindexing.
    if (fImporting || fReindex) {
        return true;
    }
    // No tip yet: definitely still downloading.
    if (m_chain.Tip() == nullptr) {
        return true;
    }
    // NOTE(review): condition line missing from this extract.
        return true;
    }
    // Still in IBD if the tip is older than nMaxTipAge.
    if (m_chain.Tip()->GetBlockTime() < (GetTime() - nMaxTipAge)) {
        return true;
    }
    LogPrintf("Leaving InitialBlockDownload (latching to false)\n");
    // Latch the result: once out of IBD we never report being in IBD again.
    m_cached_finished_ibd.store(true, std::memory_order_relaxed);
    return false;
}
1259 
1260 static void AlertNotify(const std::string &strMessage) {
1261  uiInterface.NotifyAlertChanged();
1262 #if defined(HAVE_SYSTEM)
1263  std::string strCmd = gArgs.GetArg("-alertnotify", "");
1264  if (strCmd.empty()) {
1265  return;
1266  }
1267 
1268  // Alert text should be plain ascii coming from a trusted source, but to be
1269  // safe we first strip anything not in safeChars, then add single quotes
1270  // around the whole string before passing it to the shell:
1271  std::string singleQuote("'");
1272  std::string safeStatus = SanitizeString(strMessage);
1273  safeStatus = singleQuote + safeStatus + singleQuote;
1274  boost::replace_all(strCmd, "%s", safeStatus);
1275 
1276  std::thread t(runCommand, strCmd);
1277  // thread runs free
1278  t.detach();
1279 #endif
1280 }
1281 
// NOTE(review): the signature line of this definition
// (CheckForkWarningConditions per the fork-warning logic below) is missing
// from this extract, along with several interior lines (condition lines and
// flag setters); dangling fragments are kept verbatim.

    // Before we get past initial download, we cannot reliably alert about forks
    // (we assume we don't get stuck on a fork before finishing our initial
    // sync)
    if (IsInitialBlockDownload()) {
        return;
    }

    // If our best fork is no longer within 72 blocks (+/- 12 hours if no one
    // mines it) of our head, drop it
    // NOTE(review): condition line missing from this extract.
        m_best_fork_tip = nullptr;
    }

    // Warn when we track a qualifying fork tip, or when the best known
    // invalid chain has substantially more work than our tip.
    if (m_best_fork_tip ||
        (m_chainman.m_best_invalid &&
         m_chainman.m_best_invalid->nChainWork >
             m_chain.Tip()->nChainWork + (GetBlockProof(*m_chain.Tip()) * 6))) {
        // NOTE(review): a guard line is missing here in this extract.
        std::string warning =
            std::string("'Warning: Large-work fork detected, forking after "
                        "block ") +
            m_best_fork_base->phashBlock->ToString() + std::string("'");
        AlertNotify(warning);
    }

    // NOTE(review): the condition opening this branch is missing from this
    // extract, as are some LogPrintf arguments and flag-setter lines.
        LogPrintf("%s: Warning: Large fork found\n forking the "
                  "chain at height %d (%s)\n lasting to height %d "
                  "(%s).\nChain state database corruption likely.\n",
                  __func__, m_best_fork_base->nHeight,
        SetfLargeWorkForkFound(true);
    } else {
        LogPrintf("%s: Warning: Found invalid chain at least ~6 blocks "
                  "longer than our best chain.\nChain state database "
                  "corruption likely.\n",
                  __func__);
    }
    } else {
        SetfLargeWorkForkFound(false);
    }
}
1331 
// NOTE(review): the first signature line of this definition (method name and
// any leading parameter) is missing from this extract, as is the trailing
// statement (likely a call into CheckForkWarningConditions()) — confirm
// against the full source.
    CBlockIndex *pindexNewForkTip) {

    // If we are on a fork that is sufficiently large, set a warning flag.
    const CBlockIndex *pfork = m_chain.FindFork(pindexNewForkTip);

    // We define a condition where we should warn the user about as a fork of at
    // least 7 blocks with a tip within 72 blocks (+/- 12 hours if no one mines
    // it) of ours. We use 7 blocks rather arbitrarily as it represents just
    // under 10% of sustained network hash rate operating on the fork, or a
    // chain that is entirely longer than ours and invalid (note that this
    // should be detected by both). We define it this way because it allows us
    // to only store the highest fork tip (+ base) which meets the 7-block
    // condition and from this always have the most-likely-to-cause-warning fork
    if (pfork &&
        (!m_best_fork_tip ||
         pindexNewForkTip->nHeight > m_best_fork_tip->nHeight) &&
        pindexNewForkTip->nChainWork - pfork->nChainWork >
            (GetBlockProof(*pfork) * 7) &&
        m_chain.Height() - pindexNewForkTip->nHeight < 72) {
        m_best_fork_tip = pindexNewForkTip;
        m_best_fork_base = pfork;
    }

    // NOTE(review): a trailing statement is missing here in this extract.
}
1359 
// Called both upon regular invalid block discovery *and* InvalidateBlock
// NOTE(review): the signature lines of this definition (InvalidChainFound
// taking a CBlockIndex *pindexNew, per the uses below) are missing from this
// extract, along with a few interior lines; fragments kept verbatim.
    // Track the most-work invalid block index seen so far.
    if (!m_chainman.m_best_invalid ||
        pindexNew->nChainWork > m_chainman.m_best_invalid->nChainWork) {
        m_chainman.m_best_invalid = pindexNew;
    }
    // NOTE(review): part of this condition and the body statement are missing
    // from this extract.
    if (m_chainman.m_best_header != nullptr &&
        pindexNew) {
    }

    // If the invalid chain found is supposed to be finalized, we need to move
    // back the finalization point.
    if (IsBlockAvalancheFinalized(pindexNew)) {
        // NOTE(review): a line (likely a lock) is missing here.
        m_avalancheFinalizedBlockIndex = pindexNew->pprev;
    }

    LogPrintf("%s: invalid block=%s height=%d log2_work=%f date=%s\n",
              __func__, pindexNew->GetBlockHash().ToString(),
              pindexNew->nHeight,
              log(pindexNew->nChainWork.getdouble()) / log(2.0),
              FormatISO8601DateTime(pindexNew->GetBlockTime()));
    CBlockIndex *tip = m_chain.Tip();
    assert(tip);
    // NOTE(review): the final LogPrintf argument line is missing from this
    // extract.
    LogPrintf("%s: current best=%s height=%d log2_work=%f date=%s\n",
              __func__, tip->GetBlockHash().ToString(), m_chain.Height(),
              log(tip->nChainWork.getdouble()) / log(2.0),
}
1392 
// Same as InvalidChainFound, above, except not called directly from
// InvalidateBlock, which does its own setBlockIndexCandidates management.
// NOTE(review): the first signature line and the condition opening the block
// below are missing from this extract; the visible body marks the block
// failed, records it, and reports the invalid chain.
    const BlockValidationState &state) {
        pindex->nStatus = pindex->nStatus.withFailed();
        m_chainman.m_failed_blocks.insert(pindex);
        m_blockman.m_dirty_blockindex.insert(pindex);
        InvalidChainFound(pindex);
    }
}
1405 
1406 void SpendCoins(CCoinsViewCache &view, const CTransaction &tx, CTxUndo &txundo,
1407  int nHeight) {
1408  // Mark inputs spent.
1409  if (tx.IsCoinBase()) {
1410  return;
1411  }
1412 
1413  txundo.vprevout.reserve(tx.vin.size());
1414  for (const CTxIn &txin : tx.vin) {
1415  txundo.vprevout.emplace_back();
1416  bool is_spent = view.SpendCoin(txin.prevout, &txundo.vprevout.back());
1417  assert(is_spent);
1418  }
1419 }
1420 
// Apply a transaction to the coins view: spend its inputs (recording undo
// data in txundo) and then add its outputs at the given height.
void UpdateCoins(CCoinsViewCache &view, const CTransaction &tx, CTxUndo &txundo,
                 int nHeight) {
    SpendCoins(view, tx, txundo, nHeight);
    AddCoins(view, tx, nHeight);
}
1426 
// NOTE(review): the signature line of this call operator and several interior
// lines (the signature-checker argument to VerifyScript and the sigchecks
// limiter consumption) are missing from this extract; dangling fragments are
// kept verbatim. The visible logic: run VerifyScript on the input's scriptSig
// against the spent output's scriptPubKey, then fail if a sigchecks limiter
// is exceeded even though the script itself succeeded.
    const CScript &scriptSig = ptxTo->vin[nIn].scriptSig;
    if (!VerifyScript(scriptSig, m_tx_out.scriptPubKey, nFlags,
                      metrics, &error)) {
        return false;
    }
    if ((pTxLimitSigChecks &&
        // we can't assign a meaningful script error (since the script
        // succeeded), but remove the ScriptError::OK which could be
        // misinterpreted.
        return false;
    }
    return true;
}
1447 
// NOTE(review): the first signature line of this definition (return type,
// transaction and validation-state parameters — CheckInputScripts per the
// surrounding code) is missing from this extract, along with the first line
// of several state.Invalid(...) calls and the mandatory-flags mask; dangling
// fragments are kept verbatim below.
                       const CCoinsViewCache &inputs, const uint32_t flags,
                       bool sigCacheStore, bool scriptCacheStore,
                       const PrecomputedTransactionData &txdata,
                       int &nSigChecksOut, TxSigCheckLimiter &txLimitSigChecks,
                       CheckInputsLimiter *pBlockLimitSigChecks,
                       std::vector<CScriptCheck> *pvChecks) {
    // Coinbase transactions have no input scripts to verify.
    assert(!tx.IsCoinBase());

    if (pvChecks) {
        pvChecks->reserve(tx.vin.size());
    }

    // First check if script executions have been cached with the same flags.
    // Note that this assumes that the inputs provided are correct (ie that the
    // transaction hash which is in tx's prevouts properly commits to the
    // scriptPubKey in the inputs view of that transaction).
    ScriptCacheKey hashCacheEntry(tx, flags);
    if (IsKeyInScriptCache(hashCacheEntry, !scriptCacheStore, nSigChecksOut)) {
        if (!txLimitSigChecks.consume_and_check(nSigChecksOut) ||
            (pBlockLimitSigChecks &&
             !pBlockLimitSigChecks->consume_and_check(nSigChecksOut))) {
            // NOTE(review): first line of this Invalid(...) call is missing
            // from this extract.
                       "too-many-sigchecks");
        }
        return true;
    }

    int nSigChecksTotal = 0;

    for (size_t i = 0; i < tx.vin.size(); i++) {
        const COutPoint &prevout = tx.vin[i].prevout;
        const Coin &coin = inputs.AccessCoin(prevout);
        assert(!coin.IsSpent());

        // We very carefully only pass in things to CScriptCheck which are
        // clearly committed to by tx's hash. This provides a sanity
        // check that our caching is not introducing consensus failures through
        // additional data in, eg, the coins being spent being checked as a part
        // of CScriptCheck.

        // Verify signature
        CScriptCheck check(coin.GetTxOut(), tx, i, flags, sigCacheStore, txdata,
                           &txLimitSigChecks, pBlockLimitSigChecks);

        // If pvChecks is not null, defer the check execution to the caller.
        if (pvChecks) {
            pvChecks->push_back(std::move(check));
            continue;
        }

        if (!check()) {
            ScriptError scriptError = check.GetScriptError();
            // Compute flags without the optional standardness flags.
            // This differs from MANDATORY_SCRIPT_VERIFY_FLAGS as it contains
            // additional upgrade flags (see AcceptToMemoryPoolWorker variable
            // extraFlags).
            // NOTE(review): the mask expression line is missing from this
            // extract.
            uint32_t mandatoryFlags =
            if (flags != mandatoryFlags) {
                // Check whether the failure was caused by a non-mandatory
                // script verification check. If so, ensure we return
                // NOT_STANDARD instead of CONSENSUS to avoid downstream users
                // splitting the network between upgraded and non-upgraded nodes
                // by banning CONSENSUS-failing data providers.
                CScriptCheck check2(coin.GetTxOut(), tx, i, mandatoryFlags,
                                    sigCacheStore, txdata);
                if (check2()) {
                    // NOTE(review): the result-code argument line of this
                    // Invalid(...) call is missing from this extract.
                    return state.Invalid(
                        strprintf("non-mandatory-script-verify-flag (%s)",
                                  ScriptErrorString(scriptError)));
                }
                // update the error message to reflect the mandatory violation.
                scriptError = check2.GetScriptError();
            }

            // MANDATORY flag failures correspond to
            // TxValidationResult::TX_CONSENSUS. Because CONSENSUS failures are
            // the most serious case of validation failures, we may need to
            // consider using RECENT_CONSENSUS_CHANGE for any script failure
            // that could be due to non-upgraded nodes which we may want to
            // support, to avoid splitting the network (but this depends on the
            // details of how net_processing handles such errors).
            // NOTE(review): the result-code argument line of this Invalid(...)
            // call is missing from this extract.
            return state.Invalid(
                strprintf("mandatory-script-verify-flag-failed (%s)",
                          ScriptErrorString(scriptError)));
        }

        nSigChecksTotal += check.GetScriptExecutionMetrics().nSigChecks;
    }

    nSigChecksOut = nSigChecksTotal;

    if (scriptCacheStore && !pvChecks) {
        // We executed all of the provided scripts, and were told to cache the
        // result. Do so now.
        AddKeyInScriptCache(hashCacheEntry, nSigChecksTotal);
    }

    return true;
}
1552 
// Abort the node with a fatal error: forwards to the message-only
// AbortNode overload for shutdown/notification side effects, then records
// the message on the validation state via state.Error() and returns its
// result so callers can propagate the failure.
bool AbortNode(BlockValidationState &state, const std::string &strMessage,
               const bilingual_str &userMessage) {
    AbortNode(strMessage, userMessage);
    return state.Error(strMessage);
}
1558 
// Restore a single spent coin into the view while disconnecting a block.
// NOTE(review): the signature lines (a helper taking the undo Coin and the
// coins view, returning a DisconnectResult) and the final return statement
// are missing from this extract.
    const COutPoint &out) {
    // Tracks whether the restore was fully consistent (no overwrite).
    bool fClean = true;

    if (view.HaveCoin(out)) {
        // Overwriting transaction output.
        fClean = false;
    }

    if (undo.GetHeight() == 0) {
        // Missing undo metadata (height and coinbase). Older versions included
        // this information only in undo records for the last spend of a
        // transactions' outputs. This implies that it must be present for some
        // other output of the same tx.
        const Coin &alternate = AccessByTxid(view, out.GetTxId());
        if (alternate.IsSpent()) {
            // Adding output for transaction without known metadata
            return DisconnectResult::FAILED;
        }

        // This is somewhat ugly, but hopefully utility is limited. This is only
        // useful when working from legacy on disk data. In any case, putting
        // the correct information in there doesn't hurt.
        const_cast<Coin &>(undo) = Coin(undo.GetTxOut(), alternate.GetHeight(),
                                        alternate.IsCoinBase());
    }

    // If the coin already exists as an unspent coin in the cache, then the
    // possible_overwrite parameter to AddCoin must be set to true. We have
    // already checked whether an unspent coin exists above using HaveCoin, so
    // we don't need to guess. When fClean is false, an unspent coin already
    // existed and it is an overwrite.
    view.AddCoin(out, std::move(undo), !fClean);

    // NOTE(review): the final return (based on fClean) is missing from this
    // extract.
}
1596 
/**
 * Undo the effects of this block (with given index) on the UTXO set
 * represented by view. Reads the block's undo data from disk and applies it;
 * returns FAILED if the undo data cannot be read, otherwise the result of
 * ApplyBlockUndo.
 */
DisconnectResult CChainState::DisconnectBlock(const CBlock &block,
                                              const CBlockIndex *pindex,
                                              CCoinsViewCache &view) {
    CBlockUndo blockUndo;
    if (!UndoReadFromDisk(blockUndo, pindex)) {
        error("DisconnectBlock(): failure reading undo data");
        return DisconnectResult::FAILED;
    }

    return ApplyBlockUndo(blockUndo, block, pindex, view);
}
1613 
// Apply a block's undo data to the view: restore all spent inputs, remove the
// outputs the block created, and move the view's best-block pointer back to
// the previous block.
// NOTE(review): the first signature line (return type and the CBlockUndo
// parameter) and the final return statement are missing from this extract.
    const CBlock &block, const CBlockIndex *pindex,
    CCoinsViewCache &view) {
    // Tracks whether every undo step was fully consistent.
    bool fClean = true;

    // One undo record per non-coinbase transaction.
    if (blockUndo.vtxundo.size() + 1 != block.vtx.size()) {
        error("DisconnectBlock(): block and undo data inconsistent");
        return DisconnectResult::FAILED;
    }

    // First, restore inputs.
    for (size_t i = 1; i < block.vtx.size(); i++) {
        const CTransaction &tx = *(block.vtx[i]);
        const CTxUndo &txundo = blockUndo.vtxundo[i - 1];
        if (txundo.vprevout.size() != tx.vin.size()) {
            error("DisconnectBlock(): transaction and undo data inconsistent");
            return DisconnectResult::FAILED;
        }

        for (size_t j = 0; j < tx.vin.size(); j++) {
            const COutPoint &out = tx.vin[j].prevout;
            const Coin &undo = txundo.vprevout[j];
            DisconnectResult res = UndoCoinSpend(undo, view, out);
            if (res == DisconnectResult::FAILED) {
                return DisconnectResult::FAILED;
            }
            fClean = fClean && res != DisconnectResult::UNCLEAN;
        }
    }

    // Second, revert created outputs.
    for (const auto &ptx : block.vtx) {
        const CTransaction &tx = *ptx;
        const TxId &txid = tx.GetId();
        const bool is_coinbase = tx.IsCoinBase();

        // Check that all outputs are available and match the outputs in the
        // block itself exactly.
        for (size_t o = 0; o < tx.vout.size(); o++) {
            if (tx.vout[o].scriptPubKey.IsUnspendable()) {
                continue;
            }

            COutPoint out(txid, o);
            Coin coin;
            bool is_spent = view.SpendCoin(out, &coin);
            if (!is_spent || tx.vout[o] != coin.GetTxOut() ||
                uint32_t(pindex->nHeight) != coin.GetHeight() ||
                is_coinbase != coin.IsCoinBase()) {
                // transaction output mismatch
                fClean = false;
            }
        }
    }

    // Move best block pointer to previous block.
    view.SetBestBlock(block.hashPrevBlock);

    // NOTE(review): the final return (based on fClean) is missing from this
    // extract.
}
1674 
1676 
// Start the requested number of script-verification worker threads on the
// file-global script check queue.
void StartScriptCheckWorkerThreads(int threads_num) {
    scriptcheckqueue.StartWorkerThreads(threads_num);
}
1680 
// Stop the script-verification worker threads.
// NOTE(review): the signature line of this function is missing from this
// extract; only the body is visible.
    scriptcheckqueue.StopWorkerThreads();
}
1684 
// Returns the script flags which should be checked for the block after
// the given block.
// NOTE(review): every `flags |= ...` statement inside the conditionals below
// is missing from this extract; only the deployment/upgrade activation checks
// are visible, so the specific flags each upgrade adds cannot be confirmed
// from this view.
static uint32_t GetNextBlockScriptFlags(const Consensus::Params &params,
                                        const CBlockIndex *pindex) {
    uint32_t flags = SCRIPT_VERIFY_NONE;

    // Enforce P2SH (BIP16)
    if (DeploymentActiveAfter(pindex, params, Consensus::DEPLOYMENT_P2SH)) {
    }

    // Enforce the DERSIG (BIP66) rule.
    if (DeploymentActiveAfter(pindex, params, Consensus::DEPLOYMENT_DERSIG)) {
    }

    // Start enforcing CHECKLOCKTIMEVERIFY (BIP65) rule.
    if (DeploymentActiveAfter(pindex, params, Consensus::DEPLOYMENT_CLTV)) {
    }

    // Start enforcing CSV (BIP68, BIP112 and BIP113) rule.
    if (DeploymentActiveAfter(pindex, params, Consensus::DEPLOYMENT_CSV)) {
    }

    // If the UAHF is enabled, we start accepting replay protected txns
    if (IsUAHFenabled(params, pindex)) {
    }

    // If the DAA HF is enabled, we start rejecting transaction that use a high
    // s in their signature. We also make sure that signature that are supposed
    // to fail (for instance in multisig or other forms of smart contracts) are
    // null.
    if (IsDAAEnabled(params, pindex)) {
    }

    // When the magnetic anomaly fork is enabled, we start accepting
    // transactions using the OP_CHECKDATASIG opcode and it's verify
    // alternative. We also start enforcing push only signatures and
    // clean stack.
    if (IsMagneticAnomalyEnabled(params, pindex)) {
    }

    if (IsGravitonEnabled(params, pindex)) {
    }

    if (IsPhononEnabled(params, pindex)) {
    }

    // We make sure this node will have replay protection during the next hard
    // fork.
    if (IsReplayProtectionEnabled(params, pindex)) {
    }

    return flags;
}
1752 
// Cumulative benchmarking accumulators (microsecond totals and a block
// counter) used by ConnectBlock's BCLog::BENCH reporting below.
static int64_t nTimeCheck = 0;
static int64_t nTimeForks = 0;
static int64_t nTimeVerify = 0;
static int64_t nTimeConnect = 0;
static int64_t nTimeIndex = 0;
static int64_t nTimeTotal = 0;
static int64_t nBlocksTotal = 0;
1760 
1767 bool CChainState::ConnectBlock(const CBlock &block, BlockValidationState &state,
1768  CBlockIndex *pindex, CCoinsViewCache &view,
1769  BlockValidationOptions options,
1770  bool fJustCheck) {
1772  assert(pindex);
1773 
1774  const BlockHash block_hash{block.GetHash()};
1775  assert(*pindex->phashBlock == block_hash);
1776 
1777  int64_t nTimeStart = GetTimeMicros();
1778 
1779  const Consensus::Params &consensusParams = m_params.GetConsensus();
1780 
1781  // Check it again in case a previous version let a bad block in
1782  // NOTE: We don't currently (re-)invoke ContextualCheckBlock() or
1783  // ContextualCheckBlockHeader() here. This means that if we add a new
1784  // consensus rule that is enforced in one of those two functions, then we
1785  // may have let in a block that violates the rule prior to updating the
1786  // software, and we would NOT be enforcing the rule here. Fully solving
1787  // upgrade from one software version to the next after a consensus rule
1788  // change is potentially tricky and issue-specific.
1789  // Also, currently the rule against blocks more than 2 hours in the future
1790  // is enforced in ContextualCheckBlockHeader(); we wouldn't want to
1791  // re-enforce that rule here (at least until we make it impossible for
1792  // GetAdjustedTime() to go backward).
1793  if (!CheckBlock(block, state, consensusParams,
1794  options.withCheckPoW(!fJustCheck)
1795  .withCheckMerkleRoot(!fJustCheck))) {
1797  // We don't write down blocks to disk if they may have been
1798  // corrupted, so this should be impossible unless we're having
1799  // hardware problems.
1800  return AbortNode(state, "Corrupt block found indicating potential "
1801  "hardware failure; shutting down");
1802  }
1803  return error("%s: Consensus::CheckBlock: %s", __func__,
1804  state.ToString());
1805  }
1806 
1807  // Verify that the view's current state corresponds to the previous block
1808  BlockHash hashPrevBlock =
1809  pindex->pprev == nullptr ? BlockHash() : pindex->pprev->GetBlockHash();
1810  assert(hashPrevBlock == view.GetBestBlock());
1811 
1812  nBlocksTotal++;
1813 
1814  // Special case for the genesis block, skipping connection of its
1815  // transactions (its coinbase is unspendable)
1816  if (block_hash == consensusParams.hashGenesisBlock) {
1817  if (!fJustCheck) {
1818  view.SetBestBlock(pindex->GetBlockHash());
1819  }
1820 
1821  return true;
1822  }
1823 
1824  bool fScriptChecks = true;
1825  if (!hashAssumeValid.IsNull()) {
1826  // We've been configured with the hash of a block which has been
1827  // externally verified to have a valid history. A suitable default value
1828  // is included with the software and updated from time to time. Because
1829  // validity relative to a piece of software is an objective fact these
1830  // defaults can be easily reviewed. This setting doesn't force the
1831  // selection of any particular chain but makes validating some faster by
1832  // effectively caching the result of part of the verification.
1833  BlockMap::const_iterator it =
1834  m_blockman.m_block_index.find(hashAssumeValid);
1835  if (it != m_blockman.m_block_index.end()) {
1836  if (it->second.GetAncestor(pindex->nHeight) == pindex &&
1838  pindex &&
1840  // This block is a member of the assumed verified chain and an
1841  // ancestor of the best header.
1842  // Script verification is skipped when connecting blocks under
1843  // the assumevalid block. Assuming the assumevalid block is
1844  // valid this is safe because block merkle hashes are still
1845  // computed and checked, Of course, if an assumed valid block is
1846  // invalid due to false scriptSigs this optimization would allow
1847  // an invalid chain to be accepted.
1848  // The equivalent time check discourages hash power from
1849  // extorting the network via DOS attack into accepting an
1850  // invalid block through telling users they must manually set
1851  // assumevalid. Requiring a software change or burying the
1852  // invalid block, regardless of the setting, makes it hard to
1853  // hide the implication of the demand. This also avoids having
1854  // release candidates that are hardly doing any signature
1855  // verification at all in testing without having to artificially
1856  // set the default assumed verified block further back. The test
1857  // against nMinimumChainWork prevents the skipping when denied
1858  // access to any chain at least as good as the expected chain.
1859  fScriptChecks = (GetBlockProofEquivalentTime(
1860  *m_chainman.m_best_header, *pindex,
1862  consensusParams) <= 60 * 60 * 24 * 7 * 2);
1863  }
1864  }
1865  }
1866 
1867  int64_t nTime1 = GetTimeMicros();
1868  nTimeCheck += nTime1 - nTimeStart;
1869  LogPrint(BCLog::BENCH, " - Sanity checks: %.2fms [%.2fs (%.2fms/blk)]\n",
1870  MILLI * (nTime1 - nTimeStart), nTimeCheck * MICRO,
1872 
1873  // Do not allow blocks that contain transactions which 'overwrite' older
1874  // transactions, unless those are already completely spent. If such
1875  // overwrites are allowed, coinbases and transactions depending upon those
1876  // can be duplicated to remove the ability to spend the first instance --
1877  // even after being sent to another address.
1878  // See BIP30, CVE-2012-1909, and http://r6.ca/blog/20120206T005236Z.html
1879  // for more information. This rule was originally applied to all blocks
1880  // with a timestamp after March 15, 2012, 0:00 UTC. Now that the whole
1881  // chain is irreversibly beyond that time it is applied to all blocks
1882  // except the two in the chain that violate it. This prevents exploiting
1883  // the issue against nodes during their initial block download.
1884  bool fEnforceBIP30 = !((pindex->nHeight == 91842 &&
1885  pindex->GetBlockHash() ==
1886  uint256S("0x00000000000a4d0a398161ffc163c503763"
1887  "b1f4360639393e0e4c8e300e0caec")) ||
1888  (pindex->nHeight == 91880 &&
1889  pindex->GetBlockHash() ==
1890  uint256S("0x00000000000743f190a18c5577a3c2d2a1f"
1891  "610ae9601ac046a38084ccb7cd721")));
1892 
1893  // Once BIP34 activated it was not possible to create new duplicate
1894  // coinbases and thus other than starting with the 2 existing duplicate
1895  // coinbase pairs, not possible to create overwriting txs. But by the time
1896  // BIP34 activated, in each of the existing pairs the duplicate coinbase had
1897  // overwritten the first before the first had been spent. Since those
1898  // coinbases are sufficiently buried it's no longer possible to create
1899  // further duplicate transactions descending from the known pairs either. If
1900  // we're on the known chain at height greater than where BIP34 activated, we
1901  // can save the db accesses needed for the BIP30 check.
1902 
1903  // BIP34 requires that a block at height X (block X) has its coinbase
1904  // scriptSig start with a CScriptNum of X (indicated height X). The above
1905  // logic of no longer requiring BIP30 once BIP34 activates is flawed in the
1906  // case that there is a block X before the BIP34 height of 227,931 which has
1907  // an indicated height Y where Y is greater than X. The coinbase for block
1908  // X would also be a valid coinbase for block Y, which could be a BIP30
1909  // violation. An exhaustive search of all mainnet coinbases before the
1910  // BIP34 height which have an indicated height greater than the block height
1911  // reveals many occurrences. The 3 lowest indicated heights found are
1912  // 209,921, 490,897, and 1,983,702 and thus coinbases for blocks at these 3
1913  // heights would be the first opportunity for BIP30 to be violated.
1914 
1915  // The search reveals a great many blocks which have an indicated height
1916  // greater than 1,983,702, so we simply remove the optimization to skip
1917  // BIP30 checking for blocks at height 1,983,702 or higher. Before we reach
1918  // that block in another 25 years or so, we should take advantage of a
1919  // future consensus change to do a new and improved version of BIP34 that
1920  // will actually prevent ever creating any duplicate coinbases in the
1921  // future.
1922  static constexpr int BIP34_IMPLIES_BIP30_LIMIT = 1983702;
1923 
1924  // There is no potential to create a duplicate coinbase at block 209,921
1925  // because this is still before the BIP34 height and so explicit BIP30
1926  // checking is still active.
1927 
1928  // The final case is block 176,684 which has an indicated height of
1929  // 490,897. Unfortunately, this issue was not discovered until about 2 weeks
1930  // before block 490,897 so there was not much opportunity to address this
1931  // case other than to carefully analyze it and determine it would not be a
1932  // problem. Block 490,897 was, in fact, mined with a different coinbase than
1933  // block 176,684, but it is important to note that even if it hadn't been or
1934  // is remined on an alternate fork with a duplicate coinbase, we would still
1935  // not run into a BIP30 violation. This is because the coinbase for 176,684
1936  // is spent in block 185,956 in transaction
1937  // d4f7fbbf92f4a3014a230b2dc70b8058d02eb36ac06b4a0736d9d60eaa9e8781. This
1938  // spending transaction can't be duplicated because it also spends coinbase
1939  // 0328dd85c331237f18e781d692c92de57649529bd5edf1d01036daea32ffde29. This
1940  // coinbase has an indicated height of over 4.2 billion, and wouldn't be
1941  // duplicatable until that height, and it's currently impossible to create a
1942  // chain that long. Nevertheless we may wish to consider a future soft fork
1943  // which retroactively prevents block 490,897 from creating a duplicate
1944  // coinbase. The two historical BIP30 violations often provide a confusing
1945  // edge case when manipulating the UTXO and it would be simpler not to have
1946  // another edge case to deal with.
1947 
1948  // testnet3 has no blocks before the BIP34 height with indicated heights
1949  // post BIP34 before approximately height 486,000,000 and presumably will
1950  // be reset before it reaches block 1,983,702 and starts doing unnecessary
1951  // BIP30 checking again.
1952  assert(pindex->pprev);
1953  CBlockIndex *pindexBIP34height =
1954  pindex->pprev->GetAncestor(consensusParams.BIP34Height);
1955  // Only continue to enforce if we're below BIP34 activation height or the
1956  // block hash at that height doesn't correspond.
1957  fEnforceBIP30 =
1958  fEnforceBIP30 &&
1959  (!pindexBIP34height ||
1960  !(pindexBIP34height->GetBlockHash() == consensusParams.BIP34Hash));
1961 
1962  // TODO: Remove BIP30 checking from block height 1,983,702 on, once we have
1963  // a consensus change that ensures coinbases at those heights can not
1964  // duplicate earlier coinbases.
1965  if (fEnforceBIP30 || pindex->nHeight >= BIP34_IMPLIES_BIP30_LIMIT) {
1966  for (const auto &tx : block.vtx) {
1967  for (size_t o = 0; o < tx->vout.size(); o++) {
1968  if (view.HaveCoin(COutPoint(tx->GetId(), o))) {
1969  LogPrintf("ERROR: ConnectBlock(): tried to overwrite "
1970  "transaction\n");
1972  "bad-txns-BIP30");
1973  }
1974  }
1975  }
1976  }
1977 
1978  // Enforce BIP68 (sequence locks).
1979  int nLockTimeFlags = 0;
1980  if (DeploymentActiveAt(*pindex, consensusParams,
1982  nLockTimeFlags |= LOCKTIME_VERIFY_SEQUENCE;
1983  }
1984 
1985  const uint32_t flags =
1986  GetNextBlockScriptFlags(consensusParams, pindex->pprev);
1987 
1988  int64_t nTime2 = GetTimeMicros();
1989  nTimeForks += nTime2 - nTime1;
1990  LogPrint(BCLog::BENCH, " - Fork checks: %.2fms [%.2fs (%.2fms/blk)]\n",
1991  MILLI * (nTime2 - nTime1), nTimeForks * MICRO,
1993 
1994  std::vector<int> prevheights;
1995  Amount nFees = Amount::zero();
1996  int nInputs = 0;
1997 
1998  // Limit the total executed signature operations in the block, a consensus
1999  // rule. Tracking during the CPU-consuming part (validation of uncached
2000  // inputs) is per-input atomic and validation in each thread stops very
2001  // quickly after the limit is exceeded, so an adversary cannot cause us to
2002  // exceed the limit by much at all.
2003  CheckInputsLimiter nSigChecksBlockLimiter(
2005 
2006  std::vector<TxSigCheckLimiter> nSigChecksTxLimiters;
2007  nSigChecksTxLimiters.resize(block.vtx.size() - 1);
2008 
2009  CBlockUndo blockundo;
2010  blockundo.vtxundo.resize(block.vtx.size() - 1);
2011 
2012  CCheckQueueControl<CScriptCheck> control(fScriptChecks ? &scriptcheckqueue
2013  : nullptr);
2014 
2015  // Add all outputs
2016  try {
2017  for (const auto &ptx : block.vtx) {
2018  AddCoins(view, *ptx, pindex->nHeight);
2019  }
2020  } catch (const std::logic_error &e) {
2021  // This error will be thrown from AddCoin if we try to connect a block
2022  // containing duplicate transactions. Such a thing should normally be
2023  // caught early nowadays (due to ContextualCheckBlock's CTOR
2024  // enforcement) however some edge cases can escape that:
2025  // - ContextualCheckBlock does not get re-run after saving the block to
2026  // disk, and older versions may have saved a weird block.
2027  // - its checks are not applied to pre-CTOR chains, which we might visit
2028  // with checkpointing off.
2029  LogPrintf("ERROR: ConnectBlock(): tried to overwrite transaction\n");
2031  "tx-duplicate");
2032  }
2033 
2034  size_t txIndex = 0;
2035  // nSigChecksRet may be accurate (found in cache) or 0 (checks were
2036  // deferred into vChecks).
2037  int nSigChecksRet;
2038  for (const auto &ptx : block.vtx) {
2039  const CTransaction &tx = *ptx;
2040  const bool isCoinBase = tx.IsCoinBase();
2041  nInputs += tx.vin.size();
2042 
2043  {
2044  Amount txfee = Amount::zero();
2045  TxValidationState tx_state;
2046  if (!isCoinBase &&
2047  !Consensus::CheckTxInputs(tx, tx_state, view, pindex->nHeight,
2048  txfee)) {
2049  // Any transaction validation failure in ConnectBlock is a block
2050  // consensus failure.
2052  tx_state.GetRejectReason(),
2053  tx_state.GetDebugMessage());
2054 
2055  return error("%s: Consensus::CheckTxInputs: %s, %s", __func__,
2056  tx.GetId().ToString(), state.ToString());
2057  }
2058  nFees += txfee;
2059  }
2060 
2061  if (!MoneyRange(nFees)) {
2062  LogPrintf("ERROR: %s: accumulated fee in the block out of range.\n",
2063  __func__);
2065  "bad-txns-accumulated-fee-outofrange");
2066  }
2067 
2068  // The following checks do not apply to the coinbase.
2069  if (isCoinBase) {
2070  continue;
2071  }
2072 
2073  // Check that transaction is BIP68 final BIP68 lock checks (as
2074  // opposed to nLockTime checks) must be in ConnectBlock because they
2075  // require the UTXO set.
2076  prevheights.resize(tx.vin.size());
2077  for (size_t j = 0; j < tx.vin.size(); j++) {
2078  prevheights[j] = view.AccessCoin(tx.vin[j].prevout).GetHeight();
2079  }
2080 
2081  if (!SequenceLocks(tx, nLockTimeFlags, prevheights, *pindex)) {
2082  LogPrintf("ERROR: %s: contains a non-BIP68-final transaction\n",
2083  __func__);
2085  "bad-txns-nonfinal");
2086  }
2087 
2088  // Don't cache results if we're actually connecting blocks (still
2089  // consult the cache, though).
2090  bool fCacheResults = fJustCheck;
2091 
2092  const bool fEnforceSigCheck = flags & SCRIPT_ENFORCE_SIGCHECKS;
2093  if (!fEnforceSigCheck) {
2094  // Historically, there has been transactions with a very high
2095  // sigcheck count, so we need to disable this check for such
2096  // transactions.
2097  nSigChecksTxLimiters[txIndex] = TxSigCheckLimiter::getDisabled();
2098  }
2099 
2100  std::vector<CScriptCheck> vChecks;
2101  TxValidationState tx_state;
2102  if (fScriptChecks &&
2103  !CheckInputScripts(tx, tx_state, view, flags, fCacheResults,
2104  fCacheResults, PrecomputedTransactionData(tx),
2105  nSigChecksRet, nSigChecksTxLimiters[txIndex],
2106  &nSigChecksBlockLimiter, &vChecks)) {
2107  // Any transaction validation failure in ConnectBlock is a block
2108  // consensus failure
2110  tx_state.GetRejectReason(),
2111  tx_state.GetDebugMessage());
2112  return error(
2113  "ConnectBlock(): CheckInputScripts on %s failed with %s",
2114  tx.GetId().ToString(), state.ToString());
2115  }
2116 
2117  control.Add(vChecks);
2118 
2119  // Note: this must execute in the same iteration as CheckTxInputs (not
2120  // in a separate loop) in order to detect double spends. However,
2121  // this does not prevent double-spending by duplicated transaction
2122  // inputs in the same transaction (cf. CVE-2018-17144) -- that check is
2123  // done in CheckBlock (CheckRegularTransaction).
2124  SpendCoins(view, tx, blockundo.vtxundo.at(txIndex), pindex->nHeight);
2125  txIndex++;
2126  }
2127 
2128  int64_t nTime3 = GetTimeMicros();
2129  nTimeConnect += nTime3 - nTime2;
2131  " - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) "
2132  "[%.2fs (%.2fms/blk)]\n",
2133  (unsigned)block.vtx.size(), MILLI * (nTime3 - nTime2),
2134  MILLI * (nTime3 - nTime2) / block.vtx.size(),
2135  nInputs <= 1 ? 0 : MILLI * (nTime3 - nTime2) / (nInputs - 1),
2137 
2138  const Amount blockReward =
2139  nFees + GetBlockSubsidy(pindex->nHeight, consensusParams);
2140  if (block.vtx[0]->GetValueOut() > blockReward) {
2141  LogPrintf("ERROR: ConnectBlock(): coinbase pays too much (actual=%d vs "
2142  "limit=%d)\n",
2143  block.vtx[0]->GetValueOut(), blockReward);
2145  "bad-cb-amount");
2146  }
2147 
2148  const std::vector<CTxDestination> whitelist =
2149  GetMinerFundWhitelist(consensusParams, pindex->pprev);
2150  if (!whitelist.empty()) {
2151  const Amount required = GetMinerFundAmount(blockReward);
2152 
2153  for (auto &o : block.vtx[0]->vout) {
2154  if (o.nValue < required) {
2155  // This output doesn't qualify because its amount is too low.
2156  continue;
2157  }
2158 
2159  CTxDestination address;
2160  if (!ExtractDestination(o.scriptPubKey, address)) {
2161  // Cannot decode address.
2162  continue;
2163  }
2164 
2165  if (std::find(whitelist.begin(), whitelist.end(), address) !=
2166  whitelist.end()) {
2167  goto MinerFundSuccess;
2168  }
2169  }
2170 
2171  // We did not find an output that match the miner fund requirements.
2173  "bad-cb-minerfund");
2174  }
2175 
2176 MinerFundSuccess:
2177 
2178  if (!control.Wait()) {
2180  "blk-bad-inputs", "parallel script check failed");
2181  }
2182 
2183  int64_t nTime4 = GetTimeMicros();
2184  nTimeVerify += nTime4 - nTime2;
2185  LogPrint(
2186  BCLog::BENCH,
2187  " - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs (%.2fms/blk)]\n",
2188  nInputs - 1, MILLI * (nTime4 - nTime2),
2189  nInputs <= 1 ? 0 : MILLI * (nTime4 - nTime2) / (nInputs - 1),
2191 
2192  if (fJustCheck) {
2193  return true;
2194  }
2195 
2196  if (!m_blockman.WriteUndoDataForBlock(blockundo, state, pindex, m_params)) {
2197  return false;
2198  }
2199 
2200  if (!pindex->IsValid(BlockValidity::SCRIPTS)) {
2202  m_blockman.m_dirty_blockindex.insert(pindex);
2203  }
2204 
2205  assert(pindex->phashBlock);
2206  // add this block to the view's block chain
2207  view.SetBestBlock(pindex->GetBlockHash());
2208 
2209  int64_t nTime5 = GetTimeMicros();
2210  nTimeIndex += nTime5 - nTime4;
2211  LogPrint(BCLog::BENCH, " - Index writing: %.2fms [%.2fs (%.2fms/blk)]\n",
2212  MILLI * (nTime5 - nTime4), nTimeIndex * MICRO,
2214 
2215  TRACE6(validation, block_connected, block_hash.data(), pindex->nHeight,
2216  block.vtx.size(), nInputs, nSigChecksRet,
2217  // in microseconds (µs)
2218  nTime5 - nTimeStart);
2219 
2220  return true;
2221 }
2222 
2223 CoinsCacheSizeState CChainState::GetCoinsCacheSizeState() {
// Convenience overload: classify current coins-cache memory usage using the
// configured cache budget plus the -maxmempool allowance (MB -> bytes via
// * 1000000). Delegates to the two-argument overload below.
// NOTE(review): the first argument to the delegated call (presumably the
// configured coins-tip cache size) appears elided in this extraction —
// confirm against the full source.
2225  return this->GetCoinsCacheSizeState(
2227  gArgs.GetIntArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000);
2228 }
2229 
// Classify the in-memory coins cache against the available budget:
// the explicit coins-cache limit plus whatever headroom the mempool is not
// currently using. Returns OK, or a larger state when nearing/over budget.
// NOTE(review): the return-type line and the `return` statements inside the
// two branches below (presumably CoinsCacheSizeState::CRITICAL and ::LARGE,
// matching the >= comparisons in FlushStateToDisk) appear elided in this
// extraction — confirm against the full source.
2231 CChainState::GetCoinsCacheSizeState(size_t max_coins_cache_size_bytes,
2232  size_t max_mempool_size_bytes) {
// Mempool memory not in use counts toward the coins-cache budget.
2234  int64_t nMempoolUsage = m_mempool ? m_mempool->DynamicMemoryUsage() : 0;
2235  int64_t cacheSize = CoinsTip().DynamicMemoryUsage();
2236  int64_t nTotalSpace =
2237  max_coins_cache_size_bytes +
2238  std::max<int64_t>(int64_t(max_mempool_size_bytes) - nMempoolUsage, 0);
2239 
// "Large" means within 10% or 10MB (whichever is closer) of the total budget.
2241  static constexpr int64_t MAX_BLOCK_COINSDB_USAGE_BYTES =
2242  10 * 1024 * 1024; // 10MB
2243  int64_t large_threshold = std::max(
2244  (9 * nTotalSpace) / 10, nTotalSpace - MAX_BLOCK_COINSDB_USAGE_BYTES);
2245 
2246  if (cacheSize > nTotalSpace) {
2247  LogPrintf("Cache size (%s) exceeds total space (%s)\n", cacheSize,
2248  nTotalSpace);
2250  } else if (cacheSize > large_threshold) {
2252  }
2253  return CoinsCacheSizeState::OK;
2254 }
2255 
2257  FlushStateMode mode,
2258  int nManualPruneHeight) {
// Persist block files, the block index, and (on a full flush) the coins cache
// to disk, honoring the requested FlushStateMode and any pending prune work.
// Returns false only via AbortNode on unrecoverable disk errors; a successful
// best-effort flush returns true.
// NOTE(review): this extraction elides several single call lines (e.g. the
// disk-space checks guarding the AbortNode calls, the block/undo-file flush,
// the prune-target helpers, and a post-flush ChainStateFlushed notification)
// — confirm against the full source before relying on exact behavior.
2259  LOCK(cs_main);
2260  assert(this->CanFlushToDisk());
// Persist across calls so periodic write/flush intervals can be measured.
2261  static std::chrono::microseconds nLastWrite{0};
2262  static std::chrono::microseconds nLastFlush{0};
2263  std::set<int> setFilesToPrune;
2264  bool full_flush_completed = false;
2265 
// Snapshot cache stats up front for the TRACE5 probe and bench logging below.
2266  const size_t coins_count = CoinsTip().GetCacheSize();
2267  const size_t coins_mem_usage = CoinsTip().DynamicMemoryUsage();
2268 
2269  try {
2270  {
2271  bool fFlushForPrune = false;
2272  bool fDoFullFlush = false;
2273 
2274  CoinsCacheSizeState cache_state = GetCoinsCacheSizeState();
// Determine which block files can be pruned, when pruning is enabled and
// either requested manually or flagged by the block manager.
2276  if (fPruneMode &&
2277  (m_blockman.m_check_for_pruning || nManualPruneHeight > 0) &&
2278  !fReindex) {
2279  // Make sure we don't prune above the blockfilterindexes
2280  // bestblocks. Pruning is height-based.
2281  int last_prune = m_chain.Height();
2283  last_prune = std::max(
2284  1, std::min(last_prune,
2285  index.GetSummary().best_block_height));
2286  });
2287 
2288  if (nManualPruneHeight > 0) {
2290  "find files to prune (manual)", BCLog::BENCH);
2292  setFilesToPrune,
2293  std::min(last_prune, nManualPruneHeight),
2294  m_chain.Height());
2295  } else {
2296  LOG_TIME_MILLIS_WITH_CATEGORY("find files to prune",
2297  BCLog::BENCH);
2299  setFilesToPrune, m_params.PruneAfterHeight(),
2300  m_chain.Height(), last_prune, IsInitialBlockDownload());
2302  }
2303  if (!setFilesToPrune.empty()) {
2304  fFlushForPrune = true;
// Record (once, persistently) that this node has pruned block files.
2305  if (!m_blockman.m_have_pruned) {
2306  m_blockman.m_block_tree_db->WriteFlag(
2307  "prunedblockfiles", true);
2308  m_blockman.m_have_pruned = true;
2309  }
2310  }
2311  }
2312  const auto nNow = GetTime<std::chrono::microseconds>();
2313  // Avoid writing/flushing immediately after startup.
2314  if (nLastWrite.count() == 0) {
2315  nLastWrite = nNow;
2316  }
2317  if (nLastFlush.count() == 0) {
2318  nLastFlush = nNow;
2319  }
2320  // The cache is large and we're within 10% and 10 MiB of the limit,
2321  // but we have time now (not in the middle of a block processing).
2322  bool fCacheLarge = mode == FlushStateMode::PERIODIC &&
2323  cache_state >= CoinsCacheSizeState::LARGE;
2324  // The cache is over the limit, we have to write now.
2325  bool fCacheCritical = mode == FlushStateMode::IF_NEEDED &&
2326  cache_state >= CoinsCacheSizeState::CRITICAL;
2327  // It's been a while since we wrote the block index to disk. Do this
2328  // frequently, so we don't need to redownload after a crash.
2329  bool fPeriodicWrite = mode == FlushStateMode::PERIODIC &&
2330  nNow > nLastWrite + DATABASE_WRITE_INTERVAL;
2331  // It's been very long since we flushed the cache. Do this
2332  // infrequently, to optimize cache usage.
2333  bool fPeriodicFlush = mode == FlushStateMode::PERIODIC &&
2334  nNow > nLastFlush + DATABASE_FLUSH_INTERVAL;
2335  // Combine all conditions that result in a full cache flush.
2336  fDoFullFlush = (mode == FlushStateMode::ALWAYS) || fCacheLarge ||
2337  fCacheCritical || fPeriodicFlush || fFlushForPrune;
2338  // Write blocks and block index to disk.
2339  if (fDoFullFlush || fPeriodicWrite) {
2340  // Ensure we can write block index
// NOTE(review): the guarding disk-space check appears elided here.
2342  return AbortNode(state, "Disk space is too low!",
2343  _("Disk space is too low!"));
2344  }
2345 
2346  {
2348  "write block and undo data to disk", BCLog::BENCH);
2349 
2350  // First make sure all block and undo data is flushed to
2351  // disk.
// NOTE(review): the actual flush call appears elided here.
2353  }
2354  // Then update all block file information (which may refer to
2355  // block and undo files).
2356  {
2357  LOG_TIME_MILLIS_WITH_CATEGORY("write block index to disk",
2358  BCLog::BENCH);
2359 
2360  if (!m_blockman.WriteBlockIndexDB()) {
2361  return AbortNode(
2362  state, "Failed to write to block index database");
2363  }
2364  }
2365 
2366  // Finally remove any pruned files
2367  if (fFlushForPrune) {
2368  LOG_TIME_MILLIS_WITH_CATEGORY("unlink pruned files",
2369  BCLog::BENCH);
2370 
2371  UnlinkPrunedFiles(setFilesToPrune);
2372  }
2373  nLastWrite = nNow;
2374  }
2375  // Flush best chain related state. This can only be done if the
2376  // blocks / block index write was also done.
2377  if (fDoFullFlush && !CoinsTip().GetBestBlock().IsNull()) {
2379  strprintf("write coins cache to disk (%d coins, %.2fkB)",
2380  coins_count, coins_mem_usage / 1000),
2381  BCLog::BENCH);
2382 
2383  // Typical Coin structures on disk are around 48 bytes in size.
2384  // Pushing a new one to the database can cause it to be written
2385  // twice (once in the log, and once in the tables). This is
2386  // already an overestimation, as most will delete an existing
2387  // entry or overwrite one. Still, use a conservative safety
2388  // factor of 2.
2390  48 * 2 * 2 * CoinsTip().GetCacheSize())) {
2391  return AbortNode(state, "Disk space is too low!",
2392  _("Disk space is too low!"));
2393  }
2394 
2395  // Flush the chainstate (which may refer to block index
2396  // entries).
2397  if (!CoinsTip().Flush()) {
2398  return AbortNode(state, "Failed to write to coin database");
2399  }
2400  nLastFlush = nNow;
2401  full_flush_completed = true;
2402  }
2403 
2404  TRACE5(utxocache, flush,
2405  // in microseconds (µs)
2406  GetTimeMicros() - nNow.count(), uint32_t(mode), coins_count,
2407  uint64_t(coins_mem_usage), fFlushForPrune);
2408  }
2409 
2410  if (full_flush_completed) {
2411  // Update best block in wallet (so we can detect restored wallets).
2413  }
2414  } catch (const std::runtime_error &e) {
// Any filesystem/database error is fatal: abort rather than continue with
// potentially inconsistent on-disk state.
2415  return AbortNode(state, std::string("System error while flushing: ") +
2416  e.what());
2417  }
2418  return true;
2419 }
2420 
// Body of the force-flush helper (its signature line is elided in this
// extraction): unconditionally flush chain state to disk via
// FlushStateMode::ALWAYS, logging — but not propagating — any failure.
2422  BlockValidationState state;
2423  if (!this->FlushStateToDisk(state, FlushStateMode::ALWAYS)) {
2424  LogPrintf("%s: failed to flush state (%s)\n", __func__,
2425  state.ToString());
2426  }
2427 }
2428 
// Body of the prune-and-flush helper (its signature line, and presumably a
// statement flagging the block manager for pruning, are elided in this
// extraction). FlushStateMode::NONE triggers the prune path of
// FlushStateToDisk without forcing a cache flush; failures are only logged.
2430  BlockValidationState state;
2432  if (!this->FlushStateToDisk(state, FlushStateMode::NONE)) {
2433  LogPrintf("%s: failed to flush state (%s)\n", __func__,
2434  state.ToString());
2435  }
2436 }
2437 
// Log a one-line summary of a new chain tip: hash, height, version, chain
// work (log2), cumulative tx count, block time, verification progress, and
// coins-cache usage. `prefix` distinguishes background-validation logs;
// `func_name` is the caller's __func__.
// NOTE(review): the line between the parameter list and LogPrintf (presumably
// a lock annotation and the opening brace) is elided in this extraction.
2438 static void UpdateTipLog(const CCoinsViewCache &coins_tip,
2439  const CBlockIndex *tip, const CChainParams &params,
2440  const std::string &func_name,
2441  const std::string &prefix)
2444  LogPrintf("%s%s: new best=%s height=%d version=0x%08x log2_work=%f tx=%ld "
2445  "date='%s' progress=%f cache=%.1fMiB(%utxo)\n",
2446  prefix, func_name, tip->GetBlockHash().ToString(), tip->nHeight,
2447  tip->nVersion, log(tip->nChainWork.getdouble()) / log(2.0),
2448  tip->GetChainTxCount(),
2449  FormatISO8601DateTime(tip->GetBlockTime()),
2450  GuessVerificationProgress(params.TxData(), tip),
2451  coins_tip.DynamicMemoryUsage() * (1.0 / (1 << 20)),
2452  coins_tip.GetCacheSize());
2453 }
2454 
2455 void CChainState::UpdateTip(const CBlockIndex *pindexNew) {
// Called after the chain tip changes: logs the new tip and, for the active
// chainstate only, publishes the new best block hash to waiters.
2457  const auto &coins_tip = CoinsTip();
2458 
2459  // The remainder of the function isn't relevant if we are not acting on
2460  // the active chainstate, so return if need be.
2461  if (this != &m_chainman.ActiveChainstate()) {
2462  // Only log every so often so that we don't bury log messages at the
2463  // tip.
2464  constexpr int BACKGROUND_LOG_INTERVAL = 2000;
2465  if (pindexNew->nHeight % BACKGROUND_LOG_INTERVAL == 0) {
2466  UpdateTipLog(coins_tip, pindexNew, m_params, __func__,
2467  "[background validation] ");
2468  }
2469  return;
2470  }
2471 
2472  // New best block
// NOTE(review): the mempool call inside this branch (presumably a
// transactions-updated notification) is elided in this extraction.
2473  if (m_mempool) {
2475  }
2476 
// Publish the new tip and wake any threads waiting on g_best_block_cv.
// NOTE(review): the lock acquisition protecting g_best_block appears elided.
2477  {
2479  g_best_block = pindexNew->GetBlockHash();
2480  g_best_block_cv.notify_all();
2481  }
2482 
2483  UpdateTipLog(coins_tip, pindexNew, m_params, __func__, "");
2484 }
2485 
2498  DisconnectedBlockTransactions *disconnectpool) {
// Disconnect the current chain tip: read its block from disk, undo its
// effect on the UTXO view, flush if needed, move its transactions into
// `disconnectpool` (when provided) for later re-evaluation, and set the tip
// to the previous block. Returns false on failure.
2500  if (m_mempool) {
2502  }
2503 
2504  CBlockIndex *pindexDelete = m_chain.Tip();
2505  const Consensus::Params &consensusParams = m_params.GetConsensus();
2506 
2507  assert(pindexDelete);
2508 
2509  // Read block from disk.
2510  std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
2511  CBlock &block = *pblock;
2512  if (!ReadBlockFromDisk(block, pindexDelete, consensusParams)) {
2513  return error("DisconnectTip(): Failed to read block");
2514  }
2515 
2516  // Apply the block atomically to the chain state.
2517  int64_t nStart = GetTimeMicros();
2518  {
2519  CCoinsViewCache view(&CoinsTip());
2520  assert(view.GetBestBlock() == pindexDelete->GetBlockHash());
2521  if (DisconnectBlock(block, pindexDelete, view) !=
2523  return error("DisconnectTip(): DisconnectBlock %s failed",
2524  pindexDelete->GetBlockHash().ToString());
2525  }
2526 
2527  bool flushed = view.Flush();
2528  assert(flushed);
2529  }
2530 
2531  LogPrint(BCLog::BENCH, "- Disconnect block: %.2fms\n",
2532  (GetTimeMicros() - nStart) * MILLI);
2533 
2534  // Write the chain state to disk, if necessary.
// NOTE(review): the FlushStateToDisk call guarding this early return appears
// elided in this extraction.
2536  return false;
2537  }
2538 
2539  if (m_mempool) {
2540  // If this block is deactivating a fork, we move all mempool
2541  // transactions in front of disconnectpool for reprocessing in a future
2542  // updateMempoolForReorg call
2543  if (pindexDelete->pprev != nullptr &&
2544  GetNextBlockScriptFlags(consensusParams, pindexDelete) !=
2545  GetNextBlockScriptFlags(consensusParams, pindexDelete->pprev)) {
2547  "Disconnecting mempool due to rewind of upgrade block\n");
2548  if (disconnectpool) {
2549  disconnectpool->importMempool(*m_mempool);
2550  }
2551  m_mempool->clear();
2552  }
2553 
2554  if (disconnectpool) {
2555  disconnectpool->addForBlock(block.vtx, *m_mempool);
2556  }
2557  }
2558 
2559  m_chain.SetTip(pindexDelete->pprev);
2560 
2561  UpdateTip(pindexDelete->pprev);
2562  // Let wallets know transactions went from 1-confirmed to
2563  // 0-confirmed or conflicted:
2564  GetMainSignals().BlockDisconnected(pblock, pindexDelete);
2565  return true;
2566 }
2567 
// Cumulative per-stage timers (microseconds) for ConnectTip, reported via
// BCLog::BENCH log lines.
2568 static int64_t nTimeReadFromDisk = 0;
2569 static int64_t nTimeConnectTotal = 0;
2570 static int64_t nTimeFlush = 0;
2571 static int64_t nTimeChainState = 0;
2572 static int64_t nTimePostConnect = 0;
2573 
// Members of PerBlockConnectTrace (the struct header line is elided in this
// extraction): a connected block's index entry and the block itself, as
// recorded by ConnectTrace.
2575  CBlockIndex *pindex = nullptr;
2576  std::shared_ptr<const CBlock> pblock;
2578 };
2579 
// Interior of ConnectTrace (the class header is elided in this extraction):
// accumulates the blocks connected during a single chain-activation step so
// callers can fire per-block notifications afterwards.
2588 private:
2589  std::vector<PerBlockConnectTrace> blocksConnected;
2590 
2591 public:
// Start with one empty trailing entry; see GetBlocksConnected() for why the
// list always keeps an empty sentinel at the back.
2592  explicit ConnectTrace() : blocksConnected(1) {}
2593 
// Record a newly connected block. Fills the trailing sentinel entry, then
// appends a fresh sentinel for the next block.
// NOTE(review): the method-name line (taking a CBlockIndex* first parameter)
// is elided in this extraction.
2595  std::shared_ptr<const CBlock> pblock) {
2596  assert(!blocksConnected.back().pindex);
2597  assert(pindex);
2598  assert(pblock);
2599  blocksConnected.back().pindex = pindex;
2600  blocksConnected.back().pblock = std::move(pblock);
2601  blocksConnected.emplace_back();
2602  }
2603 
2604  std::vector<PerBlockConnectTrace> &GetBlocksConnected() {
2605  // We always keep one extra block at the end of our list because blocks
2606  // are added after all the conflicted transactions have been filled in.
2607  // Thus, the last entry should always be an empty one waiting for the
2608  // transactions from the next block. We pop the last entry here to make
2609  // sure the list we return is sane.
2610  assert(!blocksConnected.back().pindex);
2611  blocksConnected.pop_back();
2612  return blocksConnected;
2613  }
2614 };
2615 
2625  CBlockIndex *pindexNew,
2626  const std::shared_ptr<const CBlock> &pblock,
2627  ConnectTrace &connectTrace,
2628  DisconnectedBlockTransactions &disconnectpool) {
// Connect a single block on top of the current tip: read it from disk if not
// supplied, validate and apply it via ConnectBlock, flush state if needed,
// update the mempool, advance the chain tip, and record the block in
// `connectTrace`. Returns false (with `state` set) on failure.
2630  if (m_mempool) {
2632  }
2633 
2634  const Consensus::Params &consensusParams = m_params.GetConsensus();
2635 
// ConnectTip only ever extends the current tip by exactly one block.
2636  assert(pindexNew->pprev == m_chain.Tip());
2637  // Read block from disk.
2638  int64_t nTime1 = GetTimeMicros();
2639  std::shared_ptr<const CBlock> pthisBlock;
2640  if (!pblock) {
2641  std::shared_ptr<CBlock> pblockNew = std::make_shared<CBlock>();
2642  if (!ReadBlockFromDisk(*pblockNew, pindexNew, consensusParams)) {
2643  return AbortNode(state, "Failed to read block");
2644  }
2645  pthisBlock = pblockNew;
2646  } else {
2647  pthisBlock = pblock;
2648  }
2649 
2650  const CBlock &blockConnecting = *pthisBlock;
2651 
2652  // Apply the block atomically to the chain state.
2653  int64_t nTime2 = GetTimeMicros();
2654  nTimeReadFromDisk += nTime2 - nTime1;
2655  int64_t nTime3;
2656  LogPrint(BCLog::BENCH, " - Load block from disk: %.2fms [%.2fs]\n",
2657  (nTime2 - nTime1) * MILLI, nTimeReadFromDisk * MICRO);
2658  {
2659  CCoinsViewCache view(&CoinsTip());
2660  bool rv = ConnectBlock(blockConnecting, state, pindexNew, view,
2661  BlockValidationOptions(config));
// Notify listeners of the validation outcome before acting on failure.
2662  GetMainSignals().BlockChecked(blockConnecting, state);
2663  if (!rv) {
2664  if (state.IsInvalid()) {
2665  InvalidBlockFound(pindexNew, state);
2666  }
2667 
2668  return error("%s: ConnectBlock %s failed, %s", __func__,
2669  pindexNew->GetBlockHash().ToString(),
2670  state.ToString());
2671  }
2672 
2673  nTime3 = GetTimeMicros();
2674  nTimeConnectTotal += nTime3 - nTime2;
2675  assert(nBlocksTotal > 0);
2677  " - Connect total: %.2fms [%.2fs (%.2fms/blk)]\n",
2678  (nTime3 - nTime2) * MILLI, nTimeConnectTotal * MICRO,
2680  bool flushed = view.Flush();
2681  assert(flushed);
2682  }
2683 
2684  int64_t nTime4 = GetTimeMicros();
2685  nTimeFlush += nTime4 - nTime3;
2686  LogPrint(BCLog::BENCH, " - Flush: %.2fms [%.2fs (%.2fms/blk)]\n",
2687  (nTime4 - nTime3) * MILLI, nTimeFlush * MICRO,
2689 
2690  // Write the chain state to disk, if necessary.
// NOTE(review): the FlushStateToDisk call guarding this early return appears
// elided in this extraction.
2692  return false;
2693  }
2694 
2695  int64_t nTime5 = GetTimeMicros();
2696  nTimeChainState += nTime5 - nTime4;
2698  " - Writing chainstate: %.2fms [%.2fs (%.2fms/blk)]\n",
2699  (nTime5 - nTime4) * MILLI, nTimeChainState * MICRO,
2701 
2702  // Remove conflicting transactions from the mempool.;
2703  if (m_mempool) {
2704  m_mempool->removeForBlock(blockConnecting.vtx, pindexNew->nHeight);
2705  disconnectpool.removeForBlock(blockConnecting.vtx);
2706 
2707  // If this block is activating a fork, we move all mempool transactions
2708  // in front of disconnectpool for reprocessing in a future
2709  // updateMempoolForReorg call
2710  if (pindexNew->pprev != nullptr &&
2711  GetNextBlockScriptFlags(consensusParams, pindexNew) !=
2712  GetNextBlockScriptFlags(consensusParams, pindexNew->pprev)) {
2713  LogPrint(
2715  "Disconnecting mempool due to acceptance of upgrade block\n");
2716  disconnectpool.importMempool(*m_mempool);
2717  }
2718  }
2719 
2720  // Update m_chain & related variables.
2721  m_chain.SetTip(pindexNew);
2722  UpdateTip(pindexNew);
2723 
2724  int64_t nTime6 = GetTimeMicros();
2725  nTimePostConnect += nTime6 - nTime5;
2726  nTimeTotal += nTime6 - nTime1;
2728  " - Connect postprocess: %.2fms [%.2fs (%.2fms/blk)]\n",
2729  (nTime6 - nTime5) * MILLI, nTimePostConnect * MICRO,
2731  LogPrint(BCLog::BENCH, "- Connect block: %.2fms [%.2fs (%.2fms/blk)]\n",
2732  (nTime6 - nTime1) * MILLI, nTimeTotal * MICRO,
2734 
2735  connectTrace.BlockConnected(pindexNew, std::move(pthisBlock));
2736  return true;
2737 }
2738 
// CChainState::FindMostWorkChain — walk setBlockIndexCandidates from the
// highest-work candidate downward and return the most-work tip whose path
// back to the active chain consists only of usable blocks (not invalid, not
// parked, block data present), or nullptr if no candidate exists at all.
// Side effects: parks candidates that would reorg an avalanche-finalized
// block, may auto-unpark chains that accumulated enough extra PoW, prunes
// unusable candidates from the set, and appends tips eligible for avalanche
// voting to blocksToReconcile.
// NOTE(review): this doxygen-derived listing is missing several physical
// lines (the function signature line, lock assertions, a LogPrint category
// argument, an m_blocks_unlinked.insert call) — verify against the original
// validation.cpp before relying on it.
2744  std::vector<const CBlockIndex *> &blocksToReconcile) {
2746  do {
2747  CBlockIndex *pindexNew = nullptr;
2748 
2749  // Find the best candidate header.
2750  {
      // setBlockIndexCandidates is ordered by increasing work, so the best
      // candidate is the last element (reverse begin).
2751  std::set<CBlockIndex *, CBlockIndexWorkComparator>::reverse_iterator
2752  it = setBlockIndexCandidates.rbegin();
2753  if (it == setBlockIndexCandidates.rend()) {
2754  return nullptr;
2755  }
2756  pindexNew = *it;
2757  }
2758 
2759  // If this block will cause an avalanche finalized block to be reorged,
2760  // then we park it.
2761  {
2763  if (m_avalancheFinalizedBlockIndex &&
2764  !AreOnTheSameFork(pindexNew, m_avalancheFinalizedBlockIndex)) {
2765  LogPrintf("Park block %s because it forks prior to the "
2766  "avalanche finalized chaintip.\n",
2767  pindexNew->GetBlockHash().ToString());
2768  pindexNew->nStatus = pindexNew->nStatus.withParked();
2769  m_blockman.m_dirty_blockindex.insert(pindexNew);
2770  }
2771  }
2772 
      // Automatic unparking defaults to on only when avalanche is disabled;
      // with avalanche enabled, unparking is expected to come from voting.
2773  const bool fAvalancheEnabled = isAvalancheEnabled(gArgs);
2774  const bool fAutoUnpark =
2775  gArgs.GetBoolArg("-automaticunparking", !fAvalancheEnabled);
2776 
2777  const CBlockIndex *pindexFork = m_chain.FindFork(pindexNew);
2778 
2779  // Check whether all blocks on the path between the currently active
2780  // chain and the candidate are valid. Just going until the active chain
2781  // is an optimization, as we know all blocks in it are valid already.
2782  CBlockIndex *pindexTest = pindexNew;
2783  bool hasValidAncestor = true;
2784  while (hasValidAncestor && pindexTest && pindexTest != pindexFork) {
2785  assert(pindexTest->HaveTxsDownloaded() || pindexTest->nHeight == 0);
2786 
2787  // If this is a parked chain, but it has enough PoW, clear the park
2788  // state.
2789  bool fParkedChain = pindexTest->nStatus.isOnParkedChain();
2790  if (fAutoUnpark && fParkedChain) {
2791  const CBlockIndex *pindexTip = m_chain.Tip();
2792 
2793  // During initialization, pindexTip and/or pindexFork may be
2794  // null. In this case, we just ignore the fact that the chain is
2795  // parked.
2796  if (!pindexTip || !pindexFork) {
2797  UnparkBlock(pindexTest);
2798  continue;
2799  }
2800 
2801  // A parked chain can be unparked if it has twice as much PoW
2802  // accumulated as the main chain has since the fork block.
2803  CBlockIndex const *pindexExtraPow = pindexTip;
2804  arith_uint256 requiredWork = pindexTip->nChainWork;
2805  switch (pindexTip->nHeight - pindexFork->nHeight) {
2806  // Limit the penality for depth 1, 2 and 3 to half a block
2807  // worth of work to ensure we don't fork accidentally.
2808  case 3:
2809  case 2:
2810  pindexExtraPow = pindexExtraPow->pprev;
2811  // FALLTHROUGH
2812  case 1: {
2813  const arith_uint256 deltaWork =
2814  pindexExtraPow->nChainWork - pindexFork->nChainWork;
2815  requiredWork += (deltaWork >> 1);
2816  break;
2817  }
2818  default:
2819  requiredWork +=
2820  pindexExtraPow->nChainWork - pindexFork->nChainWork;
2821  break;
2822  }
2823 
2824  if (pindexNew->nChainWork > requiredWork) {
2825  // We have enough, clear the parked state.
2826  LogPrintf("Unpark chain up to block %s as it has "
2827  "accumulated enough PoW.\n",
2828  pindexNew->GetBlockHash().ToString());
2829  fParkedChain = false;
2830  UnparkBlock(pindexTest);
2831  }
2832  }
2833 
2834  // Pruned nodes may have entries in setBlockIndexCandidates for
2835  // which block files have been deleted. Remove those as candidates
2836  // for the most work chain if we come across them; we can't switch
2837  // to a chain unless we have all the non-active-chain parent blocks.
2838  bool fInvalidChain = pindexTest->nStatus.isInvalid();
2839  bool fMissingData = !pindexTest->nStatus.hasData();
2840  if (!(fInvalidChain || fParkedChain || fMissingData)) {
2841  // The current block is acceptable, move to the parent, up to
2842  // the fork point.
2843  pindexTest = pindexTest->pprev;
2844  continue;
2845  }
2846 
2847  // Candidate chain is not usable (either invalid or parked or
2848  // missing data)
2849  hasValidAncestor = false;
2850  setBlockIndexCandidates.erase(pindexTest);
2851 
      // Track the best-work invalid/parked tips seen, used elsewhere for
      // fork warnings.
2852  if (fInvalidChain && (m_chainman.m_best_invalid == nullptr ||
2853  pindexNew->nChainWork >
2854  m_chainman.m_best_invalid->nChainWork)) {
2855  m_chainman.m_best_invalid = pindexNew;
2856  }
2857 
2858  if (fParkedChain && (m_chainman.m_best_parked == nullptr ||
2859  pindexNew->nChainWork >
2860  m_chainman.m_best_parked->nChainWork)) {
2861  m_chainman.m_best_parked = pindexNew;
2862  }
2863 
2864  LogPrintf("Considered switching to better tip %s but that chain "
2865  "contains a%s%s%s block.\n",
2866  pindexNew->GetBlockHash().ToString(),
2867  fInvalidChain ? "n invalid" : "",
2868  fParkedChain ? " parked" : "",
2869  fMissingData ? " missing-data" : "");
2870 
2871  CBlockIndex *pindexFailed = pindexNew;
2872  // Remove the entire chain from the set.
2873  while (pindexTest != pindexFailed) {
2874  if (fInvalidChain || fParkedChain) {
2875  pindexFailed->nStatus =
2876  pindexFailed->nStatus.withFailedParent(fInvalidChain)
2877  .withParkedParent(fParkedChain);
2878  } else if (fMissingData) {
2879  // If we're missing data, then add back to
2880  // m_blocks_unlinked, so that if the block arrives in the
2881  // future we can try adding to setBlockIndexCandidates
2882  // again.
      // NOTE(review): the m_blocks_unlinked.insert(... call line was
      // dropped by the listing; only its argument survives below.
2884  std::make_pair(pindexFailed->pprev, pindexFailed));
2885  }
2886  setBlockIndexCandidates.erase(pindexFailed);
2887  pindexFailed = pindexFailed->pprev;
2888  }
2889 
2890  if (fInvalidChain || fParkedChain) {
2891  // We discovered a new chain tip that is either parked or
2892  // invalid, we may want to warn.
      // NOTE(review): the warning call on the dropped line 2893 is not
      // visible here — confirm against the original source.
2894  }
2895  }
2896 
2897  if (fAvalancheEnabled && g_avalanche) {
2898  blocksToReconcile.push_back(pindexNew);
2899  }
2900 
2901  // We found a candidate that has valid ancestors. This is our guy.
2902  if (hasValidAncestor) {
2903  return pindexNew;
2904  }
2905  } while (true);
2906 }
2907 
// CChainState::PruneBlockIndexCandidates — remove from
// setBlockIndexCandidates every entry with strictly less work than the
// current tip; the tip itself, or a successor we are working towards, must
// always remain so the set never goes empty.
// NOTE(review): the function signature line is missing from this
// doxygen-derived listing.
2913  // Note that we can't delete the current block itself, as we may need to
2914  // return to it later in case a reorganization to a better block fails.
2915  auto it = setBlockIndexCandidates.begin();
2916  while (it != setBlockIndexCandidates.end() &&
2917  setBlockIndexCandidates.value_comp()(*it, m_chain.Tip())) {
      // Post-increment keeps the iterator valid across the erase.
2918  setBlockIndexCandidates.erase(it++);
2919  }
2920 
2921  // Either the current tip or a successor of it we're working towards is left
2922  // in setBlockIndexCandidates.
2923  assert(!setBlockIndexCandidates.empty());
2924 }
2925 
// CChainState::ActivateBestChainStep — try to make some progress towards
// making pindexMostWork the active tip: disconnect active blocks no longer
// on the best chain, then connect up to 32 new blocks per call (releasing
// the caller's locks between calls). pblock is either null or the block
// corresponding to pindexMostWork. Sets fInvalidFound if a candidate block
// turned out to violate consensus. Returns false only on a system error
// (disk/database failure); the mempool is kept consistent in every exit
// path.
// NOTE(review): this doxygen-derived listing drops the signature line, lock
// assertions, a FlushStateToDisk-style block around line 2939, and LogPrint
// category arguments — verify against the original validation.cpp.
2934  const Config &config, BlockValidationState &state,
2935  CBlockIndex *pindexMostWork, const std::shared_ptr<const CBlock> &pblock,
2936  bool &fInvalidFound, ConnectTrace &connectTrace) {
2938  if (m_mempool) {
      // NOTE(review): the statement inside this block (line 2939) was
      // dropped by the listing — presumably a mempool lock assertion;
      // confirm against the original source.
2940  }
2941 
2942  const CBlockIndex *pindexOldTip = m_chain.Tip();
2943  const CBlockIndex *pindexFork = m_chain.FindFork(pindexMostWork);
2944 
2945  // Disconnect active blocks which are no longer in the best chain.
2946  bool fBlocksDisconnected = false;
2947  DisconnectedBlockTransactions disconnectpool;
2948  while (m_chain.Tip() && m_chain.Tip() != pindexFork) {
2949  if (!DisconnectTip(state, &disconnectpool)) {
2950  // This is likely a fatal error, but keep the mempool consistent,
2951  // just in case. Only remove from the mempool in this case.
2952  if (m_mempool) {
2953  disconnectpool.updateMempoolForReorg(config, *this, false,
2954  *m_mempool);
2955  }
2956 
2957  // If we're unable to disconnect a block during normal operation,
2958  // then that is a failure of our local system -- we should abort
2959  // rather than stay on a less work chain.
2960  AbortNode(state,
2961  "Failed to disconnect block; see debug.log for details");
2962  return false;
2963  }
2964 
2965  fBlocksDisconnected = true;
2966  }
2967 
2968  // Build list of new blocks to connect.
2969  std::vector<CBlockIndex *> vpindexToConnect;
2970  bool fContinue = true;
2971  int nHeight = pindexFork ? pindexFork->nHeight : -1;
2972  while (fContinue && nHeight != pindexMostWork->nHeight) {
2973  // Don't iterate the entire list of potential improvements toward the
2974  // best tip, as we likely only need a few blocks along the way.
2975  int nTargetHeight = std::min(nHeight + 32, pindexMostWork->nHeight);
2976  vpindexToConnect.clear();
2977  vpindexToConnect.reserve(nTargetHeight - nHeight);
      // Collect the ancestors between nHeight and nTargetHeight in
      // descending order; they are connected in reverse below.
2978  CBlockIndex *pindexIter = pindexMostWork->GetAncestor(nTargetHeight);
2979  while (pindexIter && pindexIter->nHeight != nHeight) {
2980  vpindexToConnect.push_back(pindexIter);
2981  pindexIter = pindexIter->pprev;
2982  }
2983 
2984  nHeight = nTargetHeight;
2985 
2986  // Connect new blocks.
2987  for (CBlockIndex *pindexConnect : reverse_iterate(vpindexToConnect)) {
2988  if (!ConnectTip(config, state, pindexConnect,
2989  pindexConnect == pindexMostWork
2990  ? pblock
2991  : std::shared_ptr<const CBlock>(),
2992  connectTrace, disconnectpool)) {
2993  if (state.IsInvalid()) {
2994  // The block violates a consensus rule.
2995  if (state.GetResult() !=
      // NOTE(review): the comparison operand (line 2996, presumably
      // BlockValidationResult::BLOCK_MUTATED or similar) was dropped
      // by the listing — confirm against the original source.
2997  InvalidChainFound(vpindexToConnect.back());
2998  }
2999  state = BlockValidationState();
3000  fInvalidFound = true;
3001  fContinue = false;
3002  break;
3003  }
3004 
3005  // A system error occurred (disk space, database error, ...).
3006  // Make the mempool consistent with the current tip, just in
3007  // case any observers try to use it before shutdown.
3008  if (m_mempool) {
3009  disconnectpool.updateMempoolForReorg(config, *this, false,
3010  *m_mempool);
3011  }
3012  return false;
3013  } else {
3015  if (!pindexOldTip ||
3016  m_chain.Tip()->nChainWork > pindexOldTip->nChainWork) {
3017  // We're in a better position than we were. Return
3018  // temporarily to release the lock.
3019  fContinue = false;
3020  break;
3021  }
3022  }
3023  }
3024  }
3025 
3026  if (m_mempool) {
3027  if (fBlocksDisconnected || !disconnectpool.isEmpty()) {
3028  // If any blocks were disconnected, we need to update the mempool
3029  // even if disconnectpool is empty. The disconnectpool may also be
3030  // non-empty if the mempool was imported due to new validation rules
3031  // being in effect.
3033  "Updating mempool due to reorganization or "
3034  "rules upgrade/downgrade\n");
3035  disconnectpool.updateMempoolForReorg(config, *this, true,
3036  *m_mempool);
3037  }
3038 
3039  m_mempool->check(this->CoinsTip(), this->m_chain.Height() + 1);
3040  }
3041 
3042  // Callbacks/notifications for a new best chain.
3043  if (fInvalidFound) {
3044  CheckForkWarningConditionsOnNewFork(pindexMostWork);
3045  } else {
      // NOTE(review): the else-branch statement (line 3046, presumably
      // CheckForkWarningConditions()) was dropped by the listing.
3047  }
3048 
3049  return true;
3050 }
3051 
// GetSynchronizationState — map the node's startup/sync phase to the
// SynchronizationState value reported to UI listeners: post-init when not
// in initial block download, otherwise reindexing vs. initial download.
// NOTE(review): the function signature and all three return statements
// were dropped by this doxygen-derived listing; only the control-flow
// skeleton survives — confirm against the original validation.cpp.
3053  if (!init) {
3055  }
3056  if (::fReindex) {
3058  }
3060 }
3061 
// NotifyHeaderTip — fire a UI notification when the best known header has
// changed since the previous call. The static pindexHeaderOld carries the
// last-seen header across calls; cs_main is held only while sampling the
// header and IBD state, and the notification itself is sent without the
// lock. Returns true iff a notification was sent.
// NOTE(review): the function signature line is missing from this
// doxygen-derived listing.
3063  bool fNotify = false;
3064  bool fInitialBlockDownload = false;
3065  static CBlockIndex *pindexHeaderOld = nullptr;
3066  CBlockIndex *pindexHeader = nullptr;
3067  {
3068  LOCK(cs_main);
3069  pindexHeader = chainstate.m_chainman.m_best_header;
3070 
3071  if (pindexHeader != pindexHeaderOld) {
3072  fNotify = true;
3073  fInitialBlockDownload = chainstate.IsInitialBlockDownload();
3074  pindexHeaderOld = pindexHeader;
3075  }
3076  }
3077 
3078  // Send block tip changed notifications without cs_main
3079  if (fNotify) {
3080  uiInterface.NotifyHeaderTip(
3081  GetSynchronizationState(fInitialBlockDownload), pindexHeader);
3082  }
3083  return fNotify;
3084 }
3085 
// LimitValidationInterfaceQueue — bound the size of the validation
// interface callback queue: when more than 10 callbacks are pending, wait
// for the queue to drain before proceeding. This keeps memory in check
// during long connect/disconnect loops.
// NOTE(review): the function signature, a lock assertion, and the draining
// call inside the if-body were dropped by this doxygen-derived listing —
// confirm against the original validation.cpp.
3088 
3089  if (GetMainSignals().CallbacksPending() > 10) {
3091  }
3092 }
3093 
// CChainState::ActivateBestChain — make the most-work chain active,
// repeatedly calling ActivateBestChainStep while periodically releasing
// cs_main so other threads can make progress and the callback queue can
// drain. pblock, if non-null, may correspond to the new best tip and avoids
// a disk read. Handles tip notifications, avalanche reconciliation of new
// tips, -stopatheight shutdown, and a periodic flush at the end. Returns
// false only on a system error.
// NOTE(review): this doxygen-derived listing drops the signature line,
// several lock assertions/acquisitions, and the FlushStateToDisk call near
// the end (only its failure branch survives) — verify against the original
// validation.cpp.
3095  BlockValidationState &state,
3096  std::shared_ptr<const CBlock> pblock) {
3098 
3099  // Note that while we're often called here from ProcessNewBlock, this is
3100  // far from a guarantee. Things in the P2P/RPC will often end up calling
3101  // us in the middle of ProcessNewBlock - do not assume pblock is set
3102  // sanely for performance or correctness!
3104 
3105  // ABC maintains a fair degree of expensive-to-calculate internal state
3106  // because this function periodically releases cs_main so that it does not
3107  // lock up other threads for too long during large connects - and to allow
3108  // for e.g. the callback queue to drain we use m_chainstate_mutex to enforce
3109  // mutual exclusion so that only one caller may execute this function at a
3110  // time
3112 
3113  CBlockIndex *pindexMostWork = nullptr;
3114  CBlockIndex *pindexNewTip = nullptr;
3115  int nStopAtHeight = gArgs.GetIntArg("-stopatheight", DEFAULT_STOPATHEIGHT);
3116  do {
3117  // Block until the validation queue drains. This should largely
3118  // never happen in normal operation, however may happen during
3119  // reindex, causing memory blowup if we run too far ahead.
3120  // Note that if a validationinterface callback ends up calling
3121  // ActivateBestChain this may lead to a deadlock! We should
3122  // probably have a DEBUG_LOCKORDER test for this in the future.
3124 
3125  std::vector<const CBlockIndex *> blocksToReconcile;
3126  bool blocks_connected = false;
3127 
3128  {
3129  LOCK(cs_main);
3130  // Lock transaction pool for at least as long as it takes for
3131  // connectTrace to be consumed
3132  LOCK(MempoolMutex());
3133  CBlockIndex *starting_tip = m_chain.Tip();
3134  do {
3135  // We absolutely may not unlock cs_main until we've made forward
3136  // progress (with the exception of shutdown due to hardware
3137  // issues, low disk space, etc).
3138 
3139  // Destructed before cs_main is unlocked
3140  ConnectTrace connectTrace;
3141 
3142  if (pindexMostWork == nullptr) {
3143  pindexMostWork = FindMostWorkChain(blocksToReconcile);
3144  }
3145 
3146  // Whether we have anything to do at all.
3147  if (pindexMostWork == nullptr ||
3148  pindexMostWork == m_chain.Tip()) {
3149  break;
3150  }
3151 
3152  bool fInvalidFound = false;
3153  std::shared_ptr<const CBlock> nullBlockPtr;
      // Only pass pblock down when it actually is the block for the
      // most-work tip; otherwise ConnectTip reads from disk.
3154  if (!ActivateBestChainStep(
3155  config, state, pindexMostWork,
3156  pblock && pblock->GetHash() ==
3157  pindexMostWork->GetBlockHash()
3158  ? pblock
3159  : nullBlockPtr,
3160  fInvalidFound, connectTrace)) {
3161  // A system error occurred
3162  return false;
3163  }
3164  blocks_connected = true;
3165 
3166  if (fInvalidFound) {
3167  // Wipe cache, we may need another branch now.
3168  pindexMostWork = nullptr;
3169  }
3170 
3171  pindexNewTip = m_chain.Tip();
3172  for (const PerBlockConnectTrace &trace :
3173  connectTrace.GetBlocksConnected()) {
3174  assert(trace.pblock && trace.pindex);
3175  GetMainSignals().BlockConnected(trace.pblock, trace.pindex);
3176  }
3177  } while (!m_chain.Tip() ||
3178  (starting_tip && CBlockIndexWorkComparator()(
3179  m_chain.Tip(), starting_tip)));
3180 
3181  // Check the index once we're done with the above loop, since
3182  // we're going to release cs_main soon. If the index is in a bad
3183  // state now, then it's better to know immediately rather than
3184  // randomly have it cause a problem in a race.
3185  CheckBlockIndex();
3186 
3187  if (blocks_connected) {
3188  const CBlockIndex *pindexFork = m_chain.FindFork(starting_tip);
3189  bool fInitialDownload = IsInitialBlockDownload();
3190 
3191  // Notify external listeners about the new tip.
3192  // Enqueue while holding cs_main to ensure that UpdatedBlockTip
3193  // is called in the order in which blocks are connected
3194  if (pindexFork != pindexNewTip) {
3195  // Notify ValidationInterface subscribers
3196  GetMainSignals().UpdatedBlockTip(pindexNewTip, pindexFork,
3197  fInitialDownload);
3198 
3199  // Always notify the UI if a new block tip was connected
3200  uiInterface.NotifyBlockTip(
3201  GetSynchronizationState(fInitialDownload),
3202  pindexNewTip);
3203  }
3204  }
3205  }
3206  // When we reach this point, we switched to a new tip (stored in
3207  // pindexNewTip).
3208 
3209  for (const CBlockIndex *pindex : blocksToReconcile) {
3210  g_avalanche->addToReconcile(pindex);
3211  }
3212 
3213  if (!blocks_connected) {
3214  return true;
3215  }
3216 
3217  if (nStopAtHeight && pindexNewTip &&
3218  pindexNewTip->nHeight >= nStopAtHeight) {
3219  StartShutdown();
3220  }
3221 
3222  // We check shutdown only after giving ActivateBestChainStep a chance to
3223  // run once so that we never shutdown before connecting the genesis
3224  // block during LoadChainTip(). Previously this caused an assert()
3225  // failure during shutdown in such cases as the UTXO DB flushing checks
3226  // that the best block hash is non-null.
3227  if (ShutdownRequested()) {
3228  break;
3229  }
3230  } while (pindexNewTip != pindexMostWork);
3231 
3232  // Write changes periodically to disk, after relay.
      // NOTE(review): the FlushStateToDisk(...) condition line (3233) was
      // dropped by the listing; only its failure branch survives below.
3234  return false;
3235  }
3236 
3237  return true;
3238 }
3239 
// CChainState::PreciousBlock — treat pindex as if it were received before
// competing blocks with the same amount of work, so the node prefers it as
// tip. Works by assigning a decreasing (negative) sequence id, unparking
// the block, re-adding it to the candidate set, and then re-running
// ActivateBestChain.
// NOTE(review): this doxygen-derived listing drops the signature line, a
// lock acquisition/assertion, the "chain extended" condition around line
// 3252, the sequence-id assignments (lines 3255, 3258, 3260, 3264), and a
// statement at line 3274 — verify against the original validation.cpp.
3241  BlockValidationState &state,
3242  CBlockIndex *pindex) {
3245  {
3246  LOCK(cs_main);
3247  if (pindex->nChainWork < m_chain.Tip()->nChainWork) {
3248  // Nothing to do, this block is not at the tip.
3249  return true;
3250  }
3251 
3253  // The chain has been extended since the last call, reset the
3254  // counter.
3256  }
3257 
3259  setBlockIndexCandidates.erase(pindex);
3261  if (nBlockReverseSequenceId > std::numeric_limits<int32_t>::min()) {
3262  // We can't keep reducing the counter if somebody really wants to
3263  // call preciousblock 2**31-1 times on the same set of tips...
3265  }
3266 
3267  // In case this was parked, unpark it.
3268  UnparkBlock(pindex);
3269 
3270  // Make sure it is added to the candidate list if appropriate.
3271  if (pindex->IsValid(BlockValidity::TRANSACTIONS) &&
3272  pindex->HaveTxsDownloaded()) {
3273  setBlockIndexCandidates.insert(pindex);
3275  }
3276  }
3277 
3278  return ActivateBestChain(config, state);
3279 }
3280 
// CChainState::UnwindBlock — shared implementation behind InvalidateBlock
// (invalidate == true) and ParkBlock (invalidate == false). Disconnects the
// active chain back past pindex, marking each disconnected block failed or
// parked, keeps the mempool consistent (only re-adding transactions for
// shallow unwinds), maintains setBlockIndexCandidates throughout, and
// finally marks pindex itself failed/parked. Returns false if a disconnect
// fails or the target block could not be removed from the active chain.
// NOTE(review): this doxygen-derived listing drops the function signature
// prefix, the m_chainstate_mutex acquisition (line 3298), and the
// LimitValidationInterfaceQueue call (line 3335) — verify against the
// original validation.cpp.
3282  CBlockIndex *pindex, bool invalidate) {
3283  // Genesis block can't be invalidated or parked
3284  assert(pindex);
3285  if (pindex->nHeight == 0) {
3286  return false;
3287  }
3288 
3289  CBlockIndex *to_mark_failed_or_parked = pindex;
3290  bool pindex_was_in_chain = false;
3291  int disconnected = 0;
3292 
3293  // We do not allow ActivateBestChain() to run while UnwindBlock() is
3294  // running, as that could cause the tip to change while we disconnect
3295  // blocks. (Note for backport of Core PR16849: we acquire
3296  // LOCK(m_chainstate_mutex) in the Park, Invalidate and FinalizeBlock
3297  // functions due to differences in our code)
3299 
3300  // We'll be acquiring and releasing cs_main below, to allow the validation
3301  // callbacks to run. However, we should keep the block index in a
3302  // consistent state as we disconnect blocks -- in particular we need to
3303  // add equal-work blocks to setBlockIndexCandidates as we disconnect.
3304  // To avoid walking the block index repeatedly in search of candidates,
3305  // build a map once so that we can look up candidate blocks by chain
3306  // work as we go.
3307  std::multimap<const arith_uint256, CBlockIndex *> candidate_blocks_by_work;
3308 
3309  {
3310  LOCK(cs_main);
3311  for (auto &entry : m_blockman.m_block_index) {
3312  CBlockIndex *candidate = &entry.second;
3313  // We don't need to put anything in our active chain into the
3314  // multimap, because those candidates will be found and considered
3315  // as we disconnect.
3316  // Instead, consider only non-active-chain blocks that have at
3317  // least as much work as where we expect the new tip to end up.
3318  if (!m_chain.Contains(candidate) &&
3319  !CBlockIndexWorkComparator()(candidate, pindex->pprev) &&
3320  candidate->IsValid(BlockValidity::TRANSACTIONS) &&
3321  candidate->HaveTxsDownloaded()) {
3322  candidate_blocks_by_work.insert(
3323  std::make_pair(candidate->nChainWork, candidate));
3324  }
3325  }
3326  }
3327 
3328  // Disconnect (descendants of) pindex, and mark them invalid.
3329  while (true) {
3330  if (ShutdownRequested()) {
3331  break;
3332  }
3333 
3334  // Make sure the queue of validation callbacks doesn't grow unboundedly.
3336 
3337  LOCK(cs_main);
3338  // Lock for as long as disconnectpool is in scope to make sure
3339  // UpdateMempoolForReorg is called after DisconnectTip without unlocking
3340  // in between
3341  LOCK(MempoolMutex());
3342 
3343  if (!m_chain.Contains(pindex)) {
3344  break;
3345  }
3346 
3347  pindex_was_in_chain = true;
3348  CBlockIndex *invalid_walk_tip = m_chain.Tip();
3349 
3350  // ActivateBestChain considers blocks already in m_chain
3351  // unconditionally valid already, so force disconnect away from it.
3352 
3353  DisconnectedBlockTransactions disconnectpool;
3354 
3355  bool ret = DisconnectTip(state, &disconnectpool);
3356 
3357  // DisconnectTip will add transactions to disconnectpool.
3358  // Adjust the mempool to be consistent with the new tip, adding
3359  // transactions back to the mempool if disconnecting was successful,
3360  // and we're not doing a very deep invalidation (in which case
3361  // keeping the mempool up to date is probably futile anyway).
3362  if (m_mempool) {
3363  disconnectpool.updateMempoolForReorg(
3364  config, *this,
3365  /* fAddToMempool = */ (++disconnected <= 10) && ret,
3366  *m_mempool);
3367  }
3368 
3369  if (!ret) {
3370  return false;
3371  }
3372 
3373  assert(invalid_walk_tip->pprev == m_chain.Tip());
3374 
3375  // We immediately mark the disconnected blocks as invalid.
3376  // This prevents a case where pruned nodes may fail to invalidateblock
3377  // and be left unable to start as they have no tip candidates (as there
3378  // are no blocks that meet the "have data and are not invalid per
3379  // nStatus" criteria for inclusion in setBlockIndexCandidates).
3380 
3381  invalid_walk_tip->nStatus =
3382  invalidate ? invalid_walk_tip->nStatus.withFailed()
3383  : invalid_walk_tip->nStatus.withParked();
3384 
3385  m_blockman.m_dirty_blockindex.insert(invalid_walk_tip);
3386  setBlockIndexCandidates.insert(invalid_walk_tip->pprev);
3387 
3388  if (invalid_walk_tip == to_mark_failed_or_parked->pprev &&
3389  (invalidate ? to_mark_failed_or_parked->nStatus.hasFailed()
3390  : to_mark_failed_or_parked->nStatus.isParked())) {
3391  // We only want to mark the last disconnected block as
3392  // Failed (or Parked); its children need to be FailedParent (or
3393  // ParkedParent) instead.
3394  to_mark_failed_or_parked->nStatus =
3395  (invalidate
3396  ? to_mark_failed_or_parked->nStatus.withFailed(false)
3397  .withFailedParent()
3398  : to_mark_failed_or_parked->nStatus.withParked(false)
3399  .withParkedParent());
3400 
3401  m_blockman.m_dirty_blockindex.insert(to_mark_failed_or_parked);
3402  }
3403 
3404  // Add any equal or more work headers to setBlockIndexCandidates
3405  auto candidate_it = candidate_blocks_by_work.lower_bound(
3406  invalid_walk_tip->pprev->nChainWork);
3407  while (candidate_it != candidate_blocks_by_work.end()) {
3408  if (!CBlockIndexWorkComparator()(candidate_it->second,
3409  invalid_walk_tip->pprev)) {
3410  setBlockIndexCandidates.insert(candidate_it->second);
3411  candidate_it = candidate_blocks_by_work.erase(candidate_it);
3412  } else {
3413  ++candidate_it;
3414  }
3415  }
3416 
3417  // Track the last disconnected block, so we can correct its
3418  // FailedParent (or ParkedParent) status in future iterations, or, if
3419  // it's the last one, call InvalidChainFound on it.
3420  to_mark_failed_or_parked = invalid_walk_tip;
3421  }
3422 
3423  CheckBlockIndex();
3424 
3425  {
3426  LOCK(cs_main);
3427  if (m_chain.Contains(to_mark_failed_or_parked)) {
3428  // If the to-be-marked invalid block is in the active chain,
3429  // something is interfering and we can't proceed.
3430  return false;
3431  }
3432 
3433  // Mark pindex (or the last disconnected block) as invalid (or parked),
3434  // even when it never was in the main chain.
3435  to_mark_failed_or_parked->nStatus =
3436  invalidate ? to_mark_failed_or_parked->nStatus.withFailed()
3437  : to_mark_failed_or_parked->nStatus.withParked();
3438  m_blockman.m_dirty_blockindex.insert(to_mark_failed_or_parked);
3439  if (invalidate) {
3440  m_chainman.m_failed_blocks.insert(to_mark_failed_or_parked);
3441  }
3442 
3443  // If any new blocks somehow arrived while we were disconnecting
3444  // (above), then the pre-calculation of what should go into
3445  // setBlockIndexCandidates may have missed entries. This would
3446  // technically be an inconsistency in the block index, but if we clean
3447  // it up here, this should be an essentially unobservable error.
3448  // Loop back over all block index entries and add any missing entries
3449  // to setBlockIndexCandidates.
3450  for (auto &[_, block_index] : m_blockman.m_block_index) {
3451  if (block_index.IsValid(BlockValidity::TRANSACTIONS) &&
3452  block_index.HaveTxsDownloaded() &&
3453  !setBlockIndexCandidates.value_comp()(&block_index,
3454  m_chain.Tip())) {
3455  setBlockIndexCandidates.insert(&block_index);
3456  }
3457  }
3458 
3459  if (invalidate) {
3460  InvalidChainFound(to_mark_failed_or_parked);
3461  }
3462  }
3463 
3464  // Only notify about a new block tip if the active chain was modified.
3465  if (pindex_was_in_chain) {
      // NOTE(review): the first argument to NotifyBlockTip (line 3467, the
      // SynchronizationState) was dropped by this listing.
3466  uiInterface.NotifyBlockTip(
3468  to_mark_failed_or_parked->pprev);
3469  }
3470  return true;
3471 }
3472 
// CChainState::InvalidateBlock — mark pindex (and its descendants) as
// invalid and disconnect them from the active chain. Thin wrapper over
// UnwindBlock with invalidate = true.
// NOTE(review): the signature prefix and the m_chainstate_mutex
// acquisition (line 3479) were dropped by this doxygen-derived listing.
3474  BlockValidationState &state,
3475  CBlockIndex *pindex) {
3478  // See 'Note for backport of Core PR16849' in CChainState::UnwindBlock
3480 
3481  return UnwindBlock(config, state, pindex, true);
3482 }
3483 
// CChainState::ParkBlock — park pindex (and its descendants), disconnecting
// them from the active chain without marking them consensus-invalid. Thin
// wrapper over UnwindBlock with invalidate = false.
// NOTE(review): the signature prefix and the m_chainstate_mutex
// acquisition (line 3489) were dropped by this doxygen-derived listing.
3486  CBlockIndex *pindex) {
3488  // See 'Note for backport of Core PR16849' in CChainState::UnwindBlock
3490 
3491  return UnwindBlock(config, state, pindex, false);
3492 }
3493 
// UpdateFlagsForBlock — apply the status-transforming functor f to pindex,
// but only if it actually changes the status AND pindex descends from
// pindexBase (or pindexBase is null). On change: persist the dirty index
// entry, drop the block from m_failed_blocks when it became valid, and
// re-add it to setBlockIndexCandidates when it now beats the tip. Returns
// true iff the status was changed.
// NOTE(review): the signature line carrying the return type, enclosing
// class, and the pindexBase parameter was dropped by this doxygen-derived
// listing — confirm against the original validation.cpp.
3494 template <typename F>
3496  CBlockIndex *pindex, F f) {
3497  BlockStatus newStatus = f(pindex->nStatus);
3498  if (pindex->nStatus != newStatus &&
3499  (!pindexBase ||
3500  pindex->GetAncestor(pindexBase->nHeight) == pindexBase)) {
3501  pindex->nStatus = newStatus;
3502  m_blockman.m_dirty_blockindex.insert(pindex);
3503  if (newStatus.isValid()) {
3504  m_chainman.m_failed_blocks.erase(pindex);
3505  }
3506 
3507  if (pindex->IsValid(BlockValidity::TRANSACTIONS) &&
3508  pindex->HaveTxsDownloaded() &&
3509  setBlockIndexCandidates.value_comp()(m_chain.Tip(), pindex)) {
3510  setBlockIndexCandidates.insert(pindex);
3511  }
3512  return true;
3513  }
3514  return false;
3515 }
3516 
// UpdateFlags — apply f to pindex and all of its ancestors, then sweep the
// whole block index applying fChild to descendants of pindex and
// fAncestorWasChanged to descendants of the deepest ancestor whose status
// changed. pindexReset is cleared when one of its own ancestors was
// modified. Used to implement ResetBlockFailureFlags and UnparkBlockImpl.
// NOTE(review): the signature line with the enclosing class, pindex and
// pindexReset parameters, plus a lock assertion (line 3520), were dropped
// by this doxygen-derived listing — confirm against the original source.
3517 template <typename F, typename C, typename AC>
3519  F f, C fChild, AC fAncestorWasChanged) {
3521 
3522  // Update the current block and ancestors; while we're doing this, identify
3523  // which was the deepest ancestor we changed.
3524  CBlockIndex *pindexDeepestChanged = pindex;
3525  for (auto pindexAncestor = pindex; pindexAncestor != nullptr;
3526  pindexAncestor = pindexAncestor->pprev) {
3527  if (UpdateFlagsForBlock(nullptr, pindexAncestor, f)) {
3528  pindexDeepestChanged = pindexAncestor;
3529  }
3530  }
3531 
3532  if (pindexReset &&
3533  pindexReset->GetAncestor(pindexDeepestChanged->nHeight) ==
3534  pindexDeepestChanged) {
3535  // reset pindexReset if it had a modified ancestor.
3536  pindexReset = nullptr;
3537  }
3538 
3539  // Update all blocks under modified blocks.
3540  for (auto &[_, block_index] : m_blockman.m_block_index) {
3541  UpdateFlagsForBlock(pindex, &block_index, fChild);
3542  UpdateFlagsForBlock(pindexDeepestChanged, &block_index,
3543  fAncestorWasChanged);
3544  }
3545 }
3546 
// CChainState::ResetBlockFailureFlags — clear the failure flags on pindex,
// its ancestors, and all descendants of modified blocks, via UpdateFlags.
// m_best_invalid is passed as the reset pointer so it is cleared if one of
// its ancestors was un-failed.
// NOTE(review): the function signature line was dropped by this
// doxygen-derived listing.
3549 
3550  UpdateFlags(
3551  pindex, m_chainman.m_best_invalid,
3552  [](const BlockStatus status) {
3553  return status.withClearedFailureFlags();
3554  },
3555  [](const BlockStatus status) {
3556  return status.withClearedFailureFlags();
3557  },
3558  [](const BlockStatus status) {
3559  return status.withFailedParent(false);
3560  });
3561 }
3562 
// CChainState::UnparkBlockImpl — clear the parked flags on pindex and its
// ancestors via UpdateFlags. When fClearChildren is true, descendants of
// pindex have all parked flags cleared too; otherwise they only lose the
// parked-parent flag. m_best_parked is reset if one of its ancestors was
// unparked.
// NOTE(review): a line between the signature and the UpdateFlags call
// (3564, presumably a lock assertion) was dropped by this listing.
3563 void CChainState::UnparkBlockImpl(CBlockIndex *pindex, bool fClearChildren) {
3565 
3566  UpdateFlags(
3567  pindex, m_chainman.m_best_parked,
3568  [](const BlockStatus status) {
3569  return status.withClearedParkedFlags();
3570  },
3571  [fClearChildren](const BlockStatus status) {
3572  return fClearChildren ? status.withClearedParkedFlags()
3573  : status.withParkedParent(false);
3574  },
3575  [](const BlockStatus status) {
3576  return status.withParkedParent(false);
3577  });
3578 }
3579 
// Unpark pindex together with all of its children (fClearChildren = true).
// NOTE(review): the function signature line was dropped by this
// doxygen-derived listing; presumably CChainState::UnparkBlockAndChildren —
// confirm against the original validation.cpp.
3581  return UnparkBlockImpl(pindex, true);
3582 }
3583 
// Unpark pindex only, leaving descendants with their parked-parent flag
// handling to UpdateFlags (fClearChildren = false).
// NOTE(review): the function signature line was dropped by this
// doxygen-derived listing; presumably CChainState::UnparkBlock — confirm
// against the original validation.cpp.
3585  return UnparkBlockImpl(pindex, false);
3586 }
3587 
// CChainState::AvalancheFinalizeBlock — record pindex as the avalanche
// finalized block. Rejects a null pindex or one not on the active chain;
// returns true without re-notifying if the block is already covered by the
// current finalized tip. On success, fires the BlockFinalized signal.
// NOTE(review): the signature line, the LogPrint category argument for the
// error message, and the lock taken around the assignment (line 3605) were
// dropped by this doxygen-derived listing.
3589  if (!pindex) {
3590  return false;
3591  }
3592 
3593  if (!m_chain.Contains(pindex)) {
3595  "The block to mark finalized by avalanche is not on the "
3596  "active chain: %s\n",
3597  pindex->GetBlockHash().ToString());
3598  return false;
3599  }
3600 
3601  if (IsBlockAvalancheFinalized(pindex)) {
3602  return true;
3603  }
3604 
3606  m_avalancheFinalizedBlockIndex = pindex;
3607 
3608  GetMainSignals().BlockFinalized(pindex);
3609 
3610  return true;
3611 }
3612 
// Reset the avalanche finalized block pointer so no block is considered
// finalized anymore.
// NOTE(review): the function signature and lock lines were dropped by this
// doxygen-derived listing; presumably
// CChainState::ClearAvalancheFinalizedBlock — confirm against the source.
3615  m_avalancheFinalizedBlockIndex = nullptr;
3616 }
3617 
// Return true iff pindex is an ancestor of (or equal to) the recorded
// avalanche finalized block, i.e. it is covered by avalanche finalization.
// NOTE(review): the function signature and lock lines were dropped by this
// doxygen-derived listing.
3620  return pindex && m_avalancheFinalizedBlockIndex &&
3621  m_avalancheFinalizedBlockIndex->GetAncestor(pindex->nHeight) ==
3622  pindex;
3623 }
3624 
// CChainState::ReceivedBlockTransactions — record that the full block data
// for pindexNew has been received and stored at pos: fill in tx count,
// size, file position and the have-data status bit. If the whole ancestry
// now has transactions, walk pindexNew and its previously-unlinked
// descendants, updating chain stats, assigning sequence ids, and inserting
// eligible entries into setBlockIndexCandidates; otherwise queue pindexNew
// in m_blocks_unlinked until its parent's data arrives.
// NOTE(review): the signature prefix and a line between 3637 and 3639
// (presumably clearing/OR-ing another status bit) were dropped by this
// doxygen-derived listing — confirm against the original validation.cpp.
3630  CBlockIndex *pindexNew,
3631  const FlatFilePos &pos) {
3632  pindexNew->nTx = block.vtx.size();
3633  pindexNew->nSize = ::GetSerializeSize(block, PROTOCOL_VERSION);
3634  pindexNew->nFile = pos.nFile;
3635  pindexNew->nDataPos = pos.nPos;
3636  pindexNew->nUndoPos = 0;
3637  pindexNew->nStatus = pindexNew->nStatus.withData();
3639  m_blockman.m_dirty_blockindex.insert(pindexNew);
3640 
3641  if (pindexNew->UpdateChainStats()) {
3642  // If pindexNew is the genesis block or all parents are
3643  // BLOCK_VALID_TRANSACTIONS.
3644  std::deque<CBlockIndex *> queue;
3645  queue.push_back(pindexNew);
3646 
3647  // Recursively process any descendant blocks that now may be eligible to
3648  // be connected.
3649  while (!queue.empty()) {
3650  CBlockIndex *pindex = queue.front();
3651  queue.pop_front();
3652  pindex->UpdateChainStats();
3653  if (pindex->nSequenceId == 0) {
3654  // We assign a sequence is when transaction are received to
3655  // prevent a miner from being able to broadcast a block but not
3656  // its content. However, a sequence id may have been set
3657  // manually, for instance via PreciousBlock, in which case, we
3658  // don't need to assign one.
3659  pindex->nSequenceId = nBlockSequenceId++;
3660  }
3661 
3662  if (m_chain.Tip() == nullptr ||
3663  !setBlockIndexCandidates.value_comp()(pindex, m_chain.Tip())) {
3664  setBlockIndexCandidates.insert(pindex);
3665  }
3666 
      // Promote any blocks that were waiting on this one's data.
3667  std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
3668  std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
3669  range = m_blockman.m_blocks_unlinked.equal_range(pindex);
3670  while (range.first != range.second) {
3671  std::multimap<CBlockIndex *, CBlockIndex *>::iterator it =
3672  range.first;
3673  queue.push_back(it->second);
3674  range.first++;
3675  m_blockman.m_blocks_unlinked.erase(it);
3676  }
3677  }
3678  } else if (pindexNew->pprev &&
3679  pindexNew->pprev->IsValid(BlockValidity::TREE)) {
      // NOTE(review): the m_blocks_unlinked.insert(... call line (3680)
      // was dropped by this listing; only its argument survives below.
3681  std::make_pair(pindexNew->pprev, pindexNew));
3682  }
3683 }
3684 
// CheckBlockHeader — context-free header check: verify the header's
// proof-of-work against its claimed nBits target (skipped when
// validationOptions disables PoW validation). Returns false with a
// "high-hash" rejection on failure, true otherwise.
// NOTE(review): the `return state.Invalid(BlockValidationResult::..., `
// line (3700) was dropped by this doxygen-derived listing; only its
// trailing arguments survive below.
3693 static bool CheckBlockHeader(const CBlockHeader &block,
3694  BlockValidationState &state,
3695  const Consensus::Params &params,
3696  BlockValidationOptions validationOptions) {
3697  // Check proof of work matches claimed amount
3698  if (validationOptions.shouldValidatePoW() &&
3699  !CheckProofOfWork(block.GetHash(), block.nBits, params)) {
3701  "high-hash", "proof of work failed");
3702  }
3703 
3704  return true;
3705 }
3706 
// CheckBlock — context-free block validity checks: header PoW, merkle root
// (including the CVE-2012-2459 duplicate-transaction malleability check),
// coarse and exact size limits, coinbase-first ordering, and per-transaction
// well-formedness. Caches a successful result in block.fChecked only when
// both PoW and merkle-root validation were actually performed.
// NOTE(review): every `return state.Invalid(BlockValidationResult::..., `
// opening line was dropped by this doxygen-derived listing; only the
// trailing reject-reason arguments survive — confirm against the original
// validation.cpp.
3707 bool CheckBlock(const CBlock &block, BlockValidationState &state,
3708  const Consensus::Params &params,
3709  BlockValidationOptions validationOptions) {
3710  // These are checks that are independent of context.
3711  if (block.fChecked) {
3712  return true;
3713  }
3714 
3715  // Check that the header is valid (particularly PoW). This is mostly
3716  // redundant with the call in AcceptBlockHeader.
3717  if (!CheckBlockHeader(block, state, params, validationOptions)) {
3718  return false;
3719  }
3720 
3721  // Check the merkle root.
3722  if (validationOptions.shouldValidateMerkleRoot()) {
3723  bool mutated;
3724  uint256 hashMerkleRoot2 = BlockMerkleRoot(block, &mutated);
3725  if (block.hashMerkleRoot != hashMerkleRoot2) {
3727  "bad-txnmrklroot", "hashMerkleRoot mismatch");
3728  }
3729 
3730  // Check for merkle tree malleability (CVE-2012-2459): repeating
3731  // sequences of transactions in a block without affecting the merkle
3732  // root of a block, while still invalidating it.
3733  if (mutated) {
3735  "bad-txns-duplicate", "duplicate transaction");
3736  }
3737  }
3738 
3739  // All potential-corruption validation must be done before we do any
3740  // transaction validation, as otherwise we may mark the header as invalid
3741  // because we receive the wrong transactions for it.
3742 
3743  // First transaction must be coinbase.
3744  if (block.vtx.empty()) {
3746  "bad-cb-missing", "first tx is not coinbase");
3747  }
3748 
3749  // Size limits.
3750  auto nMaxBlockSize = validationOptions.getExcessiveBlockSize();
3751 
3752  // Bail early if there is no way this block is of reasonable size.
      // Cheap lower-bound check before the full serialization below.
3753  if ((block.vtx.size() * MIN_TRANSACTION_SIZE) > nMaxBlockSize) {
3755  "bad-blk-length", "size limits failed");
3756  }
3757 
3758  auto currentBlockSize = ::GetSerializeSize(block, PROTOCOL_VERSION);
3759  if (currentBlockSize > nMaxBlockSize) {
3761  "bad-blk-length", "size limits failed");
3762  }
3763 
3764  // And a valid coinbase.
3765  TxValidationState tx_state;
3766  if (!CheckCoinbase(*block.vtx[0], tx_state)) {
3768  tx_state.GetRejectReason(),
3769  strprintf("Coinbase check failed (txid %s) %s",
3770  block.vtx[0]->GetId().ToString(),
3771  tx_state.GetDebugMessage()));
3772  }
3773 
3774  // Check transactions for regularity, skipping the first. Note that this
3775  // is the first time we check that all after the first are !IsCoinBase.
3776  for (size_t i = 1; i < block.vtx.size(); i++) {
3777  auto *tx = block.vtx[i].get();
3778  if (!CheckRegularTransaction(*tx, tx_state)) {
3779  return state.Invalid(
3781  tx_state.GetRejectReason(),
3782  strprintf("Transaction check failed (txid %s) %s",
3783  tx->GetId().ToString(), tx_state.GetDebugMessage()));
3784  }
3785  }
3786 
      // Only cache the result when nothing was skipped; otherwise a later
      // call with full validation enabled must redo the work.
3787  if (validationOptions.shouldValidatePoW() &&
3788  validationOptions.shouldValidateMerkleRoot()) {
3789  block.fChecked = true;
3790  }
3791 
3792  return true;
3793 }
3794 
3805 static bool
3807  const CBlockHeader &block,
3808  BlockValidationState &state, BlockManager &blockman,
3809  const CBlockIndex *pindexPrev, int64_t nAdjustedTime)
3812  assert(pindexPrev != nullptr);
3813  const int nHeight = pindexPrev->nHeight + 1;
3814 
3815  // Check proof of work
3816  if (block.nBits != GetNextWorkRequired(pindexPrev, &block, params)) {
3817  LogPrintf("bad bits after height: %d\n", pindexPrev->nHeight);
3819  "bad-diffbits", "incorrect proof of work");
3820  }
3821 
3822  // Check against checkpoints
3823  if (fCheckpointsEnabled) {
3824  const CCheckpointData &checkpoints = params.Checkpoints();
3825 
3826  // Check that the block chain matches the known block chain up to a
3827  // checkpoint.
3828  if (!Checkpoints::CheckBlock(checkpoints, nHeight, block.GetHash())) {
3830  "ERROR: %s: rejected by checkpoint lock-in at %d\n",
3831  __func__, nHeight);
3833  "checkpoint mismatch");
3834  }
3835 
3836  // Don't accept any forks from the main chain prior to last checkpoint.
3837  // GetLastCheckpoint finds the last checkpoint in MapCheckpoints that's
3838  // in our BlockIndex().
3839 
3840  const CBlockIndex *pcheckpoint =
3841  blockman.GetLastCheckpoint(checkpoints);
3842  if (pcheckpoint && nHeight < pcheckpoint->nHeight) {
3844  "ERROR: %s: forked chain older than last checkpoint "
3845  "(height %d)\n",
3846  __func__, nHeight);
3848  "bad-fork-prior-to-checkpoint");
3849  }
3850  }
3851 
3852  // Check timestamp against prev
3853  if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast()) {
3855  "time-too-old", "block's timestamp is too early");
3856  }
3857 
3858  // Check timestamp
3859  if (block.GetBlockTime() > nAdjustedTime + MAX_FUTURE_BLOCK_TIME) {
3861  "time-too-new",
3862  "block timestamp too far in the future");
3863  }
3864 
3865  const Consensus::Params &consensusParams = params.GetConsensus();
3866  // Reject blocks with outdated version
3867  if ((block.nVersion < 2 &&
3868  DeploymentActiveAfter(pindexPrev, consensusParams,
3870  (block.nVersion < 3 &&
3871  DeploymentActiveAfter(pindexPrev, consensusParams,
3873  (block.nVersion < 4 &&
3874  DeploymentActiveAfter(pindexPrev, consensusParams,
3876  return state.Invalid(
3878  strprintf("bad-version(0x%08x)", block.nVersion),
3879  strprintf("rejected nVersion=0x%08x block", block.nVersion));
3880  }
3881 
3882  return true;
3883 }
3884 
3886  const CBlockIndex *active_chain_tip, const Consensus::Params &params,
3887  const CTransaction &tx, TxValidationState &state) {
3889  // TODO: Make active_chain_tip a reference
3890  assert(active_chain_tip);
3891 
3892  // ContextualCheckTransactionForCurrentBlock() uses
3893  // active_chain_tip.Height()+1 to evaluate nLockTime because when
3894  // IsFinalTx() is called within AcceptBlock(), the height of the
3895  // block *being* evaluated is what is used. Thus if we want to know if a
3896  // transaction can be part of the *next* block, we need to call
3897  // ContextualCheckTransaction() with one more than
3898  // active_chain_tip.Height().
3899  const int nBlockHeight = active_chain_tip->nHeight + 1;
3900 
3901  // BIP113 will require that time-locked transactions have nLockTime set to
3902  // less than the median time of the previous block they're contained in.
3903  // When the next block is created its previous block will be the current
3904  // chain tip, so we use that to calculate the median time passed to
3905  // ContextualCheckTransaction().
3906  const int64_t nLockTimeCutoff{active_chain_tip->GetMedianTimePast()};
3907 
3908  return ContextualCheckTransaction(params, tx, state, nBlockHeight,
3909  nLockTimeCutoff);
3910 }
3911 
3919 static bool ContextualCheckBlock(const CBlock &block,
3920  BlockValidationState &state,
3921  const Consensus::Params &params,
3922  const CBlockIndex *pindexPrev) {
3923  const int nHeight = pindexPrev == nullptr ? 0 : pindexPrev->nHeight + 1;
3924 
3925  // Enforce BIP113 (Median Time Past).
3926  int nLockTimeFlags = 0;
3927  if (DeploymentActiveAfter(pindexPrev, params, Consensus::DEPLOYMENT_CSV)) {
3928  assert(pindexPrev != nullptr);
3929  nLockTimeFlags |= LOCKTIME_MEDIAN_TIME_PAST;
3930  }
3931 
3932  const int64_t nMedianTimePast =
3933  pindexPrev == nullptr ? 0 : pindexPrev->GetMedianTimePast();
3934 
3935  const int64_t nLockTimeCutoff = (nLockTimeFlags & LOCKTIME_MEDIAN_TIME_PAST)
3936  ? nMedianTimePast
3937  : block.GetBlockTime();
3938 
3939  const bool fIsMagneticAnomalyEnabled =
3940  IsMagneticAnomalyEnabled(params, pindexPrev);
3941 
3942  // Check transactions:
3943  // - canonical ordering
3944  // - ensure they are finalized
3945  // - check they have the minimum size
3946  const CTransaction *prevTx = nullptr;
3947  for (const auto &ptx : block.vtx) {
3948  const CTransaction &tx = *ptx;
3949  if (fIsMagneticAnomalyEnabled) {
3950  if (prevTx && (tx.GetId() <= prevTx->GetId())) {
3951  if (tx.GetId() == prevTx->GetId()) {
3953  "tx-duplicate",
3954  strprintf("Duplicated transaction %s",
3955  tx.GetId().ToString()));
3956  }
3957 
3958  return state.Invalid(
3960  strprintf("Transaction order is invalid (%s < %s)",
3961  tx.GetId().ToString(),
3962  prevTx->GetId().ToString()));
3963  }
3964 
3965  if (prevTx || !tx.IsCoinBase()) {
3966  prevTx = &tx;
3967  }
3968  }
3969 
3970  TxValidationState tx_state;
3971  if (!ContextualCheckTransaction(params, tx, tx_state, nHeight,
3972  nLockTimeCutoff)) {
3974  tx_state.GetRejectReason(),
3975  tx_state.GetDebugMessage());
3976  }
3977  }
3978 
3979  // Enforce rule that the coinbase starts with serialized block height
3980  if (DeploymentActiveAfter(pindexPrev, params,
3982  CScript expect = CScript() << nHeight;
3983  if (block.vtx[0]->vin[0].scriptSig.size() < expect.size() ||
3984  !std::equal(expect.begin(), expect.end(),
3985  block.vtx[0]->vin[0].scriptSig.begin())) {
3987  "bad-cb-height",
3988  "block height mismatch in coinbase");
3989  }
3990  }
3991 
3992  return true;
3993 }
3994 
/*
 * ChainstateManager::AcceptBlockHeader: validate a header and add it to the
 * block index. Handles: duplicate lookup (returning the cached index, and
 * rejecting headers already marked invalid), context-free header checks,
 * previous-block lookup/validity, contextual header checks, and marking of
 * descendants of known-failed blocks before finally inserting via
 * AddToBlockIndex.
 *
 * NOTE(review): this doxygen listing elided several source lines (embedded
 * numbers 4000, 4004, 4022, 4030, 4040, 4043, 4050, 4053, 4059, 4100, 4104)
 * — the signature start, a cs_main assertion, and the LogPrint category /
 * state.Invalid() result-code argument lines. The code below is kept
 * byte-identical to the listing, gaps included; reconcile against the
 * repository before compiling.
 */
4001  const CBlockHeader &block,
4002  BlockValidationState &state,
4003  CBlockIndex **ppindex) {
4005  const CChainParams &chainparams = config.GetChainParams();
4006 
4007  // Check for duplicate
4008  BlockHash hash = block.GetHash();
4009  BlockMap::iterator miSelf{m_blockman.m_block_index.find(hash)};
4010  if (hash != chainparams.GetConsensus().hashGenesisBlock) {
4011  if (miSelf != m_blockman.m_block_index.end()) {
4012  // Block header is already known.
4013  CBlockIndex *pindex = &(miSelf->second);
4014  if (ppindex) {
4015  *ppindex = pindex;
4016  }
4017 
4018  if (pindex->nStatus.isInvalid()) {
4019  LogPrint(BCLog::VALIDATION, "%s: block %s is marked invalid\n",
4020  __func__, hash.ToString());
4021  return state.Invalid(
4023  }
4024 
4025  return true;
4026  }
4027 
4028  if (!CheckBlockHeader(block, state, chainparams.GetConsensus(),
4029  BlockValidationOptions(config))) {
4031  "%s: Consensus::CheckBlockHeader: %s, %s\n", __func__,
4032  hash.ToString(), state.ToString());
4033  return false;
4034  }
4035 
4036  // Get prev block index
4037  BlockMap::iterator mi{
4038  m_blockman.m_block_index.find(block.hashPrevBlock)};
4039  if (mi == m_blockman.m_block_index.end()) {
4041  "header %s has prev block not found: %s\n",
4042  hash.ToString(), block.hashPrevBlock.ToString());
4044  "prev-blk-not-found");
4045  }
4046 
4047  CBlockIndex *pindexPrev = &((*mi).second);
4048  assert(pindexPrev);
4049  if (pindexPrev->nStatus.isInvalid()) {
4051  "header %s has prev block invalid: %s\n", hash.ToString(),
4052  block.hashPrevBlock.ToString());
4054  "bad-prevblk");
4055  }
4056 
4057  if (!ContextualCheckBlockHeader(chainparams, block, state, m_blockman,
4058  pindexPrev, GetAdjustedTime())) {
4060  "%s: Consensus::ContextualCheckBlockHeader: %s, %s\n",
4061  __func__, hash.ToString(), state.ToString());
4062  return false;
4063  }
4064 
4065  /* Determine if this block descends from any block which has been found
4066  * invalid (m_failed_blocks), then mark pindexPrev and any blocks
4067  * between them as failed. For example:
4068  *
4069  * D3
4070  * /
4071  * B2 - C2
4072  * / \
4073  * A D2 - E2 - F2
4074  * \
4075  * B1 - C1 - D1 - E1
4076  *
4077  * In the case that we attempted to reorg from E1 to F2, only to find
4078  * C2 to be invalid, we would mark D2, E2, and F2 as BLOCK_FAILED_CHILD
4079  * but NOT D3 (it was not in any of our candidate sets at the time).
4080  *
4081  * In any case D3 will also be marked as BLOCK_FAILED_CHILD at restart
4082  * in LoadBlockIndex.
4083  */
4084  if (!pindexPrev->IsValid(BlockValidity::SCRIPTS)) {
4085  // The above does not mean "invalid": it checks if the previous
4086  // block hasn't been validated up to BlockValidity::SCRIPTS. This is
4087  // a performance optimization, in the common case of adding a new
4088  // block to the tip, we don't need to iterate over the failed blocks
4089  // list.
4090  for (const CBlockIndex *failedit : m_failed_blocks) {
4091  if (pindexPrev->GetAncestor(failedit->nHeight) == failedit) {
4092  assert(failedit->nStatus.hasFailed());
4093  CBlockIndex *invalid_walk = pindexPrev;
4094  while (invalid_walk != failedit) {
4095  invalid_walk->nStatus =
4096  invalid_walk->nStatus.withFailedParent();
4097  m_blockman.m_dirty_blockindex.insert(invalid_walk);
4098  invalid_walk = invalid_walk->pprev;
4099  }
4101  "header %s has prev block invalid: %s\n",
4102  hash.ToString(), block.hashPrevBlock.ToString());
4103  return state.Invalid(
4105  "bad-prevblk");
4106  }
4107  }
4108  }
4109  }
4110 
     // Insert the (now fully checked) header into the block index, possibly
     // updating the best-known header (m_best_header).
4111  CBlockIndex *pindex{m_blockman.AddToBlockIndex(block, m_best_header)};
4112 
4113  if (ppindex) {
4114  *ppindex = pindex;
4115  }
4116 
4117  return true;
4118 }
4119 
4120 // Exposed wrapper for AcceptBlockHeader
4122  const Config &config, const std::vector<CBlockHeader> &headers,
4123  BlockValidationState &state, const CBlockIndex **ppindex) {
4125  {
4126  LOCK(cs_main);
4127  for (const CBlockHeader &header : headers) {
4128  // Use a temp pindex instead of ppindex to avoid a const_cast
4129  CBlockIndex *pindex = nullptr;
4130  bool accepted = AcceptBlockHeader(config, header, state, &pindex);
4132 
4133  if (!accepted) {
4134  return false;
4135  }
4136 
4137  if (ppindex) {
4138  *ppindex = pindex;
4139  }
4140  }
4141  }
4142 
4144  if (ActiveChainstate().IsInitialBlockDownload() && ppindex &&
4145  *ppindex) {
4146  const CBlockIndex &last_accepted{**ppindex};
4147  const int64_t blocks_left{
4148  (GetTime() - last_accepted.GetBlockTime()) /
4150  const double progress{100.0 * last_accepted.nHeight /
4151  (last_accepted.nHeight + blocks_left)};
4152  LogPrintf("Synchronizing blockheaders, height: %d (~%.2f%%)\n",
4153  last_accepted.nHeight, progress);
4154  }
4155  }
4156  return true;
4157 }
4158 
/*
 * CChainState::AcceptBlock: store a block on disk if it passes validation
 * policy. Accepts the header, decides whether an unrequested block is worth
 * processing (already have / less work / too far ahead / below minimum chain
 * work), runs CheckBlock + ContextualCheckBlock, optionally parks deep-reorg
 * blocks (-parkdeepreorg), relays new-PoW-valid blocks building on the tip,
 * and finally writes the block to the block files.
 *
 * NOTE(review): this doxygen listing elided several source lines (embedded
 * numbers 4170, 4174, 4271, 4308, 4321) — the signature start, a lock
 * annotation/assertion, part of the state.IsInvalid() condition, the
 * SaveBlockToDisk assignment head, and a FlushStateToDisk/periodic-flush
 * line. The code below is kept byte-identical to the listing, gaps included;
 * reconcile against the repository before compiling.
 */
4171  const std::shared_ptr<const CBlock> &pblock,
4172  BlockValidationState &state, bool fRequested,
4173  const FlatFilePos *dbp, bool *fNewBlock) {
4175 
4176  const CBlock &block = *pblock;
4177  if (fNewBlock) {
4178  *fNewBlock = false;
4179  }
4180 
4181  CBlockIndex *pindex = nullptr;
4182 
4183  bool accepted_header{
4184  m_chainman.AcceptBlockHeader(config, block, state, &pindex)};
4185  CheckBlockIndex();
4186 
4187  if (!accepted_header) {
4188  return false;
4189  }
4190 
4191  // Try to process all requested blocks that we don't have, but only
4192  // process an unrequested block if it's new and has enough work to
4193  // advance our tip, and isn't too many blocks ahead.
4194  bool fAlreadyHave = pindex->nStatus.hasData();
4195 
4196  // TODO: deal better with return value and error conditions for duplicate
4197  // and unrequested blocks.
4198  if (fAlreadyHave) {
4199  return true;
4200  }
4201 
4202  // Compare block header timestamps and received times of the block and the
4203  // chaintip. If they have the same chain height, use these diffs as a
4204  // tie-breaker, attempting to pick the more honestly-mined block.
4205  int64_t newBlockTimeDiff = std::llabs(pindex->GetReceivedTimeDiff());
4206  int64_t chainTipTimeDiff =
4207  m_chain.Tip() ? std::llabs(m_chain.Tip()->GetReceivedTimeDiff()) : 0;
4208 
4209  bool isSameHeight =
4210  m_chain.Tip() && (pindex->nChainWork == m_chain.Tip()->nChainWork);
4211  if (isSameHeight) {
4212  LogPrintf("Chain tip timestamp-to-received-time difference: hash=%s, "
4213  "diff=%d\n",
4214  m_chain.Tip()->GetBlockHash().ToString(), chainTipTimeDiff);
4215  LogPrintf("New block timestamp-to-received-time difference: hash=%s, "
4216  "diff=%d\n",
4217  pindex->GetBlockHash().ToString(), newBlockTimeDiff);
4218  }
4219 
4220  bool fHasMoreOrSameWork =
4221  (m_chain.Tip() ? pindex->nChainWork >= m_chain.Tip()->nChainWork
4222  : true);
4223 
4224  // Blocks that are too out-of-order needlessly limit the effectiveness of
4225  // pruning, because pruning will not delete block files that contain any
4226  // blocks which are too close in height to the tip. Apply this test
4227  // regardless of whether pruning is enabled; it should generally be safe to
4228  // not process unrequested blocks.
4229  bool fTooFarAhead{pindex->nHeight >
4230  m_chain.Height() + int(MIN_BLOCKS_TO_KEEP)};
4231 
4232  // TODO: Decouple this function from the block download logic by removing
4233  // fRequested
4234  // This requires some new chain data structure to efficiently look up if a
4235  // block is in a chain leading to a candidate for best tip, despite not
4236  // being such a candidate itself.
4237  // Note that this would break the getblockfrompeer RPC
4238 
4239  // If we didn't ask for it:
4240  if (!fRequested) {
4241  // This is a previously-processed block that was pruned.
4242  if (pindex->nTx != 0) {
4243  return true;
4244  }
4245 
4246  // Don't process less-work chains.
4247  if (!fHasMoreOrSameWork) {
4248  return true;
4249  }
4250 
4251  // Block height is too high.
4252  if (fTooFarAhead) {
4253  return true;
4254  }
4255 
4256  // Protect against DoS attacks from low-work chains.
4257  // If our tip is behind, a peer could try to send us
4258  // low-work blocks on a fake chain that we would never
4259  // request; don't process these.
4260  if (pindex->nChainWork < nMinimumChainWork) {
4261  return true;
4262  }
4263  }
4264 
4265  const Consensus::Params &consensusParams = m_params.GetConsensus();
4266 
4267  if (!CheckBlock(block, state, consensusParams,
4268  BlockValidationOptions(config)) ||
4269  !ContextualCheckBlock(block, state, consensusParams, pindex->pprev)) {
4270  if (state.IsInvalid() &&
4272  pindex->nStatus = pindex->nStatus.withFailed();
4273  m_blockman.m_dirty_blockindex.insert(pindex);
4274  }
4275 
4276  return error("%s: %s (block %s)", __func__, state.ToString(),
4277  block.GetHash().ToString());
4278  }
4279 
4280  // If connecting the new block would require rewinding more than one block
4281  // from the active chain (i.e., a "deep reorg"), then mark the new block as
4282  // parked. If it has enough work then it will be automatically unparked
4283  // later, during FindMostWorkChain. We mark the block as parked at the very
4284  // last minute so we can make sure everything is ready to be reorged if
4285  // needed.
4286  if (gArgs.GetBoolArg("-parkdeepreorg", true)) {
4287  const CBlockIndex *pindexFork = m_chain.FindFork(pindex);
4288  if (pindexFork && pindexFork->nHeight + 1 < m_chain.Height()) {
4289  LogPrintf("Park block %s as it would cause a deep reorg.\n",
4290  pindex->GetBlockHash().ToString());
4291  pindex->nStatus = pindex->nStatus.withParked();
4292  m_blockman.m_dirty_blockindex.insert(pindex);
4293  }
4294  }
4295 
4296  // Header is valid/has work and the merkle tree is good.
4297  // Relay now, but if it does not build on our best tip, let the
4298  // SendMessages loop relay it.
4299  if (!IsInitialBlockDownload() && m_chain.Tip() == pindex->pprev) {
4300  GetMainSignals().NewPoWValidBlock(pindex, pblock);
4301  }
4302 
4303  // Write block to history file
4304  if (fNewBlock) {
4305  *fNewBlock = true;
4306  }
4307  try {
4309  block, pindex->nHeight, m_chain, m_params, dbp)};
4310  if (blockPos.IsNull()) {
4311  state.Error(strprintf(
4312  "%s: Failed to find position to write new block to disk",
4313  __func__));
4314  return false;
4315  }
4316  ReceivedBlockTransactions(block, pindex, blockPos);
4317  } catch (const std::runtime_error &e) {
4318  return AbortNode(state, std::string("System error: ") + e.what());
4319  }
4320 
4322 
4323  CheckBlockIndex();
4324 
4325  return true;
4326 }
4327 
4329  const Config &config, const std::shared_ptr<const CBlock> &block,
4330  bool force_processing, bool *new_block) {
4332 
4333  {
4334  if (new_block) {
4335  *new_block = false;
4336  }
4337 
4338  BlockValidationState state;
4339 
4340  // CheckBlock() does not support multi-threaded block validation
4341  // because CBlock::fChecked can cause data race.
4342  // Therefore, the following critical section must include the
4343  // CheckBlock() call as well.
4344  LOCK(cs_main);
4345 
4346  // Skipping AcceptBlock() for CheckBlock() failures means that we will
4347  // never mark a block as invalid if CheckBlock() fails. This is
4348  // protective against consensus failure if there are any unknown form
4349  // s of block malleability that cause CheckBlock() to fail; see e.g.
4350  // CVE-2012-2459 and
4351  // https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2019-February/016697.html.
4352  // Because CheckBlock() is not very expensive, the anti-DoS benefits of
4353  // caching failure (of a definitely-invalid block) are not substantial.
4354  bool ret =
4355  CheckBlock(*block, state, config.GetChainParams().GetConsensus(),
4356  BlockValidationOptions(config));
4357  if (ret) {
4358  // Store to disk
4359  ret = ActiveChainstate().AcceptBlock(
4360  config, block, state, force_processing, nullptr, new_block);
4361  }
4362 
4363  if (!ret) {
4364  GetMainSignals().BlockChecked(*block, state);
4365  return error("%s: AcceptBlock FAILED (%s)", __func__,
4366  state.ToString());
4367  }
4368  }
4369 
4371 
4372  // Only used to report errors, not invalidity - ignore it
4373  BlockValidationState state;
4374  if (!ActiveChainstate().ActivateBestChain(config, state, block)) {
4375  return error("%s: ActivateBestChain failed (%s)", __func__,
4376  state.ToString());
4377  }
4378 
4379  return true;
4380 }
4381 
4384  bool test_accept) {
4386  CChainState &active_chainstate = ActiveChainstate();
4387  if (!active_chainstate.GetMempool()) {
4388  TxValidationState state;
4389  state.Invalid(TxValidationResult::TX_NO_MEMPOOL, "no-mempool");
4390  return MempoolAcceptResult::Failure(state);
4391  }
4392  // Use GetConfig() temporarily. It will be removed in a follow-up by
4393  // making AcceptToMemoryPool take a CChainParams instead of a Config.
4394  // This avoids passing an extra Config argument to this function that will
4395  // be removed soon.
4396  auto result =
4397  AcceptToMemoryPool(::GetConfig(), active_chainstate, tx, GetTime(),
4398  /*bypass_limits=*/false, test_accept);
4399  active_chainstate.GetMempool()->check(
4400  active_chainstate.CoinsTip(), active_chainstate.m_chain.Height() + 1);
4401  return result;
4402 }
4403 
4405  CChainState &chainstate, const CBlock &block,
4406  CBlockIndex *pindexPrev,
4407  BlockValidationOptions validationOptions) {
4409  assert(pindexPrev && pindexPrev == chainstate.m_chain.Tip());
4410  CCoinsViewCache viewNew(&chainstate.CoinsTip());
4411  BlockHash block_hash(block.GetHash());
4412  CBlockIndex indexDummy(block);
4413  indexDummy.pprev = pindexPrev;
4414  indexDummy.nHeight = pindexPrev->nHeight + 1;
4415  indexDummy.phashBlock = &block_hash;
4416 
4417  // NOTE: CheckBlockHeader is called by CheckBlock
4418  if (!ContextualCheckBlockHeader(params, block, state, chainstate.m_blockman,
4419  pindexPrev, GetAdjustedTime())) {
4420  return error("%s: Consensus::ContextualCheckBlockHeader: %s", __func__,
4421  state.ToString());
4422  }
4423 
4424  if (!CheckBlock(block, state, params.GetConsensus(), validationOptions)) {
4425  return error("%s: Consensus::CheckBlock: %s", __func__,
4426  state.ToString());
4427  }
4428 
4429  if (!ContextualCheckBlock(block, state, params.GetConsensus(),
4430  pindexPrev)) {
4431  return error("%s: Consensus::ContextualCheckBlock: %s", __func__,
4432  state.ToString());
4433  }
4434 
4435  if (!chainstate.ConnectBlock(block, state, &indexDummy, viewNew,
4436  validationOptions, true)) {
4437  return false;
4438  }
4439 
4440  assert(state.IsValid());
4441  return true;
4442 }
4443 
4444 /* This function is called from the RPC code for pruneblockchain */
4445 void PruneBlockFilesManual(CChainState &active_chainstate,
4446  int nManualPruneHeight) {
4447  BlockValidationState state;
4448  if (active_chainstate.FlushStateToDisk(state, FlushStateMode::NONE,
4449  nManualPruneHeight)) {
4450  LogPrintf("%s: failed to flush state (%s)\n", __func__,
4451  state.ToString());
4452  }
4453 }
4454 
4455 void CChainState::LoadMempool(const Config &config, const ArgsManager &args) {
4456  if (!m_mempool) {
4457  return;
4458  }
4459  if (args.GetBoolArg("-persistmempool", DEFAULT_PERSIST_MEMPOOL)) {
4460  ::LoadMempool(config, *m_mempool, *this);
4461  }
4463 }
4464 
4467  const CCoinsViewCache &coins_cache = CoinsTip();
4468  // Never called when the coins view is empty
4469  assert(!coins_cache.GetBestBlock().IsNull());
4470  const CBlockIndex *tip = m_chain.Tip();
4471 
4472  if (tip && tip->GetBlockHash() == coins_cache.GetBestBlock()) {
4473  return true;
4474  }
4475 
4476  // Load pointer to end of best chain
4477  CBlockIndex *pindex =
4478  m_blockman.LookupBlockIndex(coins_cache.GetBestBlock());
4479  if (!pindex) {
4480  return false;
4481  }
4482  m_chain.SetTip(pindex);
4484 
4485  tip = m_chain.Tip();
4486  LogPrintf(
4487  "Loaded best chain: hashBestChain=%s height=%d date=%s progress=%f\n",
4488  tip->GetBlockHash().ToString(), m_chain.Height(),
4491  return true;
4492 }
4493 
4495  uiInterface.ShowProgress(_("Verifying blocks...").translated, 0, false);
4496 }
4497 
4499  uiInterface.ShowProgress("", 100, false);
4500 }
4501 
/*
 * CVerifyDB::VerifyDB: consistency check over the last nCheckDepth blocks of
 * the best chain, at increasing strictness levels (clamped to 0..4):
 *   0: blocks can be read from disk
 *   1: CheckBlock passes
 *   2: undo data can be read
 *   3: in-memory disconnect of tip blocks is clean (bounded by coins cache)
 *   4: reconnect the disconnected blocks via ConnectBlock
 * Returns true on success or early shutdown; false/error on inconsistency.
 *
 * NOTE(review): this doxygen listing elided embedded line 4505 (between the
 * signature and the first statement) — presumably an AssertLockHeld(cs_main);
 * confirm against the repository. The code below is otherwise byte-identical
 * to the listing.
 */
4502 bool CVerifyDB::VerifyDB(CChainState &chainstate, const Config &config,
4503  CCoinsView &coinsview, int nCheckLevel,
4504  int nCheckDepth) {
4506 
4507  const CChainParams &params = config.GetChainParams();
4508  const Consensus::Params &consensusParams = params.GetConsensus();
4509 
4510  if (chainstate.m_chain.Tip() == nullptr ||
4511  chainstate.m_chain.Tip()->pprev == nullptr) {
4512  return true;
4513  }
4514 
4515  // Verify blocks in the best chain
4516  if (nCheckDepth <= 0 || nCheckDepth > chainstate.m_chain.Height()) {
4517  nCheckDepth = chainstate.m_chain.Height();
4518  }
4519 
4520  nCheckLevel = std::max(0, std::min(4, nCheckLevel));
4521  LogPrintf("Verifying last %i blocks at level %i\n", nCheckDepth,
4522  nCheckLevel);
4523 
4524  CCoinsViewCache coins(&coinsview);
4525  CBlockIndex *pindex;
4526  CBlockIndex *pindexFailure = nullptr;
4527  int nGoodTransactions = 0;
4528  BlockValidationState state;
4529  int reportDone = 0;
4530  LogPrintfToBeContinued("[0%%]...");
4531 
4532  const bool is_snapshot_cs{!chainstate.m_from_snapshot_blockhash};
4533 
4534  for (pindex = chainstate.m_chain.Tip(); pindex && pindex->pprev;
4535  pindex = pindex->pprev) {
4536  const int percentageDone = std::max(
4537  1, std::min(99, (int)(((double)(chainstate.m_chain.Height() -
4538  pindex->nHeight)) /
4539  (double)nCheckDepth *
4540  (nCheckLevel >= 4 ? 50 : 100))));
4541  if (reportDone < percentageDone / 10) {
4542  // report every 10% step
4543  LogPrintfToBeContinued("[%d%%]...", percentageDone);
4544  reportDone = percentageDone / 10;
4545  }
4546 
4547  uiInterface.ShowProgress(_("Verifying blocks...").translated,
4548  percentageDone, false);
4549  if (pindex->nHeight <= chainstate.m_chain.Height() - nCheckDepth) {
4550  break;
4551  }
4552 
4553  if ((fPruneMode || is_snapshot_cs) && !pindex->nStatus.hasData()) {
4554  // If pruning or running under an assumeutxo snapshot, only go
4555  // back as far as we have data.
4556  LogPrintf("VerifyDB(): block verification stopping at height %d "
4557  "(pruning, no data)\n",
4558  pindex->nHeight);
4559  break;
4560  }
4561 
4562  CBlock block;
4563 
4564  // check level 0: read from disk
4565  if (!ReadBlockFromDisk(block, pindex, consensusParams)) {
4566  return error(
4567  "VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s",
4568  pindex->nHeight, pindex->GetBlockHash().ToString());
4569  }
4570 
4571  // check level 1: verify block validity
4572  if (nCheckLevel >= 1 && !CheckBlock(block, state, consensusParams,
4573  BlockValidationOptions(config))) {
4574  return error("%s: *** found bad block at %d, hash=%s (%s)\n",
4575  __func__, pindex->nHeight,
4576  pindex->GetBlockHash().ToString(), state.ToString());
4577  }
4578 
4579  // check level 2: verify undo validity
4580  if (nCheckLevel >= 2 && pindex) {
4581  CBlockUndo undo;
4582  if (!pindex->GetUndoPos().IsNull()) {
4583  if (!UndoReadFromDisk(undo, pindex)) {
4584  return error(
4585  "VerifyDB(): *** found bad undo data at %d, hash=%s\n",
4586  pindex->nHeight, pindex->GetBlockHash().ToString());
4587  }
4588  }
4589  }
4590  // check level 3: check for inconsistencies during memory-only
4591  // disconnect of tip blocks
4592  size_t curr_coins_usage = coins.DynamicMemoryUsage() +
4593  chainstate.CoinsTip().DynamicMemoryUsage();
4594 
4595  if (nCheckLevel >= 3 &&
4596  curr_coins_usage <= chainstate.m_coinstip_cache_size_bytes) {
4597  assert(coins.GetBestBlock() == pindex->GetBlockHash());
4598  DisconnectResult res =
4599  chainstate.DisconnectBlock(block, pindex, coins);
4600  if (res == DisconnectResult::FAILED) {
4601  return error("VerifyDB(): *** irrecoverable inconsistency in "
4602  "block data at %d, hash=%s",
4603  pindex->nHeight,
4604  pindex->GetBlockHash().ToString());
4605  }
4606 
4607  if (res == DisconnectResult::UNCLEAN) {
4608  nGoodTransactions = 0;
4609  pindexFailure = pindex;
4610  } else {
4611  nGoodTransactions += block.vtx.size();
4612  }
4613  }
4614 
4615  if (ShutdownRequested()) {
4616  return true;
4617  }
4618  }
4619 
4620  if (pindexFailure) {
4621  return error("VerifyDB(): *** coin database inconsistencies found "
4622  "(last %i blocks, %i good transactions before that)\n",
4623  chainstate.m_chain.Height() - pindexFailure->nHeight + 1,
4624  nGoodTransactions);
4625  }
4626 
4627  // store block count as we move pindex at check level >= 4
4628  int block_count = chainstate.m_chain.Height() - pindex->nHeight;
4629 
4630  // check level 4: try reconnecting blocks
4631  if (nCheckLevel >= 4) {
4632  while (pindex != chainstate.m_chain.Tip()) {
4633  const int percentageDone = std::max(
4634  1, std::min(99, 100 - int(double(chainstate.m_chain.Height() -
4635  pindex->nHeight) /
4636  double(nCheckDepth) * 50)));
4637  if (reportDone < percentageDone / 10) {
4638  // report every 10% step
4639  LogPrintfToBeContinued("[%d%%]...", percentageDone);
4640  reportDone = percentageDone / 10;
4641  }
4642  uiInterface.ShowProgress(_("Verifying blocks...").translated,
4643  percentageDone, false);
4644  pindex = chainstate.m_chain.Next(pindex);
4645  CBlock block;
4646  if (!ReadBlockFromDisk(block, pindex, consensusParams)) {
4647  return error(
4648  "VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s",
4649  pindex->nHeight, pindex->GetBlockHash().ToString());
4650  }
4651  if (!chainstate.ConnectBlock(block, state, pindex, coins,
4652  BlockValidationOptions(config))) {
4653  return error("VerifyDB(): *** found unconnectable block at %d, "
4654  "hash=%s (%s)",
4655  pindex->nHeight, pindex->GetBlockHash().ToString(),
4656  state.ToString());
4657  }
4658  if (ShutdownRequested()) {
4659  return true;
4660  }
4661  }
4662  }
4663 
4664  LogPrintf("[DONE].\n");
4665  LogPrintf("No coin database inconsistencies in last %i blocks (%i "
4666  "transactions)\n",
4667  block_count, nGoodTransactions);
4668 
4669  return true;
4670 }
4671 
4677  CCoinsViewCache &view) {
4679  // TODO: merge with ConnectBlock
4680  CBlock block;
4681  if (!ReadBlockFromDisk(block, pindex, m_params.GetConsensus())) {
4682  return error("ReplayBlock(): ReadBlockFromDisk failed at %d, hash=%s",
4683  pindex->nHeight, pindex->GetBlockHash().ToString());
4684  }
4685 
4686  for (const CTransactionRef &tx : block.vtx) {
4687  // Pass check = true as every addition may be an overwrite.
4688  AddCoins(view, *tx, pindex->nHeight, true);
4689  }
4690 
4691  for (const CTransactionRef &tx : block.vtx) {
4692  if (tx->IsCoinBase()) {
4693  continue;
4694  }
4695 
4696  for (const CTxIn &txin : tx->vin) {
4697  view.SpendCoin(txin.prevout);
4698  }
4699  }
4700 
4701  return true;
4702 }
4703 
// NOTE(review): the function signature line was dropped by the doc
// extraction; the error strings below identify this as
// CChainState::ReplayBlocks() -- confirm against the upstream file.
// Purpose: repair the coins DB after an interrupted flush by disconnecting
// blocks unique to the old tip and re-applying blocks up to the new tip
// recorded in the DB head markers.
4705  LOCK(cs_main);
4706 
4707  CCoinsView &db = this->CoinsDB();
4708  CCoinsViewCache cache(&db);
4709 
      // GetHeadBlocks() returns the {new tip, old tip} pair written at the
      // start of a flush; empty means the last flush completed cleanly.
4710  std::vector<BlockHash> hashHeads = db.GetHeadBlocks();
4711  if (hashHeads.empty()) {
4712  // We're already in a consistent state.
4713  return true;
4714  }
4715  if (hashHeads.size() != 2) {
4716  return error("ReplayBlocks(): unknown inconsistent state");
4717  }
4718 
4719  uiInterface.ShowProgress(_("Replaying blocks...").translated, 0, false);
4720  LogPrintf("Replaying blocks\n");
4721 
4722  // Old tip during the interrupted flush.
4723  const CBlockIndex *pindexOld = nullptr;
4724  // New tip during the interrupted flush.
4725  const CBlockIndex *pindexNew;
4726  // Latest block common to both the old and the new tip.
4727  const CBlockIndex *pindexFork = nullptr;
4728 
4729  if (m_blockman.m_block_index.count(hashHeads[0]) == 0) {
4730  return error(
4731  "ReplayBlocks(): reorganization to unknown block requested");
4732  }
4733 
4734  pindexNew = &(m_blockman.m_block_index[hashHeads[0]]);
4735 
4736  if (!hashHeads[1].IsNull()) {
4737  // The old tip is allowed to be 0, indicating it's the first flush.
4738  if (m_blockman.m_block_index.count(hashHeads[1]) == 0) {
4739  return error(
4740  "ReplayBlocks(): reorganization from unknown block requested");
4741  }
4742 
4743  pindexOld = &(m_blockman.m_block_index[hashHeads[1]]);
4744  pindexFork = LastCommonAncestor(pindexOld, pindexNew);
4745  assert(pindexFork != nullptr);
4746  }
4747 
4748  // Rollback along the old branch.
4749  while (pindexOld != pindexFork) {
4750  if (pindexOld->nHeight > 0) {
4751  // Never disconnect the genesis block.
4752  CBlock block;
4753  if (!ReadBlockFromDisk(block, pindexOld, m_params.GetConsensus())) {
4754  return error("RollbackBlock(): ReadBlockFromDisk() failed at "
4755  "%d, hash=%s",
4756  pindexOld->nHeight,
4757  pindexOld->GetBlockHash().ToString());
4758  }
4759 
4760  LogPrintf("Rolling back %s (%i)\n",
4761  pindexOld->GetBlockHash().ToString(), pindexOld->nHeight);
4762  DisconnectResult res = DisconnectBlock(block, pindexOld, cache);
4763  if (res == DisconnectResult::FAILED) {
4764  return error(
4765  "RollbackBlock(): DisconnectBlock failed at %d, hash=%s",
4766  pindexOld->nHeight, pindexOld->GetBlockHash().ToString());
4767  }
4768 
4769  // If DisconnectResult::UNCLEAN is returned, it means a non-existing
4770  // UTXO was deleted, or an existing UTXO was overwritten. It
4771  // corresponds to cases where the block-to-be-disconnect never had
4772  // all its operations applied to the UTXO set. However, as both
4773  // writing a UTXO and deleting a UTXO are idempotent operations, the
4774  // result is still a version of the UTXO set with the effects of
4775  // that block undone.
4776  }
4777  pindexOld = pindexOld->pprev;
4778  }
4779 
4780  // Roll forward from the forking point to the new tip.
4781  int nForkHeight = pindexFork ? pindexFork->nHeight : 0;
4782  for (int nHeight = nForkHeight + 1; nHeight <= pindexNew->nHeight;
4783  ++nHeight) {
4784  const CBlockIndex *pindex = pindexNew->GetAncestor(nHeight);
4785  LogPrintf("Rolling forward %s (%i)\n",
4786  pindex->GetBlockHash().ToString(), nHeight);
4787  uiInterface.ShowProgress(_("Replaying blocks...").translated,
4788  (int)((nHeight - nForkHeight) * 100.0 /
4789  (pindexNew->nHeight - nForkHeight)),
4790  false);
4791  if (!RollforwardBlock(pindex, cache)) {
4792  return false;
4793  }
4794  }
4795 
      // Commit the replayed state: record the new tip as the cache's best
      // block and flush it down to the underlying coins DB.
4796  cache.SetBestBlock(pindexNew->GetBlockHash());
4797  cache.Flush();
4798  uiInterface.ShowProgress("", 100, false);
4799  return true;
4800 }
4801 
4802 // May NOT be used after any connections are up as much of the peer-processing
4803 // logic assumes a consistent block index state
      // NOTE(review): the signature line was dropped by the doc extraction;
      // from the members reset here (sequence counter, fork-warning pointers,
      // tip candidate set) this is the chainstate's unload/reset routine --
      // confirm the exact name upstream. Per the comment above, it must not
      // be called once peer connections are up.
4806  nBlockSequenceId = 1;
4807  m_best_fork_tip = nullptr;
4808  m_best_fork_base = nullptr;
4809  setBlockIndexCandidates.clear();
4810 }
4811 
// NOTE(review): the signature line was dropped by the doc extraction; the
// use of m_blockman, GetAll() and per-chainstate candidate sets indicates
// this is the chainstate manager's LoadBlockIndex() -- confirm upstream.
// Loads the block index from the DB (unless reindexing), seeds each
// chainstate's setBlockIndexCandidates, and tracks best invalid/parked/
// header entries. Returns false on DB failure or shutdown request.
4814  // Load block index from databases
4815  bool needs_init = fReindex;
4816  if (!fReindex) {
4817  bool ret = m_blockman.LoadBlockIndexDB();
4818  if (!ret) {
4819  return false;
4820  }
4821 
4822  std::vector<CBlockIndex *> vSortedByHeight{
4823  m_blockman.GetAllBlockIndices()};
         // NOTE(review): the comparator argument on the next line (source
         // line 4825) was elided by the extraction -- presumably a
         // height-only comparator; restore from upstream.
4824  std::sort(vSortedByHeight.begin(), vSortedByHeight.end(),
4826 
4827  // Find start of assumed-valid region.
4828  int first_assumed_valid_height = std::numeric_limits<int>::max();
4829 
4830  for (const CBlockIndex *block : vSortedByHeight) {
4831  if (block->IsAssumedValid()) {
4832  auto chainstates = GetAll();
4833 
4834  // If we encounter an assumed-valid block index entry, ensure
4835  // that we have one chainstate that tolerates assumed-valid
4836  // entries and another that does not (i.e. the background
4837  // validation chainstate), since assumed-valid entries should
4838  // always be pending validation by a fully-validated chainstate.
4839  auto any_chain = [&](auto fnc) {
4840  return std::any_of(chainstates.cbegin(), chainstates.cend(),
4841  fnc);
4842  };
4843  assert(any_chain([](auto chainstate) {
4844  return chainstate->reliesOnAssumedValid();
4845  }));
4846  assert(any_chain([](auto chainstate) {
4847  return !chainstate->reliesOnAssumedValid();
4848  }));
4849 
4850  first_assumed_valid_height = block->nHeight;
4851  break;
4852  }
4853  }
4854 
4855  for (CBlockIndex *pindex : vSortedByHeight) {
4856  if (ShutdownRequested()) {
4857  return false;
4858  }
             // NOTE(review): one condition line (source line 4860) between
             // the next two lines was elided by the extraction; restore
             // from upstream before relying on this predicate.
4859  if (pindex->IsAssumedValid() ||
4861  (pindex->HaveTxsDownloaded() || pindex->pprev == nullptr))) {
4862  // Fill each chainstate's block candidate set. Only add
4863  // assumed-valid blocks to the tip candidate set if the
4864  // chainstate is allowed to rely on assumed-valid blocks.
4865  //
4866  // If all setBlockIndexCandidates contained the assumed-valid
4867  // blocks, the background chainstate's ActivateBestChain() call
4868  // would add assumed-valid blocks to the chain (based on how
4869  // FindMostWorkChain() works). Obviously we don't want this
4870  // since the purpose of the background validation chain is to
4871  // validate assumed-valid blocks.
4872  //
4873  // Note: This is considering all blocks whose height is greater
4874  // or equal to the first assumed-valid block to be assumed-valid
4875  // blocks, and excluding them from the background chainstate's
4876  // setBlockIndexCandidates set. This does mean that some blocks
4877  // which are not technically assumed-valid (later blocks on a
4878  // fork beginning before the first assumed-valid block) might
4879  // not get added to the background chainstate, but this is ok,
4880  // because they will still be attached to the active chainstate
4881  // if they actually contain more work.
4882  //
4883  // Instead of this height-based approach, an earlier attempt was
4884  // made at detecting "holistically" whether the block index
4885  // under consideration relied on an assumed-valid ancestor, but
4886  // this proved to be too slow to be practical.
4887  for (CChainState *chainstate : GetAll()) {
4888  if (chainstate->reliesOnAssumedValid() ||
4889  pindex->nHeight < first_assumed_valid_height) {
4890  chainstate->setBlockIndexCandidates.insert(pindex);
4891  }
4892  }
4893  }
4894 
             // Track the most-work invalid header seen, for later warnings.
4895  if (pindex->nStatus.isInvalid() &&
4896  (!m_best_invalid ||
4897  pindex->nChainWork > m_best_invalid->nChainWork)) {
4898  m_best_invalid = pindex;
4899  }
4900 
             // Likewise track the most-work parked header.
4901  if (pindex->nStatus.isOnParkedChain() &&
4902  (!m_best_parked ||
4903  pindex->nChainWork > m_best_parked->nChainWork)) {
4904  m_best_parked = pindex;
4905  }
4906 
             // NOTE(review): the comparison line (source line 4909) of this
             // condition was elided by the extraction -- presumably a
             // chain-work comparison against m_best_header; confirm upstream.
4907  if (pindex->IsValid(BlockValidity::TREE) &&
4908  (m_best_header == nullptr ||
4910  m_best_header = pindex;
4911  }
4912  }
4913 
4914  needs_init = m_blockman.m_block_index.empty();
4915  }
4916 
4917  if (needs_init) {
4918  // Everything here is for *new* reindex/DBs. Thus, though
4919  // LoadBlockIndexDB may have set fReindex if we shut down
4920  // mid-reindex previously, we don't check fReindex and
4921  // instead only check it prior to LoadBlockIndexDB to set
4922  // needs_init.
4923 
4924  LogPrintf("Initializing databases...\n");
4925  }
4926  return true;
4927 }
4928 
// NOTE(review): the signature line was dropped by the doc extraction; the
// body (SaveBlockToDisk of m_params.GenesisBlock(), AddToBlockIndex) marks
// this as the genesis-block load routine -- confirm the exact name upstream.
// Writes the genesis block to disk and registers it in the block index if
// it is not already present; returns false on disk-write failure.
4930  LOCK(cs_main);
4931 
4932  // Check whether we're already initialized by checking for genesis in
4933  // m_blockman.m_block_index. Note that we can't use m_chain here, since it
4934  // is set based on the coins db, not the block index db, which is the only
4935  // thing loaded at this point.
4936  if (m_blockman.m_block_index.count(m_params.GenesisBlock().GetHash())) {
4937  return true;
4938  }
4939 
4940  try {
4941  const CBlock &block = m_params.GenesisBlock();
4942  FlatFilePos blockPos{
4943  m_blockman.SaveBlockToDisk(block, 0, m_chain, m_params, nullptr)};
4944  if (blockPos.IsNull()) {
4945  return error("%s: writing genesis block to disk failed", __func__);
4946  }
4947  CBlockIndex *pindex =
4948  m_blockman.AddToBlockIndex(block, m_chainman.m_best_header);
4949  ReceivedBlockTransactions(block, pindex, blockPos);
4950  } catch (const std::runtime_error &e) {
4951  return error("%s: failed to write genesis block: %s", __func__,
4952  e.what());
4953  }
4954 
4955  return true;
4956 }
4957 
// Import blocks from an external blk*.dat file (reindex / -loadblock).
// Scans the buffered file for the network magic, deserializes each block,
// accepts in-order blocks immediately and parks out-of-order blocks in
// mapBlocksUnknownParent until their parent arrives.
// NOTE(review): several interior lines were elided by the doc extraction
// (e.g. the magic-buffer declaration at source line 4987, the memcmp length
// argument at 4992, the LookupBlockIndex calls at 5023/5038, and BCLog
// category arguments); restore them from upstream -- the hedged notes below
// mark each gap.
4958 void CChainState::LoadExternalBlockFile(const Config &config, FILE *fileIn,
4959  FlatFilePos *dbp) {
4961  // Map of disk positions for blocks with unknown parent (only used for
4962  // reindex)
4963  static std::multimap<uint256, FlatFilePos> mapBlocksUnknownParent;
4964  int64_t nStart = GetTimeMillis();
4965 
4966  int nLoaded = 0;
4967  try {
4968  // This takes over fileIn and calls fclose() on it in the CBufferedFile
4969  // destructor. Make sure we have at least 2*MAX_TX_SIZE space in there
4970  // so any transaction can fit in the buffer.
4971  CBufferedFile blkdat(fileIn, 2 * MAX_TX_SIZE, MAX_TX_SIZE + 8, SER_DISK,
4972  CLIENT_VERSION);
4973  uint64_t nRewind = blkdat.GetPos();
4974  while (!blkdat.eof()) {
4975  if (ShutdownRequested()) {
4976  return;
4977  }
4978 
4979  blkdat.SetPos(nRewind);
4980  // Start one byte further next time, in case of failure.
4981  nRewind++;
4982  // Remove former limit.
4983  blkdat.SetLimit();
4984  unsigned int nSize = 0;
4985  try {
4986  // Locate a header.
                 // NOTE(review): the declaration of `buf` (source line 4987)
                 // was elided by the extraction.
4988  blkdat.FindByte(char(m_params.DiskMagic()[0]));
4989  nRewind = blkdat.GetPos() + 1;
4990  blkdat >> buf;
                 // NOTE(review): the memcmp length argument / closing of this
                 // condition (source line 4992) was elided.
4991  if (memcmp(buf, m_params.DiskMagic().data(),
4993  continue;
4994  }
4995 
4996  // Read size.
4997  blkdat >> nSize;
                 // 80 bytes is the serialized block-header size; anything
                 // smaller cannot be a block.
4998  if (nSize < 80) {
4999  continue;
5000  }
5001  } catch (const std::exception &) {
5002  // No valid block header found; don't complain.
5003  break;
5004  }
5005 
5006  try {
5007  // read block
5008  uint64_t nBlockPos = blkdat.GetPos();
5009  if (dbp) {
5010  dbp->nPos = nBlockPos;
5011  }
5012  blkdat.SetLimit(nBlockPos + nSize);
5013  std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
5014  CBlock &block = *pblock;
5015  blkdat >> block;
5016  nRewind = blkdat.GetPos();
5017 
5018  const BlockHash hash = block.GetHash();
5019  {
5020  LOCK(cs_main);
5021  // detect out of order blocks, and store them for later
                     // NOTE(review): the parent-lookup half of this condition
                     // (source line 5023) was elided by the extraction.
5022  if (hash != m_params.GetConsensus().hashGenesisBlock &&
5024  LogPrint(
5026  "%s: Out of order block %s, parent %s not known\n",
5027  __func__, hash.ToString(),
5028  block.hashPrevBlock.ToString());
5029  if (dbp) {
5030  mapBlocksUnknownParent.insert(
5031  std::make_pair(block.hashPrevBlock, *dbp));
5032  }
5033  continue;
5034  }
5035 
5036  // process in case the block isn't known yet
                     // NOTE(review): the LookupBlockIndex call initializing
                     // pindex (source line 5038) was elided.
5037  const CBlockIndex *pindex =
5039  if (!pindex || !pindex->nStatus.hasData()) {
5040  BlockValidationState state;
5041  if (AcceptBlock(config, pblock, state, true, dbp,
5042  nullptr)) {
5043  nLoaded++;
5044  }
5045  if (state.IsError()) {
5046  break;
5047  }
                     // NOTE(review): the genesis-hash comparison operand
                     // (source line 5049) of this else-if was elided.
5048  } else if (hash !=
5050  pindex->nHeight % 1000 == 0) {
5051  LogPrint(
5053  "Block Import: already had block %s at height %d\n",
5054  hash.ToString(), pindex->nHeight);
5055  }
5056  }
5057 
5058  // Activate the genesis block so normal node progress can
5059  // continue
5060  if (hash == m_params.GetConsensus().hashGenesisBlock) {
5061  BlockValidationState state;
5062  if (!ActivateBestChain(config, state, nullptr)) {
5063  break;
5064  }
5065  }
5066 
5067  NotifyHeaderTip(*this);
5068 
5069  // Recursively process earlier encountered successors of this
5070  // block
5071  std::deque<uint256> queue;
5072  queue.push_back(hash);
5073  while (!queue.empty()) {
5074  uint256 head = queue.front();
5075  queue.pop_front();
5076  std::pair<std::multimap<uint256, FlatFilePos>::iterator,
5077  std::multimap<uint256, FlatFilePos>::iterator>
5078  range = mapBlocksUnknownParent.equal_range(head);
5079  while (range.first != range.second) {
5080  std::multimap<uint256, FlatFilePos>::iterator it =
5081  range.first;
5082  std::shared_ptr<CBlock> pblockrecursive =
5083  std::make_shared<CBlock>();
5084  if (ReadBlockFromDisk(*pblockrecursive, it->second,
5085  m_params.GetConsensus())) {
5086  LogPrint(
5088  "%s: Processing out of order child %s of %s\n",
5089  __func__, pblockrecursive->GetHash().ToString(),
5090  head.ToString());
5091  LOCK(cs_main);
5092  BlockValidationState dummy;
5093  if (AcceptBlock(config, pblockrecursive, dummy,
5094  true, &it->second, nullptr)) {
5095  nLoaded++;
5096  queue.push_back(pblockrecursive->GetHash());
5097  }
5098  }
5099  range.first++;
5100  mapBlocksUnknownParent.erase(it);
5101  NotifyHeaderTip(*this);
5102  }
5103  }
5104  } catch (const std::exception &e) {
5105  LogPrintf("%s: Deserialize or I/O error - %s\n", __func__,
5106  e.what());
5107  }
5108  }
5109  } catch (const std::runtime_error &e) {
5110  AbortNode(std::string("System error: ") + e.what());
5111  }
5112 
5113  LogPrintf("Loaded %i blocks from external file in %dms\n", nLoaded,
5114  GetTimeMillis() - nStart);
5115 }
5116 
// NOTE(review): the signature line was dropped by the doc extraction; the
// fCheckBlockIndex gate and the consistency asserts identify this as
// CChainState::CheckBlockIndex() -- confirm upstream. It walks the whole
// block tree depth-first and asserts a large set of invariants between
// nStatus flags, nTx, chain work, setBlockIndexCandidates and
// m_blocks_unlinked. Debug-only: enabled by -checkblockindex.
5118  if (!fCheckBlockIndex) {
5119  return;
5120  }
5121 
5122  LOCK(cs_main);
5123 
5124  // During a reindex, we read the genesis block and call CheckBlockIndex
5125  // before ActivateBestChain, so we have the genesis block in
5126  // m_blockman.m_block_index but no active chain. (A few of the tests when
5127  // iterating the block tree require that m_chain has been initialized.)
5128  if (m_chain.Height() < 0) {
5129  assert(m_blockman.m_block_index.size() <= 1);
5130  return;
5131  }
5132 
5133  // Build forward-pointing map of the entire block tree.
5134  std::multimap<CBlockIndex *, CBlockIndex *> forward;
5135  for (auto &[_, block_index] : m_blockman.m_block_index) {
5136  forward.emplace(block_index.pprev, &block_index);
5137  }
5138 
5139  assert(forward.size() == m_blockman.m_block_index.size());
5140 
5141  std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
5142  std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
5143  rangeGenesis = forward.equal_range(nullptr);
5144  CBlockIndex *pindex = rangeGenesis.first->second;
5145  rangeGenesis.first++;
5146  // There is only one index entry with parent nullptr.
5147  assert(rangeGenesis.first == rangeGenesis.second);
5148 
5149  // Iterate over the entire block tree, using depth-first search.
5150  // Along the way, remember whether there are blocks on the path from genesis
5151  // block being explored which are the first to have certain properties.
5152  size_t nNodes = 0;
5153  int nHeight = 0;
5154  // Oldest ancestor of pindex which is invalid.
5155  CBlockIndex *pindexFirstInvalid = nullptr;
5156  // Oldest ancestor of pindex which is parked.
5157  CBlockIndex *pindexFirstParked = nullptr;
5158  // Oldest ancestor of pindex which does not have data available.
5159  CBlockIndex *pindexFirstMissing = nullptr;
5160  // Oldest ancestor of pindex for which nTx == 0.
5161  CBlockIndex *pindexFirstNeverProcessed = nullptr;
5162  // Oldest ancestor of pindex which does not have BLOCK_VALID_TREE
5163  // (regardless of being valid or not).
5164  CBlockIndex *pindexFirstNotTreeValid = nullptr;
5165  // Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS
5166  // (regardless of being valid or not).
5167  CBlockIndex *pindexFirstNotTransactionsValid = nullptr;
5168  // Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN
5169  // (regardless of being valid or not).
5170  CBlockIndex *pindexFirstNotChainValid = nullptr;
5171  // Oldest ancestor of pindex which does not have BLOCK_VALID_SCRIPTS
5172  // (regardless of being valid or not).
5173  CBlockIndex *pindexFirstNotScriptsValid = nullptr;
5174  while (pindex != nullptr) {
5175  nNodes++;
5176  if (pindexFirstInvalid == nullptr && pindex->nStatus.hasFailed()) {
5177  pindexFirstInvalid = pindex;
5178  }
5179  if (pindexFirstParked == nullptr && pindex->nStatus.isParked()) {
5180  pindexFirstParked = pindex;
5181  }
5182  // Assumed-valid index entries will not have data since we haven't
5183  // downloaded the full block yet.
5184  if (pindexFirstMissing == nullptr && !pindex->nStatus.hasData() &&
5185  !pindex->IsAssumedValid()) {
5186  pindexFirstMissing = pindex;
5187  }
5188  if (pindexFirstNeverProcessed == nullptr && pindex->nTx == 0) {
5189  pindexFirstNeverProcessed = pindex;
5190  }
5191  if (pindex->pprev != nullptr && pindexFirstNotTreeValid == nullptr &&
5192  pindex->nStatus.getValidity() < BlockValidity::TREE) {
5193  pindexFirstNotTreeValid = pindex;
5194  }
5195  if (pindex->pprev != nullptr && !pindex->IsAssumedValid()) {
5196  if (pindexFirstNotTransactionsValid == nullptr &&
5197  pindex->nStatus.getValidity() < BlockValidity::TRANSACTIONS) {
5198  pindexFirstNotTransactionsValid = pindex;
5199  }
5200  if (pindexFirstNotChainValid == nullptr &&
5201  pindex->nStatus.getValidity() < BlockValidity::CHAIN) {
5202  pindexFirstNotChainValid = pindex;
5203  }
5204  if (pindexFirstNotScriptsValid == nullptr &&
5205  pindex->nStatus.getValidity() < BlockValidity::SCRIPTS) {
5206  pindexFirstNotScriptsValid = pindex;
5207  }
5208  }
5209 
5210  // Begin: actual consistency checks.
5211  if (pindex->pprev == nullptr) {
5212  // Genesis block checks.
5213  // Genesis block's hash must match.
             // NOTE(review): the right-hand side of this assert (source line
             // 5215, the consensus genesis hash) was elided by the extraction.
5214  assert(pindex->GetBlockHash() ==
5216  // The current active chain's genesis block must be this block.
5217  assert(pindex == m_chain.Genesis());
5218  }
5219  if (!pindex->HaveTxsDownloaded()) {
5220  // nSequenceId can't be set positive for blocks that aren't linked
5221  // (negative is used for preciousblock)
5222  assert(pindex->nSequenceId <= 0);
5223  }
5224  // VALID_TRANSACTIONS is equivalent to nTx > 0 for all nodes (whether or
5225  // not pruning has occurred). HAVE_DATA is only equivalent to nTx > 0
5226  // (or VALID_TRANSACTIONS) if no pruning has occurred.
5227  // Unless these indexes are assumed valid and pending block download on
5228  // a background chainstate.
5229  if (!m_blockman.m_have_pruned && !pindex->IsAssumedValid()) {
5230  // If we've never pruned, then HAVE_DATA should be equivalent to nTx
5231  // > 0
5232  assert(pindex->nStatus.hasData() == (pindex->nTx > 0));
5233  assert(pindexFirstMissing == pindexFirstNeverProcessed);
5234  } else if (pindex->nStatus.hasData()) {
5235  // If we have pruned, then we can only say that HAVE_DATA implies
5236  // nTx > 0
5237  assert(pindex->nTx > 0);
5238  }
5239  if (pindex->nStatus.hasUndo()) {
5240  assert(pindex->nStatus.hasData());
5241  }
5242  if (pindex->IsAssumedValid()) {
5243  // Assumed-valid blocks should have some nTx value.
5244  assert(pindex->nTx > 0);
5245  // Assumed-valid blocks should connect to the main chain.
5246  assert(pindex->nStatus.getValidity() >= BlockValidity::TREE);
5247  } else {
5248  // Otherwise there should only be an nTx value if we have
5249  // actually seen a block's transactions.
5250  // This is pruning-independent.
5251  assert((pindex->nStatus.getValidity() >=
5252  BlockValidity::TRANSACTIONS) == (pindex->nTx > 0));
5253  }
5254  // All parents having had data (at some point) is equivalent to all
5255  // parents being VALID_TRANSACTIONS, which is equivalent to
5256  // HaveTxsDownloaded(). The next two asserts therefore check that both
5257  // the "never processed" and the "not transactions-valid" trackers
5258  // agree with HaveTxsDownloaded().
5259  assert((pindexFirstNeverProcessed == nullptr) ==
5260  (pindex->HaveTxsDownloaded()));
5261  assert((pindexFirstNotTransactionsValid == nullptr) ==
5262  (pindex->HaveTxsDownloaded()));
5263  // nHeight must be consistent.
5264  assert(pindex->nHeight == nHeight);
5265  // For every block except the genesis block, the chainwork must be
5266  // larger than the parent's.
5267  assert(pindex->pprev == nullptr ||
5268  pindex->nChainWork >= pindex->pprev->nChainWork);
5269  // The pskip pointer must point back for all but the first 2 blocks.
5270  assert(nHeight < 2 ||
5271  (pindex->pskip && (pindex->pskip->nHeight < nHeight)));
5272  // All m_blockman.m_block_index entries must at least be TREE valid
5273  assert(pindexFirstNotTreeValid == nullptr);
5274  if (pindex->nStatus.getValidity() >= BlockValidity::TREE) {
5275  // TREE valid implies all parents are TREE valid
5276  assert(pindexFirstNotTreeValid == nullptr);
5277  }
5278  if (pindex->nStatus.getValidity() >= BlockValidity::CHAIN) {
5279  // CHAIN valid implies all parents are CHAIN valid
5280  assert(pindexFirstNotChainValid == nullptr);
5281  }
5282  if (pindex->nStatus.getValidity() >= BlockValidity::SCRIPTS) {
5283  // SCRIPTS valid implies all parents are SCRIPTS valid
5284  assert(pindexFirstNotScriptsValid == nullptr);
5285  }
5286  if (pindexFirstInvalid == nullptr) {
5287  // Checks for not-invalid blocks.
5288  // The failed mask cannot be set for blocks without invalid parents.
5289  assert(!pindex->nStatus.isInvalid());
5290  }
5291  if (pindexFirstParked == nullptr) {
5292  // Checks for not-parked blocks.
5293  // The parked mask cannot be set for blocks without parked parents.
5294  // (i.e., hasParkedParent only if an ancestor is properly parked).
5295  assert(!pindex->nStatus.isOnParkedChain());
5296  }
5297  if (!CBlockIndexWorkComparator()(pindex, m_chain.Tip()) &&
5298  pindexFirstNeverProcessed == nullptr) {
5299  if (pindexFirstInvalid == nullptr) {
5300  // Don't perform this check for the background chainstate since
5301  // its setBlockIndexCandidates shouldn't have some entries (i.e.
5302  // those past the snapshot block) which do exist in the block
5303  // index for the active chainstate.
5304  if (this == &m_chainman.ActiveChainstate()) {
5305  // If this block sorts at least as good as the current tip
5306  // and is valid and we have all data for its parents, it
5307  // must be in setBlockIndexCandidates or be parked.
5308  if (pindexFirstMissing == nullptr) {
5309  assert(pindex->nStatus.isOnParkedChain() ||
5310  setBlockIndexCandidates.count(pindex));
5311  }
5312  // m_chain.Tip() must also be there even if some data has
5313  // been pruned.
5314  if (pindex == m_chain.Tip()) {
5315  assert(setBlockIndexCandidates.count(pindex));
5316  }
5317  }
5318  // If some parent is missing, then it could be that this block
5319  // was in setBlockIndexCandidates but had to be removed because
5320  // of the missing data. In this case it must be in
5321  // m_blocks_unlinked -- see test below.
5322  }
5323  } else {
5324  // If this block sorts worse than the current tip or some ancestor's
5325  // block has never been seen, it cannot be in
5326  // setBlockIndexCandidates.
5327  assert(setBlockIndexCandidates.count(pindex) == 0);
5328  }
5329  // Check whether this block is in m_blocks_unlinked.
5330  std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
5331  std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
5332  rangeUnlinked =
5333  m_blockman.m_blocks_unlinked.equal_range(pindex->pprev);
5334  bool foundInUnlinked = false;
5335  while (rangeUnlinked.first != rangeUnlinked.second) {
5336  assert(rangeUnlinked.first->first == pindex->pprev);
5337  if (rangeUnlinked.first->second == pindex) {
5338  foundInUnlinked = true;
5339  break;
5340  }
5341  rangeUnlinked.first++;
5342  }
5343  if (pindex->pprev && pindex->nStatus.hasData() &&
5344  pindexFirstNeverProcessed != nullptr &&
5345  pindexFirstInvalid == nullptr) {
5346  // If this block has block data available, some parent was never
5347  // received, and has no invalid parents, it must be in
5348  // m_blocks_unlinked.
5349  assert(foundInUnlinked);
5350  }
5351  if (!pindex->nStatus.hasData()) {
5352  // Can't be in m_blocks_unlinked if we don't HAVE_DATA
5353  assert(!foundInUnlinked);
5354  }
5355  if (pindexFirstMissing == nullptr) {
5356  // We aren't missing data for any parent -- cannot be in
5357  // m_blocks_unlinked.
5358  assert(!foundInUnlinked);
5359  }
5360  if (pindex->pprev && pindex->nStatus.hasData() &&
5361  pindexFirstNeverProcessed == nullptr &&
5362  pindexFirstMissing != nullptr) {
5363  // We HAVE_DATA for this block, have received data for all parents
5364  // at some point, but we're currently missing data for some parent.
5365  // We must have pruned.
             // NOTE(review): one line (source line 5366, presumably an assert
             // on the pruned flag) was elided here by the extraction.
5367  // This block may have entered m_blocks_unlinked if:
5368  // - it has a descendant that at some point had more work than the
5369  // tip, and
5370  // - we tried switching to that descendant but were missing
5371  // data for some intermediate block between m_chain and the
5372  // tip.
5373  // So if this block is itself better than m_chain.Tip() and it
5374  // wasn't in
5375  // setBlockIndexCandidates, then it must be in m_blocks_unlinked.
5376  if (!CBlockIndexWorkComparator()(pindex, m_chain.Tip()) &&
5377  setBlockIndexCandidates.count(pindex) == 0) {
5378  if (pindexFirstInvalid == nullptr) {
5379  assert(foundInUnlinked);
5380  }
5381  }
5382  }
5383  // Perhaps too slow
5384  // assert(pindex->GetBlockHash() == pindex->GetBlockHeader().GetHash());
5385  // End: actual consistency checks.
5386 
5387  // Try descending into the first subnode.
5388  std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
5389  std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
5390  range = forward.equal_range(pindex);
5391  if (range.first != range.second) {
5392  // A subnode was found.
5393  pindex = range.first->second;
5394  nHeight++;
5395  continue;
5396  }
5397  // This is a leaf node. Move upwards until we reach a node of which we
5398  // have not yet visited the last child.
5399  while (pindex) {
5400  // We are going to either move to a parent or a sibling of pindex.
5401  // If pindex was the first with a certain property, unset the
5402  // corresponding variable.
5403  if (pindex == pindexFirstInvalid) {
5404  pindexFirstInvalid = nullptr;
5405  }
5406  if (pindex == pindexFirstParked) {
5407  pindexFirstParked = nullptr;
5408  }
5409  if (pindex == pindexFirstMissing) {
5410  pindexFirstMissing = nullptr;
5411  }
5412  if (pindex == pindexFirstNeverProcessed) {
5413  pindexFirstNeverProcessed = nullptr;
5414  }
5415  if (pindex == pindexFirstNotTreeValid) {
5416  pindexFirstNotTreeValid = nullptr;
5417  }
5418  if (pindex == pindexFirstNotTransactionsValid) {
5419  pindexFirstNotTransactionsValid = nullptr;
5420  }
5421  if (pindex == pindexFirstNotChainValid) {
5422  pindexFirstNotChainValid = nullptr;
5423  }
5424  if (pindex == pindexFirstNotScriptsValid) {
5425  pindexFirstNotScriptsValid = nullptr;
5426  }
5427  // Find our parent.
5428  CBlockIndex *pindexPar = pindex->pprev;
5429  // Find which child we just visited.
5430  std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
5431  std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
5432  rangePar = forward.equal_range(pindexPar);
5433  while (rangePar.first->second != pindex) {
5434  // Our parent must have at least the node we're coming from as
5435  // child.
5436  assert(rangePar.first != rangePar.second);
5437  rangePar.first++;
5438  }
5439  // Proceed to the next one.
5440  rangePar.first++;
5441  if (rangePar.first != rangePar.second) {
5442  // Move to the sibling.
5443  pindex = rangePar.first->second;
5444  break;
5445  } else {
5446  // Move up further.
5447  pindex = pindexPar;
5448  nHeight--;
5449  continue;
5450  }
5451  }
5452  }
5453 
5454  // Check that we actually traversed the entire map.
5455  assert(nNodes == forward.size());
5456 }
5457 
5458 std::string CChainState::ToString() {
5460  CBlockIndex *tip = m_chain.Tip();
5461  return strprintf("Chainstate [%s] @ height %d (%s)",
5462  m_from_snapshot_blockhash ? "snapshot" : "ibd",
5463  tip ? tip->nHeight : -1,
5464  tip ? tip->GetBlockHash().ToString() : "null");
5465 }
5466 
// Resize the in-memory coins caches (tip cache and coinsdb cache).
// Returns early if neither size changed; otherwise records the new sizes,
// resizes the DB cache, and flushes state as needed when the tip cache
// shrank.
// NOTE(review): the doc extraction elided the lines that assign `ret`
// (source lines 5489 and 5493-5494, the FlushStateToDisk calls in both
// branches) -- as shown, `ret` would be returned uninitialized; restore
// those lines from upstream.
5467 bool CChainState::ResizeCoinsCaches(size_t coinstip_size, size_t coinsdb_size) {
5469  if (coinstip_size == m_coinstip_cache_size_bytes &&
5470  coinsdb_size == m_coinsdb_cache_size_bytes) {
5471  // Cache sizes are unchanged, no need to continue.
5472  return true;
5473  }
5474  size_t old_coinstip_size = m_coinstip_cache_size_bytes;
5475  m_coinstip_cache_size_bytes = coinstip_size;
5476  m_coinsdb_cache_size_bytes = coinsdb_size;
5477  CoinsDB().ResizeCache(coinsdb_size);
5478 
5479  LogPrintf("[%s] resized coinsdb cache to %.1f MiB\n", this->ToString(),
5480  coinsdb_size * (1.0 / 1024 / 1024));
5481  LogPrintf("[%s] resized coinstip cache to %.1f MiB\n", this->ToString(),
5482  coinstip_size * (1.0 / 1024 / 1024));
5483 
5484  BlockValidationState state;
5485  bool ret;
5486 
5487  if (coinstip_size > old_coinstip_size) {
5488  // Likely no need to flush if cache sizes have grown.
5490  } else {
5491  // Otherwise, flush state to disk and deallocate the in-memory coins
5492  // map.
5495  }
5496  return ret;
5497 }
5498 
// Serialization version expected at the head of mempool.dat; LoadMempool
// below rejects a file whose stored version differs from this value.
5499 static const uint64_t MEMPOOL_DUMP_VERSION = 1;
5500 
// Load previously dumped mempool transactions from mempool.dat.
// Each entry carries the tx, its original acceptance time and a fee delta;
// entries older than -mempoolexpiry are skipped, the rest are resubmitted
// through AcceptToMemoryPool. Also restores prioritisation deltas and the
// unbroadcast-txid set. Returns false on missing/corrupt file, bad version,
// or shutdown; logs summary counters either way.
// NOTE(review): one line of the acceptance check (source line 5550, the
// expected MempoolAcceptResult result-type operand) was elided by the doc
// extraction; restore from upstream.
5501 bool LoadMempool(const Config &config, CTxMemPool &pool,
5502  CChainState &active_chainstate) {
      // -mempoolexpiry is given in hours; convert to seconds.
5503  int64_t nExpiryTimeout =
5504  gArgs.GetIntArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60;
5505  FILE *filestr =
5506  fsbridge::fopen(gArgs.GetDataDirNet() / "mempool.dat", "rb");
5507  CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);
5508  if (file.IsNull()) {
5509  LogPrintf(
5510  "Failed to open mempool file from disk. Continuing anyway.\n");
5511  return false;
5512  }
5513 
5514  int64_t count = 0;
5515  int64_t expired = 0;
5516  int64_t failed = 0;
5517  int64_t already_there = 0;
5518  int64_t unbroadcast = 0;
5519  int64_t nNow = GetTime();
5520 
5521  try {
5522  uint64_t version;
5523  file >> version;
5524  if (version != MEMPOOL_DUMP_VERSION) {
5525  return false;
5526  }
5527 
5528  uint64_t num;
5529  file >> num;
5530  while (num) {
5531  --num;
5532  CTransactionRef tx;
5533  int64_t nTime;
5534  int64_t nFeeDelta;
5535  file >> tx;
5536  file >> nTime;
5537  file >> nFeeDelta;
5538 
5539  Amount amountdelta = nFeeDelta * SATOSHI;
5540  if (amountdelta != Amount::zero()) {
5541  pool.PrioritiseTransaction(tx->GetId(), amountdelta);
5542  }
             // Only resubmit transactions that have not yet expired.
5543  if (nTime > nNow - nExpiryTimeout) {
5544  LOCK(cs_main);
5545  const auto &accepted =
5546  AcceptToMemoryPool(config, active_chainstate, tx, nTime,
5547  /*bypass_limits=*/false,
5548  /*test_accept=*/false);
5549  if (accepted.m_result_type ==
5551  ++count;
5552  } else {
5553  // mempool may contain the transaction already, e.g. from
5554  // wallet(s) having loaded it while we were processing
5555  // mempool transactions; consider these as valid, instead of
5556  // failed, but mark them as 'already there'
5557  if (pool.exists(tx->GetId())) {
5558  ++already_there;
5559  } else {
5560  ++failed;
5561  }
5562  }
5563  } else {
5564  ++expired;
5565  }
5566 
5567  if (ShutdownRequested()) {
5568  return false;
5569  }
5570  }
5571  std::map<TxId, Amount> mapDeltas;
5572  file >> mapDeltas;
5573 
5574  for (const auto &i : mapDeltas) {
5575  pool.PrioritiseTransaction(i.first, i.second);
5576  }
5577 
5578  std::set<TxId> unbroadcast_txids;
5579  file >> unbroadcast_txids;
5580  unbroadcast = unbroadcast_txids.size();
5581  for (const auto &txid : unbroadcast_txids) {
5582  // Ensure transactions were accepted to mempool then add to
5583  // unbroadcast set.
5584  if (pool.get(txid) != nullptr) {
5585  pool.AddUnbroadcastTx(txid);
5586  }
5587  }
5588  } catch (const std::exception &e) {
5589  LogPrintf("Failed to deserialize mempool data on disk: %s. Continuing "
5590  "anyway.\n",
5591  e.what());
5592  return false;
5593  }
5594 
5595  LogPrintf("Imported mempool transactions from disk: %i succeeded, %i "
5596  "failed, %i expired, %i already there, %i waiting for initial "
5597  "broadcast\n",
5598  count, failed, expired, already_there, unbroadcast);
5599  return true;
5600 }
5601 
5602 bool DumpMempool(const