Bitcoin ABC  0.26.3
P2P Digital Currency
validation.cpp
Go to the documentation of this file.
1 // Copyright (c) 2009-2010 Satoshi Nakamoto
2 // Copyright (c) 2009-2018 The Bitcoin Core developers
3 // Copyright (c) 2017-2020 The Bitcoin developers
4 // Distributed under the MIT software license, see the accompanying
5 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
6 
7 #include <validation.h>
8 
9 #include <arith_uint256.h>
10 #include <avalanche/avalanche.h>
11 #include <avalanche/processor.h>
12 #include <blockvalidity.h>
13 #include <chainparams.h>
14 #include <checkpoints.h>
15 #include <checkqueue.h>
16 #include <config.h>
17 #include <consensus/activation.h>
18 #include <consensus/amount.h>
19 #include <consensus/merkle.h>
20 #include <consensus/tx_check.h>
21 #include <consensus/tx_verify.h>
22 #include <consensus/validation.h>
23 #include <deploymentstatus.h>
24 #include <hash.h>
25 #include <index/blockfilterindex.h>
26 #include <logging.h>
27 #include <logging/timer.h>
28 #include <minerfund.h>
29 #include <node/blockstorage.h>
30 #include <node/coinstats.h>
31 #include <node/ui_interface.h>
32 #include <node/utxo_snapshot.h>
33 #include <policy/block/minerfund.h>
35 #include <policy/policy.h>
36 #include <policy/settings.h>
37 #include <pow/pow.h>
38 #include <primitives/block.h>
39 #include <primitives/transaction.h>
40 #include <random.h>
41 #include <reverse_iterator.h>
42 #include <script/script.h>
43 #include <script/scriptcache.h>
44 #include <script/sigcache.h>
45 #include <shutdown.h>
46 #include <tinyformat.h>
47 #include <txdb.h>
48 #include <txmempool.h>
49 #include <undo.h>
50 #include <util/check.h> // For NDEBUG compile time check
51 #include <util/strencodings.h>
52 #include <util/string.h>
53 #include <util/system.h>
54 #include <util/trace.h>
55 #include <util/translation.h>
56 #include <validationinterface.h>
57 #include <warnings.h>
58 
59 #include <algorithm>
60 #include <atomic>
61 #include <cassert>
62 #include <deque>
63 #include <numeric>
64 #include <optional>
65 #include <string>
66 #include <thread>
67 
69 using node::BlockManager;
70 using node::BlockMap;
71 using node::CCoinsStats;
74 using node::fReindex;
75 using node::nPruneTarget;
82 
// Scale factors for converting raw timing counters when logging benchmark
// output: 1e-6 (micro) and 1e-3 (milli).
83 #define MICRO 0.000001
84 #define MILLI 0.001
85 
// Periodic chainstate persistence cadence: write the chainstate every hour,
// and do a full flush/compaction of it every 24 hours.
// NOTE(review): this is a doxygen extraction — the original doc-comment
// lines (86, 88) describing these constants are missing here.
87 static constexpr std::chrono::hours DATABASE_WRITE_INTERVAL{1};
89 static constexpr std::chrono::hours DATABASE_FLUSH_INTERVAL{24};
// Human-readable description of the -checklevel argument, one entry per
// level; surfaced to the user (e.g. in help text for verifychain).
90 const std::vector<std::string> CHECKLEVEL_DOC{
91  "level 0 reads the blocks from disk",
92  "level 1 verifies block validity",
93  "level 2 verifies undo data",
94  "level 3 checks disconnection of tip blocks",
95  "level 4 tries to reconnect the blocks",
96  "each level includes the checks of the previous levels",
97 };
98 
112 
// Condition variable signalled when the best block changes (the companion
// mutex/variable declarations at the missing lines 113/115 are not visible
// in this extraction).
114 std::condition_variable g_best_block_cv;
// Global policy switches: require standard transactions (relaxed on test
// networks) and optionally run expensive block-index consistency checks.
116 bool fRequireStandard = true;
117 bool fCheckBlockIndex = false;
120 
123 
// Tail of the BlockValidationOptions constructor: defaults the excessive
// block size from the config and enables PoW/merkle-root checking.
// NOTE(review): the constructor signature line (124) is missing from this
// extraction — confirm against the original source.
125  : excessiveBlockSize(config.GetMaxBlockSize()), checkPoW(true),
126  checkMerkleRoot(true) {}
127 
// Find the last common block between the given locator and the active chain.
// Walks locator.vHave (expected sorted descending by height): returns the
// first entry that is on the active chain; if an entry's ancestor at the
// current tip height *is* the tip, the tip itself is the fork point; falls
// back to genesis when nothing matches.
// NOTE(review): the function name/parameter lines (129-130) are missing from
// this extraction — this is Chainstate::FindForkInGlobalIndex in the
// original; confirm against the source tree.
128 const CBlockIndex *
131 
132  // Find the latest block common to locator and chain - we expect that
133  // locator.vHave is sorted descending by height.
134  for (const BlockHash &hash : locator.vHave) {
135  const CBlockIndex *pindex{m_blockman.LookupBlockIndex(hash)};
136  if (pindex) {
137  if (m_chain.Contains(pindex)) {
138  return pindex;
139  }
140  if (pindex->GetAncestor(m_chain.Height()) == m_chain.Tip()) {
141  return m_chain.Tip();
142  }
143  }
144  }
// No locator entry is known and on-chain: the only guaranteed common
// ancestor is the genesis block.
145  return m_chain.Genesis();
146 }
147 
148 static uint32_t GetNextBlockScriptFlags(const Consensus::Params &params,
149  const CBlockIndex *pindex);
150 
// Check whether a transaction's BIP68 relative lock-times would be satisfied
// in the block following `tip`. Optionally reuses previously computed
// LockPoints (useExistingLockPoints) or recomputes them from the input coins
// in `coins_view`, storing the result back into `lp` when provided.
// Returns true if the sequence locks pass, false otherwise (or on a missing
// input, via error()).
151 bool CheckSequenceLocksAtTip(CBlockIndex *tip, const CCoinsView &coins_view,
152  const CTransaction &tx, LockPoints *lp,
153  bool useExistingLockPoints) {
154  assert(tip != nullptr);
155 
// Build a phantom index representing the *next* block, since that is the
// block the mempool transaction would be mined in.
156  CBlockIndex index;
157  index.pprev = tip;
158  // CheckSequenceLocksAtTip() uses active_chainstate.m_chain.Height()+1 to
159  // evaluate height based locks because when SequenceLocks() is called within
160  // ConnectBlock(), the height of the block *being* evaluated is what is
161  // used. Thus if we want to know if a transaction can be part of the *next*
162  // block, we need to use one more than active_chainstate.m_chain.Height()
163  index.nHeight = tip->nHeight + 1;
164 
165  std::pair<int, int64_t> lockPair;
166  if (useExistingLockPoints) {
167  assert(lp);
168  lockPair.first = lp->height;
169  lockPair.second = lp->time;
170  } else {
// Collect the confirmation height of every input; mempool-resident
// inputs are assumed to confirm in the next block (tip height + 1).
171  std::vector<int> prevheights;
172  prevheights.resize(tx.vin.size());
173  for (size_t txinIndex = 0; txinIndex < tx.vin.size(); txinIndex++) {
174  const CTxIn &txin = tx.vin[txinIndex];
175  Coin coin;
176  if (!coins_view.GetCoin(txin.prevout, coin)) {
177  return error("%s: Missing input", __func__);
178  }
179  if (coin.GetHeight() == MEMPOOL_HEIGHT) {
180  // Assume all mempool transaction confirm in the next block
181  prevheights[txinIndex] = tip->nHeight + 1;
182  } else {
183  prevheights[txinIndex] = coin.GetHeight();
184  }
185  }
// NOTE(review): line 186 is missing from this extraction — in the
// original it assigns lockPair from CalculateSequenceLocks(tx, ...,
// prevheights, index); confirm against the source tree.
187  prevheights, index);
188  if (lp) {
189  lp->height = lockPair.first;
190  lp->time = lockPair.second;
191  // Also store the hash of the block with the highest height of all
192  // the blocks which have sequence locked prevouts. This hash needs
193  // to still be on the chain for these LockPoint calculations to be
194  // valid.
195  // Note: It is impossible to correctly calculate a maxInputBlock if
196  // any of the sequence locked inputs depend on unconfirmed txs,
197  // except in the special case where the relative lock time/height is
198  // 0, which is equivalent to no sequence lock. Since we assume input
199  // height of tip+1 for mempool txs and test the resulting lockPair
200  // from CalculateSequenceLocks against tip+1. We know
201  // EvaluateSequenceLocks will fail if there was a non-zero sequence
202  // lock on a mempool input, so we can use the return value of
203  // CheckSequenceLocksAtTip to indicate the LockPoints validity.
204  int maxInputHeight = 0;
205  for (const int height : prevheights) {
206  // Can ignore mempool inputs since we'll fail if they had
207  // non-zero locks.
208  if (height != tip->nHeight + 1) {
209  maxInputHeight = std::max(maxInputHeight, height);
210  }
211  }
212  // tip->GetAncestor(maxInputHeight) should never return a nullptr
213  // because maxInputHeight is always less than the tip height.
214  // It would, however, be a bad bug to continue execution, since a
215  // LockPoints object with the maxInputBlock member set to nullptr
216  // signifies no relative lock time.
217  lp->maxInputBlock = Assert(tip->GetAncestor(maxInputHeight));
218  }
219  }
// Evaluate the computed (or cached) lock pair against the phantom
// next-block index.
220  return EvaluateSequenceLocks(index, lockPair);
221 }
222 
223 // Command-line argument "-replayprotectionactivationtime=<timestamp>" will
224 // cause the node to switch to replay protected SigHash ForkID value when the
225 // median timestamp of the previous 11 blocks is greater than or equal to
226 // <timestamp>. Defaults to the pre-defined timestamp when not set.
// NOTE(review): the function signature line (227) is missing from this
// extraction — this is the median-time-past overload of
// IsReplayProtectionEnabled; confirm against the source tree.
228  int64_t nMedianTimePast) {
229  return nMedianTimePast >= gArgs.GetIntArg("-replayprotectionactivationtime",
230  params.leeKuanYewActivationTime);
231 }
232 
// Convenience overload: evaluate replay-protection activation at the block
// following pindexPrev, using that block's median time past. A null
// pindexPrev (genesis predecessor) is treated as "not enabled".
// NOTE(review): the signature line (233) is missing from this extraction.
234  const CBlockIndex *pindexPrev) {
235  if (pindexPrev == nullptr) {
236  return false;
237  }
238 
239  return IsReplayProtectionEnabled(params, pindexPrev->GetMedianTimePast());
240 }
241 
// Re-validate a mempool candidate's input scripts against the given flags
// while asserting that every input coin is either still unspent in the UTXO
// set or provided by a mempool transaction, then cache the script/signature
// results. Returns the result of CheckInputScripts().
// NOTE(review): the leading signature lines (242-247 are only partially
// present, and lines 251-252 with the coins_tip parameter and lock
// annotations are missing) — confirm against the source tree.
248  const CTransaction &tx, TxValidationState &state,
249  const CCoinsViewCache &view, const CTxMemPool &pool, const uint32_t flags,
250  PrecomputedTransactionData &txdata, int &nSigChecksOut,
253  AssertLockHeld(pool.cs);
254 
255  assert(!tx.IsCoinBase());
256  for (const CTxIn &txin : tx.vin) {
257  const Coin &coin = view.AccessCoin(txin.prevout);
258 
259  // This coin was checked in PreChecks and MemPoolAccept
260  // has been holding cs_main since then.
261  Assume(!coin.IsSpent());
262  if (coin.IsSpent()) {
263  return false;
264  }
265 
266  // If the Coin is available, there are 2 possibilities:
267  // it is available in our current ChainstateActive UTXO set,
268  // or it's a UTXO provided by a transaction in our mempool.
269  // Ensure the scriptPubKeys in Coins from CoinsView are correct.
270  const CTransactionRef &txFrom = pool.get(txin.prevout.GetTxId());
271  if (txFrom) {
// Input comes from a mempool transaction: its referenced output
// must exist and match the coin we are about to validate against.
272  assert(txFrom->GetId() == txin.prevout.GetTxId());
273  assert(txFrom->vout.size() > txin.prevout.GetN());
274  assert(txFrom->vout[txin.prevout.GetN()] == coin.GetTxOut());
275  } else {
// Input comes from the confirmed UTXO set: cross-check the view's
// coin against the authoritative coins_tip entry.
276  const Coin &coinFromUTXOSet = coins_tip.AccessCoin(txin.prevout);
277  assert(!coinFromUTXOSet.IsSpent());
278  assert(coinFromUTXOSet.GetTxOut() == coin.GetTxOut());
279  }
280  }
281 
282  // Call CheckInputScripts() to cache signature and script validity against
283  // current tip consensus rules.
284  return CheckInputScripts(tx, state, view, flags, /*sigCacheStore=*/true,
285  /*scriptCacheStore=*/true, txdata, nSigChecksOut);
286 }
287 
288 namespace {
289 
// Encapsulates all state and logic for validating one transaction (or a
// package of transactions) for mempool acceptance: argument bundling
// (ATMPArgs), per-transaction intermediate state (Workspace), and the
// PreChecks -> ConsensusScriptChecks -> Finalize pipeline.
// NOTE(review): this is a doxygen extraction — several doc-comment and
// return-type lines inside the class (e.g. 313-317, 320, 335-338, 350,
// 379-400, 410-415) are missing; confirm member signatures against the
// source tree.
290 class MemPoolAccept {
291 public:
// The dummy backend (m_dummy) starts as m_view's backing store; it is
// swapped for m_viewmempool during input fetching and back again once
// all needed coins are cached (see PreChecks).
292  MemPoolAccept(CTxMemPool &mempool, Chainstate &active_chainstate)
293  : m_pool(mempool), m_view(&m_dummy),
294  m_viewmempool(&active_chainstate.CoinsTip(), m_pool),
295  m_active_chainstate(active_chainstate) {}
296 
297  // We put the arguments we're handed into a struct, so we can pass them
298  // around easier.
299  struct ATMPArgs {
300  const Config &m_config;
301  const int64_t m_accept_time;
302  const bool m_bypass_limits;
303  /*
304  * Return any outpoints which were not previously present in the coins
305  * cache, but were added as a result of validating the tx for mempool
306  * acceptance. This allows the caller to optionally remove the cache
307  * additions if the associated transaction ends up being rejected by
308  * the mempool.
309  */
310  std::vector<COutPoint> &m_coins_to_uncache;
311  const bool m_test_accept;
312  const unsigned int m_heightOverride;
318  const bool m_package_submission;
319 
// Factory for ordinary single-transaction acceptance.
321  static ATMPArgs SingleAccept(const Config &config, int64_t accept_time,
322  bool bypass_limits,
323  std::vector<COutPoint> &coins_to_uncache,
324  bool test_accept,
325  unsigned int heightOverride) {
326  return ATMPArgs{config,
327  accept_time,
328  bypass_limits,
329  coins_to_uncache,
330  test_accept,
331  heightOverride,
332  /*package_submission=*/false};
333  }
334 
// Factory for test-only package validation (nothing is submitted).
339  static ATMPArgs
340  PackageTestAccept(const Config &config, int64_t accept_time,
341  std::vector<COutPoint> &coins_to_uncache) {
342  return ATMPArgs{config, accept_time,
343  /*bypass_limits=*/false, coins_to_uncache,
344  /*test_accept=*/true,
345  /*height_override=*/0,
346  // not submitting to mempool
347  /*package_submission=*/false};
348  }
349 
// Factory for real submission of a child-with-unconfirmed-parents package.
351  static ATMPArgs
352  PackageChildWithParents(const Config &config, int64_t accept_time,
353  std::vector<COutPoint> &coins_to_uncache) {
354  return ATMPArgs{config,
355  accept_time,
356  /*bypass_limits=*/false,
357  coins_to_uncache,
358  /*test_accept=*/false,
359  /*height_override=*/0,
360  /*package_submission=*/true};
361  }
362 
363  private:
364  // Private ctor to avoid exposing details to clients and allowing the
365  // possibility of mixing up the order of the arguments. Use static
366  // functions above instead.
367  ATMPArgs(const Config &config, int64_t accept_time, bool bypass_limits,
368  std::vector<COutPoint> &coins_to_uncache, bool test_accept,
369  unsigned int height_override, bool package_submission)
370  : m_config{config}, m_accept_time{accept_time},
371  m_bypass_limits{bypass_limits},
372  m_coins_to_uncache{coins_to_uncache}, m_test_accept{test_accept},
373  m_heightOverride{height_override}, m_package_submission{
374  package_submission} {}
375  };
376 
377  // Single transaction acceptance
378  MempoolAcceptResult AcceptSingleTransaction(const CTransactionRef &ptx,
379  ATMPArgs &args)
381 
// Validate (and possibly submit) several transactions, making each one's
// outputs spendable by later package members.
389  AcceptMultipleTransactions(const std::vector<CTransactionRef> &txns,
390  ATMPArgs &args)
392 
// Validate and submit a child-with-parents package.
398  PackageMempoolAcceptResult AcceptPackage(const Package &package,
399  ATMPArgs &args)
401 
402 private:
403  // All the intermediate state that gets passed between the various levels
404  // of checking a given transaction.
405  struct Workspace {
406  Workspace(const CTransactionRef &ptx,
407  const uint32_t next_block_script_verify_flags)
408  : m_ptx(ptx),
409  m_next_block_script_verify_flags(next_block_script_verify_flags) {
410  }
// Mempool entry built during PreChecks, consumed by Finalize.
416  std::unique_ptr<CTxMemPoolEntry> m_entry;
417 
// Virtual size, base fee, and (delta-adjusted) modified fee computed
// during PreChecks.
422  int64_t m_vsize;
427  Amount m_base_fees;
428 
433  Amount m_modified_fees;
434 
435  const CTransactionRef &m_ptx;
436  TxValidationState m_state;
// Cached sighash midstate reused between standard and consensus
// script-check passes.
442  PrecomputedTransactionData m_precomputed_txdata;
443 
444  // ABC specific flags that are used in both PreChecks and
445  // ConsensusScriptChecks
446  const uint32_t m_next_block_script_verify_flags;
447  int m_sig_checks_standard;
448  };
449 
450  // Run the policy checks on a given transaction, excluding any script
451  // checks. Looks up inputs, calculates feerate, considers replacement,
452  // evaluates package limits, etc. As this function can be invoked for "free"
453  // by a peer, only tests that are fast should be done here (to avoid CPU
454  // DoS).
455  bool PreChecks(ATMPArgs &args, Workspace &ws)
456  EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
457 
458  // Re-run the script checks, using consensus flags, and try to cache the
459  // result in the scriptcache. This should be done after
460  // PolicyScriptChecks(). This requires that all inputs either be in our
461  // utxo set or in the mempool.
462  bool ConsensusScriptChecks(const ATMPArgs &args, Workspace &ws)
463  EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
464 
465  // Try to add the transaction to the mempool, removing any conflicts first.
466  // Returns true if the transaction is in the mempool after any size
467  // limiting is performed, false otherwise.
468  bool Finalize(const ATMPArgs &args, Workspace &ws)
469  EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
470 
471  // Submit all transactions to the mempool and call ConsensusScriptChecks to
472  // add to the script cache - should only be called after successful
473  // validation of all transactions in the package.
474  // The package may end up partially-submitted after size limiting;
475  // returns true if all transactions are successfully added to the mempool,
476  // false otherwise.
477  bool SubmitPackage(const ATMPArgs &args, std::vector<Workspace> &workspaces,
478  PackageValidationState &package_state,
479  std::map<const TxId, const MempoolAcceptResult> &results)
480  EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
481 
482 private:
483  CTxMemPool &m_pool;
484  CCoinsViewCache m_view;
485  CCoinsViewMemPool m_viewmempool;
486  CCoinsView m_dummy;
487 
488  Chainstate &m_active_chainstate;
489 };
490 
// Fast, non-script policy checks for a mempool candidate: structural
// validity, standardness, duplicate/conflict detection, input availability,
// lock-time and sequence-lock finality, fee checks, and a standard-flags
// script-check pass. On success, fills ws (entry, vsize, fees, sigchecks);
// on failure, fills ws.m_state and returns false.
// NOTE(review): several `return state.Invalid(TxValidationResult::...,`
// opener lines (e.g. 522, 527, 534, 543, 568, 575, 582, 605, 621, 647) are
// missing from this extraction — each orphaned string literal below is the
// tail of such a rejection; confirm against the source tree.
491 bool MemPoolAccept::PreChecks(ATMPArgs &args, Workspace &ws) {
493  AssertLockHeld(m_pool.cs);
494  const CTransactionRef &ptx = ws.m_ptx;
495  const CTransaction &tx = *ws.m_ptx;
496  const TxId &txid = ws.m_ptx->GetId();
497 
498  // Copy/alias what we need out of args
499  const int64_t nAcceptTime = args.m_accept_time;
500  const bool bypass_limits = args.m_bypass_limits;
501  std::vector<COutPoint> &coins_to_uncache = args.m_coins_to_uncache;
502  const unsigned int heightOverride = args.m_heightOverride;
503 
504  // Alias what we need out of ws
505  TxValidationState &state = ws.m_state;
506  // Coinbase is only valid in a block, not as a loose transaction.
507  if (!CheckRegularTransaction(tx, state)) {
508  // state filled in by CheckRegularTransaction.
509  return false;
510  }
511 
512  // Rather not work on nonstandard transactions (unless -testnet)
513  std::string reason;
514  if (fRequireStandard && !IsStandardTx(tx, reason)) {
515  return state.Invalid(TxValidationResult::TX_NOT_STANDARD, reason);
516  }
517 
518  // Only accept nLockTime-using transactions that can be mined in the next
519  // block; we don't want our mempool filled up with transactions that can't
520  // be mined yet.
521  TxValidationState ctxState;
523  m_active_chainstate.m_chain.Tip(),
524  args.m_config.GetChainParams().GetConsensus(), tx, ctxState)) {
525  // We copy the state from a dummy to ensure we don't increase the
526  // ban score of peer for transaction that could be valid in the future.
528  ctxState.GetRejectReason(),
529  ctxState.GetDebugMessage());
530  }
531 
532  // Is it already in the memory pool?
533  if (m_pool.exists(txid)) {
535  "txn-already-in-mempool");
536  }
537 
538  // Check for conflicts with in-memory transactions
539  for (const CTxIn &txin : tx.vin) {
540  auto itConflicting = m_pool.mapNextTx.find(txin.prevout);
541  if (itConflicting != m_pool.mapNextTx.end()) {
542  // Disable replacement feature for good
544  "txn-mempool-conflict");
545  }
546  }
547 
548  LockPoints lp;
// Temporarily back m_view with the mempool-aware view so unconfirmed
// parent outputs are visible while we fetch inputs.
549  m_view.SetBackend(m_viewmempool);
550 
551  const CCoinsViewCache &coins_cache = m_active_chainstate.CoinsTip();
552  // Do all inputs exist?
553  for (const CTxIn &txin : tx.vin) {
554  if (!coins_cache.HaveCoinInCache(txin.prevout)) {
555  coins_to_uncache.push_back(txin.prevout);
556  }
557 
558  // Note: this call may add txin.prevout to the coins cache
559  // (coins_cache.cacheCoins) by way of FetchCoin(). It should be
560  // removed later (via coins_to_uncache) if this tx turns out to be
561  // invalid.
562  if (!m_view.HaveCoin(txin.prevout)) {
563  // Are inputs missing because we already have the tx?
564  for (size_t out = 0; out < tx.vout.size(); out++) {
565  // Optimistically just do efficient check of cache for
566  // outputs.
567  if (coins_cache.HaveCoinInCache(COutPoint(txid, out))) {
569  "txn-already-known");
570  }
571  }
572 
573  // Otherwise assume this might be an orphan tx for which we just
574  // haven't seen parents yet.
576  "bad-txns-inputs-missingorspent");
577  }
578  }
579 
580  // Are the actual inputs available?
581  if (!m_view.HaveInputs(tx)) {
583  "bad-txns-inputs-spent");
584  }
585 
586  // Bring the best block into scope.
587  m_view.GetBestBlock();
588 
589  // we have all inputs cached now, so switch back to dummy (to protect
590  // against bugs where we pull more inputs from disk that miss being
591  // added to coins_to_uncache)
592  m_view.SetBackend(m_dummy);
593 
594  assert(m_active_chainstate.m_blockman.LookupBlockIndex(
595  m_view.GetBestBlock()) == m_active_chainstate.m_chain.Tip());
596 
597  // Only accept BIP68 sequence locked transactions that can be mined in
598  // the next block; we don't want our mempool filled up with transactions
599  // that can't be mined yet.
600  // Pass in m_view which has all of the relevant inputs cached. Note that,
601  // since m_view's backend was removed, it no longer pulls coins from the
602  // mempool.
603  if (!CheckSequenceLocksAtTip(m_active_chainstate.m_chain.Tip(), m_view, tx,
604  &lp)) {
606  "non-BIP68-final");
607  }
608 
609  // The mempool holds txs for the next block, so pass height+1 to
610  // CheckTxInputs
611  if (!Consensus::CheckTxInputs(tx, state, m_view,
612  m_active_chainstate.m_chain.Height() + 1,
613  ws.m_base_fees)) {
614  // state filled in by CheckTxInputs
615  return false;
616  }
617 
618  // Check for non-standard pay-to-script-hash in inputs
619  if (fRequireStandard &&
620  !AreInputsStandard(tx, m_view, ws.m_next_block_script_verify_flags)) {
622  "bad-txns-nonstandard-inputs");
623  }
624 
625  // ws.m_modified_fess includes any fee deltas from PrioritiseTransaction
626  ws.m_modified_fees = ws.m_base_fees;
627  m_pool.ApplyDelta(txid, ws.m_modified_fees);
628 
629  // Keep track of transactions that spend a coinbase, which we re-scan
630  // during reorgs to ensure COINBASE_MATURITY is still met.
631  bool fSpendsCoinbase = false;
632  for (const CTxIn &txin : tx.vin) {
633  const Coin &coin = m_view.AccessCoin(txin.prevout);
634  if (coin.IsCoinBase()) {
635  fSpendsCoinbase = true;
636  break;
637  }
638  }
639 
640  unsigned int nSize = tx.GetTotalSize();
641 
642  // No transactions are allowed below minRelayTxFee except from disconnected
643  // blocks.
644  // Do not change this to use virtualsize without coordinating a network
645  // policy upgrade.
646  if (!bypass_limits && ws.m_modified_fees < minRelayTxFee.GetFee(nSize)) {
648  "min relay fee not met",
649  strprintf("%d < %d", ws.m_modified_fees,
650  ::minRelayTxFee.GetFee(nSize)));
651  }
652 
653  // Validate input scripts against standard script flags.
654  const uint32_t scriptVerifyFlags =
655  ws.m_next_block_script_verify_flags | STANDARD_SCRIPT_VERIFY_FLAGS;
656  ws.m_precomputed_txdata = PrecomputedTransactionData{tx};
657  if (!CheckInputScripts(tx, state, m_view, scriptVerifyFlags, true, false,
658  ws.m_precomputed_txdata, ws.m_sig_checks_standard)) {
659  // State filled in by CheckInputScripts
660  return false;
661  }
662 
// Build the mempool entry; heightOverride (when non-zero) substitutes
// for the current chain height, e.g. for disconnected-block re-adds.
663  ws.m_entry = std::make_unique<CTxMemPoolEntry>(
664  ptx, ws.m_base_fees, nAcceptTime,
665  heightOverride ? heightOverride : m_active_chainstate.m_chain.Height(),
666  fSpendsCoinbase, ws.m_sig_checks_standard, lp);
667 
668  ws.m_vsize = ws.m_entry->GetTxVirtualSize();
669 
// Enforce the dynamic mempool minimum fee (rises as the pool fills).
670  Amount mempoolRejectFee = m_pool.GetMinFee().GetFee(ws.m_vsize);
671  if (!bypass_limits && mempoolRejectFee > Amount::zero() &&
672  ws.m_modified_fees < mempoolRejectFee) {
673  return state.Invalid(
674  TxValidationResult::TX_MEMPOOL_POLICY, "mempool min fee not met",
675  strprintf("%d < %d", ws.m_modified_fees, mempoolRejectFee));
676  }
677 
678  return true;
679 }
680 
// Second script-check pass using *consensus* (next-block) flags only, run
// after PreChecks' standard-flags pass. Caches the results and verifies that
// the sigchecks count agrees between the standard and consensus runs; any
// mismatch here indicates a validation bug, not a peer fault.
// NOTE(review): lines 682 (AssertLockHeld(cs_main)) and 700 (the
// `if (!CheckInputsFromMempoolAndCache(` opener) are missing from this
// extraction — confirm against the source tree.
681 bool MemPoolAccept::ConsensusScriptChecks(const ATMPArgs &args, Workspace &ws) {
683  AssertLockHeld(m_pool.cs);
684  const CTransaction &tx = *ws.m_ptx;
685  const TxId &txid = tx.GetId();
686  TxValidationState &state = ws.m_state;
687 
688  // Check again against the next block's script verification flags
689  // to cache our script execution flags.
690  //
691  // This is also useful in case of bugs in the standard flags that cause
692  // transactions to pass as valid when they're actually invalid. For
693  // instance the STRICTENC flag was incorrectly allowing certain CHECKSIG
694  // NOT scripts to pass, even though they were invalid.
695  //
696  // There is a similar check in CreateNewBlock() to prevent creating
697  // invalid blocks (using TestBlockValidity), however allowing such
698  // transactions into the mempool can be exploited as a DoS attack.
699  int nSigChecksConsensus;
701  tx, state, m_view, m_pool, ws.m_next_block_script_verify_flags,
702  ws.m_precomputed_txdata, nSigChecksConsensus,
703  m_active_chainstate.CoinsTip())) {
704  // This can occur under some circumstances, if the node receives an
705  // unrequested tx which is invalid due to new consensus rules not
706  // being activated yet (during IBD).
707  LogPrintf("BUG! PLEASE REPORT THIS! CheckInputScripts failed against "
708  "latest-block but not STANDARD flags %s, %s\n",
709  txid.ToString(), state.ToString());
710  return Assume(false);
711  }
712 
713  if (ws.m_sig_checks_standard != nSigChecksConsensus) {
714  // We can't accept this transaction as we've used the standard count
715  // for the mempool/mining, but the consensus count will be enforced
716  // in validation (we don't want to produce bad block templates).
717  return error(
718  "%s: BUG! PLEASE REPORT THIS! SigChecks count differed between "
719  "standard and consensus flags in %s",
720  __func__, txid.ToString());
721  }
722  return true;
723 }
724 
// Insert the validated transaction into the mempool and (outside of package
// submission / bypass mode) trim the pool to its size limit, failing with
// "mempool full" if the new transaction itself was evicted by the trim.
// NOTE(review): lines 726 (AssertLockHeld(cs_main)) and 745 (the
// `return state.Invalid(TxValidationResult::...,` opener for "mempool full")
// are missing from this extraction — confirm against the source tree.
725 bool MemPoolAccept::Finalize(const ATMPArgs &args, Workspace &ws) {
727  AssertLockHeld(m_pool.cs);
728  const TxId &txid = ws.m_ptx->GetId();
729  TxValidationState &state = ws.m_state;
730  const bool bypass_limits = args.m_bypass_limits;
731 
732  // Store transaction in memory
// Ownership of the entry transfers from the workspace to the refcounted
// mempool entry handle.
733  CTxMemPoolEntry *pentry = ws.m_entry.release();
734  auto entry = CTxMemPoolEntryRef::acquire(pentry);
735  m_pool.addUnchecked(entry);
736 
737  // Trim mempool and check if tx was trimmed.
738  // If we are validating a package, don't trim here because we could evict a
739  // previous transaction in the package. LimitMempoolSize() should be called
740  // at the very end to make sure the mempool is still within limits and
741  // package submission happens atomically.
742  if (!args.m_package_submission && !bypass_limits) {
743  m_pool.LimitSize(m_active_chainstate.CoinsTip());
744  if (!m_pool.exists(txid)) {
746  "mempool full");
747  }
748  }
749  return true;
750 }
751 
752 // Get the coins spent by ptx from the coins_view. Assumes coins are present.
753 static std::vector<Coin> getSpentCoins(const CTransactionRef &ptx,
754  const CCoinsViewCache &coins_view) {
755  std::vector<Coin> spent_coins;
756  spent_coins.reserve(ptx->vin.size());
757  for (const CTxIn &input : ptx->vin) {
758  Coin coin;
759  const bool coinFound = coins_view.GetCoin(input.prevout, coin);
760  Assume(coinFound);
761  spent_coins.push_back(std::move(coin));
762  }
763  return spent_coins;
764 }
765 
// Submit an already-validated package to the mempool: run consensus script
// checks and Finalize() per transaction (so each one's outputs become
// spendable by the next), then trim the pool once at the end. Partial
// submission is possible; returns true only if every transaction is in the
// mempool afterwards, recording a per-txid result either way.
// NOTE(review): lines 770 (AssertLockHeld(cs_main)) and 821 (the
// GetMainSignals().TransactionAddedToMempool( opener) are missing from this
// extraction — confirm against the source tree.
766 bool MemPoolAccept::SubmitPackage(
767  const ATMPArgs &args, std::vector<Workspace> &workspaces,
768  PackageValidationState &package_state,
769  std::map<const TxId, const MempoolAcceptResult> &results) {
771  AssertLockHeld(m_pool.cs);
772  // Sanity check: none of the transactions should be in the mempool.
773  assert(std::all_of(
774  workspaces.cbegin(), workspaces.cend(),
775  [this](const auto &ws) { return !m_pool.exists(ws.m_ptx->GetId()); }));
776 
777  bool all_submitted = true;
778  // ConsensusScriptChecks adds to the script cache and is therefore
779  // consensus-critical; CheckInputsFromMempoolAndCache asserts that
780  // transactions only spend coins available from the mempool or UTXO set.
781  // Submit each transaction to the mempool immediately after calling
782  // ConsensusScriptChecks to make the outputs available for subsequent
783  // transactions.
784  for (Workspace &ws : workspaces) {
785  if (!ConsensusScriptChecks(args, ws)) {
786  results.emplace(ws.m_ptx->GetId(),
787  MempoolAcceptResult::Failure(ws.m_state));
788  // Since PreChecks() passed, this should never fail.
789  all_submitted = Assume(false);
790  }
791 
792  // If we call LimitMempoolSize() for each individual Finalize(), the
793  // mempool will not take the transaction's descendant feerate into
794  // account because it hasn't seen them yet. Also, we risk evicting a
795  // transaction that a subsequent package transaction depends on.
796  // Instead, allow the mempool to temporarily bypass limits, the maximum
797  // package size) while submitting transactions individually and then
798  // trim at the very end.
799  if (!Finalize(args, ws)) {
800  results.emplace(ws.m_ptx->GetId(),
801  MempoolAcceptResult::Failure(ws.m_state));
802  // Since LimitMempoolSize() won't be called, this should never fail.
803  all_submitted = Assume(false);
804  }
805  }
806 
807  // It may or may not be the case that all the transactions made it into the
808  // mempool. Regardless, make sure we haven't exceeded max mempool size.
809  m_pool.LimitSize(m_active_chainstate.CoinsTip());
810  if (!all_submitted) {
811  return false;
812  }
813 
814  // Find the txids of the transactions that made it into the mempool. Allow
815  // partial submission, but don't report success unless they all made it into
816  // the mempool.
817  for (Workspace &ws : workspaces) {
818  if (m_pool.exists(ws.m_ptx->GetId())) {
819  results.emplace(ws.m_ptx->GetId(), MempoolAcceptResult::Success(
820  ws.m_vsize, ws.m_base_fees));
822  ws.m_ptx,
823  std::make_shared<const std::vector<Coin>>(
824  getSpentCoins(ws.m_ptx, m_view)),
825  m_pool.GetAndIncrementSequence());
826  } else {
// The final trim evicted this package member: report it as a
// policy failure even though validation succeeded.
827  all_submitted = false;
828  ws.m_state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY,
829  "mempool full");
830  results.emplace(ws.m_ptx->GetId(),
831  MempoolAcceptResult::Failure(ws.m_state));
832  }
833  }
834  return all_submitted;
835 }
836 
// Full acceptance pipeline for a single transaction: PreChecks, consensus
// script checks, a child-before-parent sanity check, then (unless
// test_accept) Finalize and the mempool-added notification. Returns a
// Success/Failure MempoolAcceptResult.
// NOTE(review): lines 837 (the MempoolAcceptResult return type), 840
// (AssertLockHeld(cs_main)), 878 (the ws.m_state.Invalid( opener) and 892
// (the GetMainSignals().TransactionAddedToMempool( opener) are missing from
// this extraction — confirm against the source tree.
838 MemPoolAccept::AcceptSingleTransaction(const CTransactionRef &ptx,
839  ATMPArgs &args) {
841  // mempool "read lock" (held through
842  // GetMainSignals().TransactionAddedToMempool())
843  LOCK(m_pool.cs);
844 
845  const Consensus::Params &consensusParams =
846  args.m_config.GetChainParams().GetConsensus();
847  const CBlockIndex *tip = m_active_chainstate.m_chain.Tip();
848 
849  Workspace ws(ptx, GetNextBlockScriptFlags(consensusParams, tip));
850 
851  // Perform the inexpensive checks first and avoid hashing and signature
852  // verification unless those checks pass, to mitigate CPU exhaustion
853  // denial-of-service attacks.
854  if (!PreChecks(args, ws)) {
855  return MempoolAcceptResult::Failure(ws.m_state);
856  }
857 
858  if (!ConsensusScriptChecks(args, ws)) {
859  return MempoolAcceptResult::Failure(ws.m_state);
860  }
861 
862  const TxId txid = ptx->GetId();
863 
864  // Mempool sanity check -- in our new mempool no tx can be added if its
865  // outputs are already spent in the mempool (that is, no children before
866  // parents allowed; the mempool must be consistent at all times).
867  //
868  // This means that on reorg, the disconnectpool *must* always import
869  // the existing mempool tx's, clear the mempool, and then re-add
870  // remaining tx's in topological order via this function. Our new mempool
871  // has fast adds, so this is ok.
872  if (auto it = m_pool.mapNextTx.lower_bound(COutPoint{txid, 0});
873  it != m_pool.mapNextTx.end() && it->first->GetTxId() == txid) {
874  LogPrintf("%s: BUG! PLEASE REPORT THIS! Attempt to add txid %s, but "
875  "its outputs are already spent in the "
876  "mempool\n",
877  __func__, txid.ToString());
879  "txn-child-before-parent");
880  return MempoolAcceptResult::Failure(ws.m_state);
881  }
882 
883  // Tx was accepted, but not added
884  if (args.m_test_accept) {
885  return MempoolAcceptResult::Success(ws.m_vsize, ws.m_base_fees);
886  }
887 
888  if (!Finalize(args, ws)) {
889  return MempoolAcceptResult::Failure(ws.m_state);
890  }
891 
// Notify listeners (wallet, indexes, ...) that the tx entered the pool,
// including the coins it spends.
893  ptx,
894  std::make_shared<const std::vector<Coin>>(getSpentCoins(ptx, m_view)),
895  m_pool.GetAndIncrementSequence());
896 
897  return MempoolAcceptResult::Success(ws.m_vsize, ws.m_base_fees);
898 }
899 
// Validate a list of transactions as a package: context-free package checks,
// then PreChecks for each (failing fast on the first failure), making each
// transaction's outputs available to later ones via the package view. In
// test_accept mode this is where validation stops; otherwise SubmitPackage()
// performs consensus checks and mempool insertion.
// NOTE(review): lines 902 (AssertLockHeld(cs_main)), 929 and 955 (the
// package_state.Invalid( openers for "transaction failed" / "submission
// failed") are missing from this extraction — confirm against the source
// tree.
900 PackageMempoolAcceptResult MemPoolAccept::AcceptMultipleTransactions(
901  const std::vector<CTransactionRef> &txns, ATMPArgs &args) {
903 
904  // These context-free package limits can be done before taking the mempool
905  // lock.
906  PackageValidationState package_state;
907  if (!CheckPackage(txns, package_state)) {
908  return PackageMempoolAcceptResult(package_state, {});
909  }
910 
// One workspace per transaction, all sharing the next-block script
// verification flags derived from the current tip.
911  std::vector<Workspace> workspaces{};
912  workspaces.reserve(txns.size());
913  std::transform(txns.cbegin(), txns.cend(), std::back_inserter(workspaces),
914  [&args, this](const auto &tx) {
915  return Workspace(
916  tx,
917  GetNextBlockScriptFlags(
918  args.m_config.GetChainParams().GetConsensus(),
919  m_active_chainstate.m_chain.Tip()));
920  });
921  std::map<const TxId, const MempoolAcceptResult> results;
922 
923  LOCK(m_pool.cs);
924 
925  // Do all PreChecks first and fail fast to avoid running expensive script
926  // checks when unnecessary.
927  for (Workspace &ws : workspaces) {
928  if (!PreChecks(args, ws)) {
930  "transaction failed");
931  // Exit early to avoid doing pointless work. Update the failed tx
932  // result; the rest are unfinished.
933  results.emplace(ws.m_ptx->GetId(),
934  MempoolAcceptResult::Failure(ws.m_state));
935  return PackageMempoolAcceptResult(package_state,
936  std::move(results));
937  }
938  // Make the coins created by this transaction available for subsequent
939  // transactions in the package to spend.
940  m_viewmempool.PackageAddTransaction(ws.m_ptx);
941  if (args.m_test_accept) {
942  // When test_accept=true, transactions that pass PreChecks
943  // are valid because there are no further mempool checks (passing
944  // PreChecks implies passing ConsensusScriptChecks).
945  results.emplace(ws.m_ptx->GetId(), MempoolAcceptResult::Success(
946  ws.m_vsize, ws.m_base_fees));
947  }
948  }
949 
950  if (args.m_test_accept) {
951  return PackageMempoolAcceptResult(package_state, std::move(results));
952  }
953 
954  if (!SubmitPackage(args, workspaces, package_state, results)) {
956  "submission failed");
957  return PackageMempoolAcceptResult(package_state, std::move(results));
958  }
959 
960  return PackageMempoolAcceptResult(package_state, std::move(results));
961 }
962 
PackageMempoolAcceptResult MemPoolAccept::AcceptPackage(const Package &package,
                                                        ATMPArgs &args) {
    PackageValidationState package_state;

    // Check that the package is well-formed. If it isn't, we won't try to
    // validate any of the transactions and thus won't return any
    // MempoolAcceptResults, just a package-wide error.

    // Context-free package checks.
    if (!CheckPackage(package, package_state)) {
        return PackageMempoolAcceptResult(package_state, {});
    }

    // All transactions in the package must be a parent of the last transaction.
    // This is just an opportunity for us to fail fast on a context-free check
    // without taking the mempool lock.
    if (!IsChildWithParents(package)) {
        "package-not-child-with-parents");
        return PackageMempoolAcceptResult(package_state, {});
    }

    // IsChildWithParents() guarantees the package is > 1 transactions.
    assert(package.size() > 1);
    // The package must be 1 child with all of its unconfirmed parents. The
    // package is expected to be sorted, so the last transaction is the child.
    const auto &child = package.back();
    std::unordered_set<TxId, SaltedTxIdHasher> unconfirmed_parent_txids;
    std::transform(
        package.cbegin(), package.cend() - 1,
        std::inserter(unconfirmed_parent_txids, unconfirmed_parent_txids.end()),
        [](const auto &tx) { return tx->GetId(); });

    // All child inputs must refer to a preceding package transaction or a
    // confirmed UTXO. The only way to verify this is to look up the child's
    // inputs in our current coins view (not including mempool), and enforce
    // that all parents not present in the package be available at chain tip.
    // Since this check can bring new coins into the coins cache, keep track of
    // these coins and uncache them if we don't end up submitting this package
    // to the mempool.
    const CCoinsViewCache &coins_tip_cache = m_active_chainstate.CoinsTip();
    for (const auto &input : child->vin) {
        if (!coins_tip_cache.HaveCoinInCache(input.prevout)) {
            args.m_coins_to_uncache.push_back(input.prevout);
        }
    }
    // Using the MemPoolAccept m_view cache allows us to look up these same
    // coins faster later. This should be connecting directly to CoinsTip, not
    // to m_viewmempool, because we specifically require inputs to be confirmed
    // if they aren't in the package.
    m_view.SetBackend(m_active_chainstate.CoinsTip());
    // True iff this input is spent from a package parent or from a coin that
    // exists in the (confirmed) coins view.
    const auto package_or_confirmed = [this, &unconfirmed_parent_txids](
                                          const auto &input) {
        return unconfirmed_parent_txids.count(input.prevout.GetTxId()) > 0 ||
               m_view.HaveCoin(input.prevout);
    };
    if (!std::all_of(child->vin.cbegin(), child->vin.cend(),
                     package_or_confirmed)) {
        "package-not-child-with-unconfirmed-parents");
        return PackageMempoolAcceptResult(package_state, {});
    }
    // Protect against bugs where we pull more inputs from disk that miss being
    // added to coins_to_uncache. The backend will be connected again when
    // needed in PreChecks.
    m_view.SetBackend(m_dummy);

    LOCK(m_pool.cs);
    std::map<const TxId, const MempoolAcceptResult> results;
    // Node operators are free to set their mempool policies however they
    // please, nodes may receive transactions in different orders, and malicious
    // counterparties may try to take advantage of policy differences to pin or
    // delay propagation of transactions. As such, it's possible for some
    // package transaction(s) to already be in the mempool, and we don't want to
    // reject the entire package in that case (as that could be a censorship
    // vector). De-duplicate the transactions that are already in the mempool,
    // and only call AcceptMultipleTransactions() with the new transactions.
    // This ensures we don't double-count transaction counts and sizes when
    // checking ancestor/descendant limits, or double-count transaction fees for
    // fee-related policy.
    std::vector<CTransactionRef> txns_new;
    for (const auto &tx : package) {
        const auto &txid = tx->GetId();
        // An already confirmed tx is treated as one not in mempool, because all
        // we know is that the inputs aren't available.
        if (m_pool.exists(txid)) {
            // Exact transaction already exists in the mempool.
            auto iter = m_pool.GetIter(txid);
            assert(iter != std::nullopt);
            results.emplace(txid, MempoolAcceptResult::MempoolTx(
                                      (*iter.value())->GetTxSize(),
                                      (*iter.value())->GetFee()));
        } else {
            // Transaction does not already exist in the mempool.
            txns_new.push_back(tx);
        }
    }

    // Nothing to do if the entire package has already been submitted.
    if (txns_new.empty()) {
        return PackageMempoolAcceptResult(package_state, std::move(results));
    }
    // Validate the (deduplicated) transactions as a package.
    auto submission_result = AcceptMultipleTransactions(txns_new, args);
    // Include already-in-mempool transaction results in the final result.
    for (const auto &[txid, mempoolaccept_res] : results) {
        submission_result.m_tx_results.emplace(txid, mempoolaccept_res);
    }
    return submission_result;
}
1074 } // namespace
1075 
    Chainstate &active_chainstate,
    const CTransactionRef &tx,
    int64_t accept_time, bool bypass_limits,
    bool test_accept,
    unsigned int heightOverride) {
    assert(active_chainstate.GetMempool() != nullptr);
    CTxMemPool &pool{*active_chainstate.GetMempool()};

    std::vector<COutPoint> coins_to_uncache;
    auto args = MemPoolAccept::ATMPArgs::SingleAccept(
        config, accept_time, bypass_limits, coins_to_uncache, test_accept,
        heightOverride);
    const MempoolAcceptResult result = MemPoolAccept(pool, active_chainstate)
                                           .AcceptSingleTransaction(tx, args);
        // Remove coins that were not present in the coins cache before calling
        // AcceptToMemoryPool (ATMP); this is to prevent memory DoS in case we
        // receive a large number of invalid transactions that attempt to
        // overrun the in-memory coins cache
        // (`CCoinsViewCache::cacheCoins`).

        for (const COutPoint &outpoint : coins_to_uncache) {
            active_chainstate.CoinsTip().Uncache(outpoint);
        }
    }

    // After we've (potentially) uncached entries, ensure our coins cache is
    // still within its size limits
    BlockValidationState stateDummy;
    active_chainstate.FlushStateToDisk(stateDummy, FlushStateMode::PERIODIC);
    return result;
}
1110 
ProcessNewPackage(const Config &config, Chainstate &active_chainstate,
                  CTxMemPool &pool, const Package &package, bool test_accept) {
    assert(!package.empty());
    assert(std::all_of(package.cbegin(), package.cend(),
                       [](const auto &tx) { return tx != nullptr; }));

    std::vector<COutPoint> coins_to_uncache;
    const auto result = [&]() EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
        // test_accept: validate the package without submitting it; otherwise
        // run the full child-with-parents acceptance path.
        if (test_accept) {
            auto args = MemPoolAccept::ATMPArgs::PackageTestAccept(
                config, GetTime(), coins_to_uncache);
            return MemPoolAccept(pool, active_chainstate)
                .AcceptMultipleTransactions(package, args);
        } else {
            auto args = MemPoolAccept::ATMPArgs::PackageChildWithParents(
                config, GetTime(), coins_to_uncache);
            return MemPoolAccept(pool, active_chainstate)
                .AcceptPackage(package, args);
        }
    }();

    // Uncache coins pertaining to transactions that were not submitted to the
    // mempool.
    if (test_accept || result.m_state.IsInvalid()) {
        for (const COutPoint &hashTx : coins_to_uncache) {
            active_chainstate.CoinsTip().Uncache(hashTx);
        }
    }
    // Ensure the coins cache is still within limits.
    BlockValidationState state_dummy;
    active_chainstate.FlushStateToDisk(state_dummy, FlushStateMode::PERIODIC);
    return result;
}
1147 
1148 Amount GetBlockSubsidy(int nHeight, const Consensus::Params &consensusParams) {
1149  int halvings = nHeight / consensusParams.nSubsidyHalvingInterval;
1150  // Force block reward to zero when right shift is undefined.
1151  if (halvings >= 64) {
1152  return Amount::zero();
1153  }
1154 
1155  Amount nSubsidy = 50 * COIN;
1156  // Subsidy is cut in half every 210,000 blocks which will occur
1157  // approximately every 4 years.
1158  return ((nSubsidy / SATOSHI) >> halvings) * SATOSHI;
1159 }
1160 
/**
 * Construct the on-disk coins database (m_dbview) under the node's data
 * directory using the given leveldb name, and wrap it in the error-catching
 * view (m_catcherview). The in-memory cache layer is created separately via
 * InitCache().
 */
CoinsViews::CoinsViews(std::string ldb_name, size_t cache_size_bytes,
                       bool in_memory, bool should_wipe)
    : m_dbview(gArgs.GetDataDirNet() / ldb_name, cache_size_bytes, in_memory,
               should_wipe),
      m_catcherview(&m_dbview) {}
1166 
void CoinsViews::InitCache() {
    // Create the in-memory coins cache on top of the error-catching view;
    // must run after the constructor has wired up m_catcherview.
    m_cacheview = std::make_unique<CCoinsViewCache>(&m_catcherview);
}
1171 
    ChainstateManager &chainman,
    std::optional<BlockHash> from_snapshot_blockhash)
    // Wire up references only; the coins views/cache are created later via
    // InitCoinsDB() / InitCoinsCache().
    : m_mempool(mempool), m_blockman(blockman), m_chainman(chainman),
      m_from_snapshot_blockhash(from_snapshot_blockhash) {}
1177 
/**
 * Create the CoinsViews (leveldb-backed coins database plus wrappers) for
 * this chainstate. The database name gets the snapshot suffix appended when
 * the (elided) condition holds — presumably when this chainstate was created
 * from a UTXO snapshot, so it does not collide with the validated
 * chainstate's database; TODO confirm against the full source.
 */
void Chainstate::InitCoinsDB(size_t cache_size_bytes, bool in_memory,
                             bool should_wipe, std::string leveldb_name) {
        leveldb_name += node::SNAPSHOT_CHAINSTATE_SUFFIX;
    }
    m_coins_views = std::make_unique<CoinsViews>(leveldb_name, cache_size_bytes,
                                                 in_memory, should_wipe);
}
1186 
void Chainstate::InitCoinsCache(size_t cache_size_bytes) {
    // InitCoinsDB() must have been called first (hence the assert): the cache
    // sits on top of the views created there.
    assert(m_coins_views != nullptr);
    // Record the cache's size budget, then build the in-memory cache layer.
    m_coinstip_cache_size_bytes = cache_size_bytes;
    m_coins_views->InitCache();
}
1193 
1194 // Note that though this is marked const, we may end up modifying
1195 // `m_cached_finished_ibd`, which is a performance-related implementation
1196 // detail. This function must be marked `const` so that `CValidationInterface`
1197 // clients (which are given a `const Chainstate*`) can call it.
1198 //
    // Optimization: pre-test latch before taking the lock.
    if (m_cached_finished_ibd.load(std::memory_order_relaxed)) {
        return false;
    }

    LOCK(cs_main);
    // Re-check under the lock: another thread may have latched meanwhile.
    if (m_cached_finished_ibd.load(std::memory_order_relaxed)) {
        return false;
    }
        return true;
    }
    // No tip at all: definitely still in initial download.
    if (m_chain.Tip() == nullptr) {
        return true;
    }
        return true;
    }
    // Tip older than the maximum tip age: still catching up.
    if (m_chain.Tip()->GetBlockTime() < (GetTime() - nMaxTipAge)) {
        return true;
    }
    // All checks passed: latch to "not in IBD". This is a one-way latch; the
    // function never returns true again after this point.
    LogPrintf("Leaving InitialBlockDownload (latching to false)\n");
    m_cached_finished_ibd.store(true, std::memory_order_relaxed);
    return false;
}
1225 
1226 static void AlertNotify(const std::string &strMessage) {
1227  uiInterface.NotifyAlertChanged();
1228 #if defined(HAVE_SYSTEM)
1229  std::string strCmd = gArgs.GetArg("-alertnotify", "");
1230  if (strCmd.empty()) {
1231  return;
1232  }
1233 
1234  // Alert text should be plain ascii coming from a trusted source, but to be
1235  // safe we first strip anything not in safeChars, then add single quotes
1236  // around the whole string before passing it to the shell:
1237  std::string singleQuote("'");
1238  std::string safeStatus = SanitizeString(strMessage);
1239  safeStatus = singleQuote + safeStatus + singleQuote;
1240  ReplaceAll(strCmd, "%s", safeStatus);
1241 
1242  std::thread t(runCommand, strCmd);
1243  // thread runs free
1244  t.detach();
1245 #endif
1246 }
1247 
1250 
    // Before we get past initial download, we cannot reliably alert about forks
    // (we assume we don't get stuck on a fork before finishing our initial
    // sync)
    if (IsInitialBlockDownload()) {
        return;
    }

    // If our best fork is no longer within 72 blocks (+/- 12 hours if no one
    // mines it) of our head, or if it is back on the active chain, drop it
    if (m_best_fork_tip && (m_chain.Height() - m_best_fork_tip->nHeight >= 72 ||
        m_best_fork_tip = nullptr;
    }

    // Warn if we either track a qualifying fork tip, or know of an invalid
    // chain with at least ~6 blocks more work than our tip.
    if (m_best_fork_tip ||
        (m_chainman.m_best_invalid &&
         m_chainman.m_best_invalid->nChainWork >
             m_chain.Tip()->nChainWork + (GetBlockProof(*m_chain.Tip()) * 6))) {
        std::string warning =
            std::string("'Warning: Large-work fork detected, forking after "
                        "block ") +
            m_best_fork_base->phashBlock->ToString() + std::string("'");
        AlertNotify(warning);
    }

        LogPrintf("%s: Warning: Large fork found\n  forking the "
                  "chain at height %d (%s)\n  lasting to height %d "
                  "(%s).\nChain state database corruption likely.\n",
                  __func__, m_best_fork_base->nHeight,
        SetfLargeWorkForkFound(true);
    } else {
        LogPrintf("%s: Warning: Found invalid chain at least ~6 blocks "
                  "longer than our best chain.\nChain state database "
                  "corruption likely.\n",
                  __func__);
    }
    } else {
        // Neither condition holds: clear any previously-set warning flag.
        SetfLargeWorkForkFound(false);
    }
}
1298 
                                                  CBlockIndex *pindexNewForkTip) {

    // If we are on a fork that is sufficiently large, set a warning flag.
    const CBlockIndex *pfork = m_chain.FindFork(pindexNewForkTip);

    // We define a condition where we should warn the user about as a fork of at
    // least 7 blocks with a tip within 72 blocks (+/- 12 hours if no one mines
    // it) of ours. We use 7 blocks rather arbitrarily as it represents just
    // under 10% of sustained network hash rate operating on the fork, or a
    // chain that is entirely longer than ours and invalid (note that this
    // should be detected by both). We define it this way because it allows us
    // to only store the highest fork tip (+ base) which meets the 7-block
    // condition and from this always have the most-likely-to-cause-warning fork
    if (pfork &&
        (!m_best_fork_tip ||
         pindexNewForkTip->nHeight > m_best_fork_tip->nHeight) &&
        pindexNewForkTip->nChainWork - pfork->nChainWork >
            (GetBlockProof(*pfork) * 7) &&
        m_chain.Height() - pindexNewForkTip->nHeight < 72) {
        // Remember the highest qualifying fork tip and its fork point.
        m_best_fork_tip = pindexNewForkTip;
        m_best_fork_base = pfork;
    }

}
1326 
// Called both upon regular invalid block discovery *and* InvalidateBlock
    // Track the most-work invalid chain tip seen so far.
    if (!m_chainman.m_best_invalid ||
        pindexNew->nChainWork > m_chainman.m_best_invalid->nChainWork) {
        m_chainman.m_best_invalid = pindexNew;
    }
    // If our best-known header descends from the newly invalid block, fall
    // back to the active chain tip as the best header.
    if (m_chainman.m_best_header != nullptr &&
        m_chainman.m_best_header->GetAncestor(pindexNew->nHeight) ==
            pindexNew) {
        m_chainman.m_best_header = m_chain.Tip();
    }

    // If the invalid chain found is supposed to be finalized, we need to move
    // back the finalization point.
    if (IsBlockAvalancheFinalized(pindexNew)) {
        m_avalancheFinalizedBlockIndex = pindexNew->pprev;
    }

    LogPrintf("%s: invalid block=%s  height=%d  log2_work=%f  date=%s\n",
              __func__, pindexNew->GetBlockHash().ToString(),
              pindexNew->nHeight,
              log(pindexNew->nChainWork.getdouble()) / log(2.0),
              FormatISO8601DateTime(pindexNew->GetBlockTime()));
    CBlockIndex *tip = m_chain.Tip();
    assert(tip);
    LogPrintf("%s: current best=%s  height=%d  log2_work=%f  date=%s\n",
              __func__, tip->GetBlockHash().ToString(), m_chain.Height(),
              log(tip->nChainWork.getdouble()) / log(2.0),
}
1359 
// Same as InvalidChainFound, above, except not called directly from
// InvalidateBlock, which does its own setBlockIndexCandidates management.
                                 const BlockValidationState &state) {
        // Mark the block itself as failed, record it in the failed-blocks set,
        // and queue the index entry for a flush to disk, then update the
        // best-invalid bookkeeping.
        pindex->nStatus = pindex->nStatus.withFailed();
        m_chainman.m_failed_blocks.insert(pindex);
        m_blockman.m_dirty_blockindex.insert(pindex);
        InvalidChainFound(pindex);
    }
}
1372 
1373 void SpendCoins(CCoinsViewCache &view, const CTransaction &tx, CTxUndo &txundo,
1374  int nHeight) {
1375  // Mark inputs spent.
1376  if (tx.IsCoinBase()) {
1377  return;
1378  }
1379 
1380  txundo.vprevout.reserve(tx.vin.size());
1381  for (const CTxIn &txin : tx.vin) {
1382  txundo.vprevout.emplace_back();
1383  bool is_spent = view.SpendCoin(txin.prevout, &txundo.vprevout.back());
1384  assert(is_spent);
1385  }
1386 }
1387 
1388 void UpdateCoins(CCoinsViewCache &view, const CTransaction &tx, CTxUndo &txundo,
1389  int nHeight) {
1390  SpendCoins(view, tx, txundo, nHeight);
1391  AddCoins(view, tx, nHeight);
1392 }
1393 
    // Execute this input's scriptSig against the spent output's scriptPubKey
    // under the flags configured for this check.
    const CScript &scriptSig = ptxTo->vin[nIn].scriptSig;
    if (!VerifyScript(scriptSig, m_tx_out.scriptPubKey, nFlags,
                      metrics, &error)) {
        return false;
    }
    // The script succeeded, but it may still have exceeded a sigchecks limit.
    if ((pTxLimitSigChecks &&
        // we can't assign a meaningful script error (since the script
        // succeeded), but remove the ScriptError::OK which could be
        // misinterpreted.
        return false;
    }
    return true;
}
1414 
                       const CCoinsViewCache &inputs, const uint32_t flags,
                       bool sigCacheStore, bool scriptCacheStore,
                       const PrecomputedTransactionData &txdata,
                       int &nSigChecksOut, TxSigCheckLimiter &txLimitSigChecks,
                       CheckInputsLimiter *pBlockLimitSigChecks,
                       std::vector<CScriptCheck> *pvChecks) {
    assert(!tx.IsCoinBase());

    if (pvChecks) {
        pvChecks->reserve(tx.vin.size());
    }

    // First check if script executions have been cached with the same flags.
    // Note that this assumes that the inputs provided are correct (ie that the
    // transaction hash which is in tx's prevouts properly commits to the
    // scriptPubKey in the inputs view of that transaction).
    ScriptCacheKey hashCacheEntry(tx, flags);
    if (IsKeyInScriptCache(hashCacheEntry, !scriptCacheStore, nSigChecksOut)) {
        // Cache hit: the scripts are known valid, but their sigchecks still
        // count against the per-transaction and per-block limits.
        if (!txLimitSigChecks.consume_and_check(nSigChecksOut) ||
            (pBlockLimitSigChecks &&
             !pBlockLimitSigChecks->consume_and_check(nSigChecksOut))) {
            "too-many-sigchecks");
        }
        return true;
    }

    int nSigChecksTotal = 0;

    for (size_t i = 0; i < tx.vin.size(); i++) {
        const COutPoint &prevout = tx.vin[i].prevout;
        const Coin &coin = inputs.AccessCoin(prevout);
        assert(!coin.IsSpent());

        // We very carefully only pass in things to CScriptCheck which are
        // clearly committed to by tx's hash. This provides a sanity
        // check that our caching is not introducing consensus failures through
        // additional data in, eg, the coins being spent being checked as a part
        // of CScriptCheck.

        // Verify signature
        CScriptCheck check(coin.GetTxOut(), tx, i, flags, sigCacheStore, txdata,
                           &txLimitSigChecks, pBlockLimitSigChecks);

        // If pvChecks is not null, defer the check execution to the caller.
        if (pvChecks) {
            pvChecks->push_back(std::move(check));
            continue;
        }

        if (!check()) {
            ScriptError scriptError = check.GetScriptError();
            // Compute flags without the optional standardness flags.
            // This differs from MANDATORY_SCRIPT_VERIFY_FLAGS as it contains
            // additional upgrade flags (see AcceptToMemoryPoolWorker variable
            // extraFlags).
            uint32_t mandatoryFlags =
            if (flags != mandatoryFlags) {
                // Check whether the failure was caused by a non-mandatory
                // script verification check. If so, ensure we return
                // NOT_STANDARD instead of CONSENSUS to avoid downstream users
                // splitting the network between upgraded and non-upgraded nodes
                // by banning CONSENSUS-failing data providers.
                CScriptCheck check2(coin.GetTxOut(), tx, i, mandatoryFlags,
                                    sigCacheStore, txdata);
                if (check2()) {
                    return state.Invalid(
                        strprintf("non-mandatory-script-verify-flag (%s)",
                                  ScriptErrorString(scriptError)));
                }
                // update the error message to reflect the mandatory violation.
                scriptError = check2.GetScriptError();
            }

            // MANDATORY flag failures correspond to
            // TxValidationResult::TX_CONSENSUS. Because CONSENSUS failures are
            // the most serious case of validation failures, we may need to
            // consider using RECENT_CONSENSUS_CHANGE for any script failure
            // that could be due to non-upgraded nodes which we may want to
            // support, to avoid splitting the network (but this depends on the
            // details of how net_processing handles such errors).
            return state.Invalid(
                strprintf("mandatory-script-verify-flag-failed (%s)",
                          ScriptErrorString(scriptError)));
        }

        nSigChecksTotal += check.GetScriptExecutionMetrics().nSigChecks;
    }

    nSigChecksOut = nSigChecksTotal;

    if (scriptCacheStore && !pvChecks) {
        // We executed all of the provided scripts, and were told to cache the
        // result. Do so now.
        AddKeyInScriptCache(hashCacheEntry, nSigChecksTotal);
    }

    return true;
}
1519 
1520 bool AbortNode(BlockValidationState &state, const std::string &strMessage,
1521  const bilingual_str &userMessage) {
1522  AbortNode(strMessage, userMessage);
1523  return state.Error(strMessage);
1524 }
1525 
                                      const COutPoint &out) {
    bool fClean = true;

    if (view.HaveCoin(out)) {
        // Overwriting transaction output.
        fClean = false;
    }

    if (undo.GetHeight() == 0) {
        // Missing undo metadata (height and coinbase). Older versions included
        // this information only in undo records for the last spend of a
        // transactions' outputs. This implies that it must be present for some
        // other output of the same tx.
        const Coin &alternate = AccessByTxid(view, out.GetTxId());
        if (alternate.IsSpent()) {
            // Adding output for transaction without known metadata
            return DisconnectResult::FAILED;
        }

        // This is somewhat ugly, but hopefully utility is limited. This is only
        // useful when working from legacy on-disk data. In any case, putting
        // the correct information in there doesn't hurt.
        const_cast<Coin &>(undo) = Coin(undo.GetTxOut(), alternate.GetHeight(),
                                        alternate.IsCoinBase());
    }

    // If the coin already exists as an unspent coin in the cache, then the
    // possible_overwrite parameter to AddCoin must be set to true. We have
    // already checked whether an unspent coin exists above using HaveCoin, so
    // we don't need to guess. When fClean is false, an unspent coin already
    // existed and it is an overwrite.
    view.AddCoin(out, std::move(undo), !fClean);

}
1563 
/**
 * Undo the effects of this block (with given index) on the UTXO set
 * represented by view. Reads the block's undo data from disk; returns
 * DisconnectResult::FAILED if that read fails, otherwise forwards to
 * ApplyBlockUndo().
 */
DisconnectResult Chainstate::DisconnectBlock(const CBlock &block,
                                             const CBlockIndex *pindex,
                                             CCoinsViewCache &view) {
    CBlockUndo blockUndo;
    if (!UndoReadFromDisk(blockUndo, pindex)) {
        error("DisconnectBlock(): failure reading undo data");
        return DisconnectResult::FAILED;
    }

    return ApplyBlockUndo(std::move(blockUndo), block, pindex, view);
}
1580 
                                const CBlockIndex *pindex,
                                CCoinsViewCache &view) {
    bool fClean = true;

    // The undo data must contain exactly one CTxUndo per non-coinbase
    // transaction in the block.
    if (blockUndo.vtxundo.size() + 1 != block.vtx.size()) {
        error("DisconnectBlock(): block and undo data inconsistent");
        return DisconnectResult::FAILED;
    }

    // First, restore inputs.
    for (size_t i = 1; i < block.vtx.size(); i++) {
        const CTransaction &tx = *(block.vtx[i]);
        CTxUndo &txundo = blockUndo.vtxundo[i - 1];
        if (txundo.vprevout.size() != tx.vin.size()) {
            error("DisconnectBlock(): transaction and undo data inconsistent");
            return DisconnectResult::FAILED;
        }

        for (size_t j = 0; j < tx.vin.size(); j++) {
            const COutPoint &out = tx.vin[j].prevout;
            DisconnectResult res =
                UndoCoinSpend(std::move(txundo.vprevout[j]), view, out);
            if (res == DisconnectResult::FAILED) {
                return DisconnectResult::FAILED;
            }
            // A single UNCLEAN result makes the whole disconnect unclean.
            fClean = fClean && res != DisconnectResult::UNCLEAN;
        }
        // At this point, all of txundo.vprevout should have been moved out.
    }

    // Second, revert created outputs.
    for (const auto &ptx : block.vtx) {
        const CTransaction &tx = *ptx;
        const TxId &txid = tx.GetId();
        const bool is_coinbase = tx.IsCoinBase();

        // Check that all outputs are available and match the outputs in the
        // block itself exactly.
        for (size_t o = 0; o < tx.vout.size(); o++) {
            if (tx.vout[o].scriptPubKey.IsUnspendable()) {
                continue;
            }

            COutPoint out(txid, o);
            Coin coin;
            bool is_spent = view.SpendCoin(out, &coin);
            if (!is_spent || tx.vout[o] != coin.GetTxOut() ||
                uint32_t(pindex->nHeight) != coin.GetHeight() ||
                is_coinbase != coin.IsCoinBase()) {
                // transaction output mismatch
                fClean = false;
            }
        }
    }

    // Move best block pointer to previous block.
    view.SetBestBlock(block.hashPrevBlock);

}
1642 
1644 
void StartScriptCheckWorkerThreads(int threads_num) {
    // Launch the shared script-check queue's worker threads.
    scriptcheckqueue.StartWorkerThreads(threads_num);
}
1648 
    // Stop the shared script-check queue's worker threads.
    scriptcheckqueue.StopWorkerThreads();
}
1652 
// Returns the script flags which should be checked for the block after
// the given block, i.e. the rules that a transaction must satisfy to be
// valid in the next block. Each deployment/upgrade check below adds its
// flags (the individual flag assignments are inside the if-bodies).
static uint32_t GetNextBlockScriptFlags(const Consensus::Params &params,
                                        const CBlockIndex *pindex) {
    uint32_t flags = SCRIPT_VERIFY_NONE;

    // Enforce P2SH (BIP16)
    if (DeploymentActiveAfter(pindex, params, Consensus::DEPLOYMENT_P2SH)) {
    }

    // Enforce the DERSIG (BIP66) rule.
    if (DeploymentActiveAfter(pindex, params, Consensus::DEPLOYMENT_DERSIG)) {
    }

    // Start enforcing CHECKLOCKTIMEVERIFY (BIP65) rule.
    if (DeploymentActiveAfter(pindex, params, Consensus::DEPLOYMENT_CLTV)) {
    }

    // Start enforcing CSV (BIP68, BIP112 and BIP113) rule.
    if (DeploymentActiveAfter(pindex, params, Consensus::DEPLOYMENT_CSV)) {
    }

    // If the UAHF is enabled, we start accepting replay protected txns
    if (IsUAHFenabled(params, pindex)) {
    }

    // If the DAA HF is enabled, we start rejecting transaction that use a high
    // s in their signature. We also make sure that signature that are supposed
    // to fail (for instance in multisig or other forms of smart contracts) are
    // null.
    if (IsDAAEnabled(params, pindex)) {
    }

    // When the magnetic anomaly fork is enabled, we start accepting
    // transactions using the OP_CHECKDATASIG opcode and it's verify
    // alternative. We also start enforcing push only signatures and
    // clean stack.
    if (IsMagneticAnomalyEnabled(params, pindex)) {
    }

    if (IsGravitonEnabled(params, pindex)) {
    }

    if (IsPhononEnabled(params, pindex)) {
    }

    // We make sure this node will have replay protection during the next hard
    // fork.
    if (IsReplayProtectionEnabled(params, pindex)) {
    }

    return flags;
}
1720 
// Cumulative benchmark counters for ConnectBlock, reported via
// LogPrint(BCLog::BENCH, ...). All nTime* values are in microseconds.
static int64_t nTimeCheck = 0; // time spent in the initial sanity checks
static int64_t nTimeForks = 0;
static int64_t nTimeVerify = 0;
static int64_t nTimeConnect = 0;
static int64_t nTimeIndex = 0;
static int64_t nTimeTotal = 0;
// Number of blocks connected since startup (incremented in ConnectBlock).
static int64_t nBlocksTotal = 0;
1728 
1735 bool Chainstate::ConnectBlock(const CBlock &block, BlockValidationState &state,
1736  CBlockIndex *pindex, CCoinsViewCache &view,
1737  BlockValidationOptions options, Amount *blockFees,
1738  bool fJustCheck) {
1740  assert(pindex);
1741 
1742  const BlockHash block_hash{block.GetHash()};
1743  assert(*pindex->phashBlock == block_hash);
1744 
1745  int64_t nTimeStart = GetTimeMicros();
1746 
1747  const CChainParams &params{m_chainman.GetParams()};
1748  const Consensus::Params &consensusParams = params.GetConsensus();
1749 
1750  // Check it again in case a previous version let a bad block in
1751  // NOTE: We don't currently (re-)invoke ContextualCheckBlock() or
1752  // ContextualCheckBlockHeader() here. This means that if we add a new
1753  // consensus rule that is enforced in one of those two functions, then we
1754  // may have let in a block that violates the rule prior to updating the
1755  // software, and we would NOT be enforcing the rule here. Fully solving
1756  // upgrade from one software version to the next after a consensus rule
1757  // change is potentially tricky and issue-specific.
1758  // Also, currently the rule against blocks more than 2 hours in the future
1759  // is enforced in ContextualCheckBlockHeader(); we wouldn't want to
1760  // re-enforce that rule here (at least until we make it impossible for
1761  // m_adjusted_time_callback() to go backward).
1762  if (!CheckBlock(block, state, consensusParams,
1763  options.withCheckPoW(!fJustCheck)
1764  .withCheckMerkleRoot(!fJustCheck))) {
1766  // We don't write down blocks to disk if they may have been
1767  // corrupted, so this should be impossible unless we're having
1768  // hardware problems.
1769  return AbortNode(state, "Corrupt block found indicating potential "
1770  "hardware failure; shutting down");
1771  }
1772  return error("%s: Consensus::CheckBlock: %s", __func__,
1773  state.ToString());
1774  }
1775 
1776  // Verify that the view's current state corresponds to the previous block
1777  BlockHash hashPrevBlock =
1778  pindex->pprev == nullptr ? BlockHash() : pindex->pprev->GetBlockHash();
1779  assert(hashPrevBlock == view.GetBestBlock());
1780 
1781  nBlocksTotal++;
1782 
1783  // Special case for the genesis block, skipping connection of its
1784  // transactions (its coinbase is unspendable)
1785  if (block_hash == consensusParams.hashGenesisBlock) {
1786  if (!fJustCheck) {
1787  view.SetBestBlock(pindex->GetBlockHash());
1788  }
1789 
1790  return true;
1791  }
1792 
1793  bool fScriptChecks = true;
1794  if (!hashAssumeValid.IsNull()) {
1795  // We've been configured with the hash of a block which has been
1796  // externally verified to have a valid history. A suitable default value
1797  // is included with the software and updated from time to time. Because
1798  // validity relative to a piece of software is an objective fact these
1799  // defaults can be easily reviewed. This setting doesn't force the
1800  // selection of any particular chain but makes validating some faster by
1801  // effectively caching the result of part of the verification.
1802  BlockMap::const_iterator it =
1803  m_blockman.m_block_index.find(hashAssumeValid);
1804  if (it != m_blockman.m_block_index.end()) {
1805  if (it->second.GetAncestor(pindex->nHeight) == pindex &&
1806  m_chainman.m_best_header->GetAncestor(pindex->nHeight) ==
1807  pindex &&
1808  m_chainman.m_best_header->nChainWork >= nMinimumChainWork) {
1809  // This block is a member of the assumed verified chain and an
1810  // ancestor of the best header.
1811  // Script verification is skipped when connecting blocks under
1812  // the assumevalid block. Assuming the assumevalid block is
1813  // valid this is safe because block merkle hashes are still
            // computed and checked. Of course, if an assumed valid block is
1815  // invalid due to false scriptSigs this optimization would allow
1816  // an invalid chain to be accepted.
1817  // The equivalent time check discourages hash power from
1818  // extorting the network via DOS attack into accepting an
1819  // invalid block through telling users they must manually set
1820  // assumevalid. Requiring a software change or burying the
1821  // invalid block, regardless of the setting, makes it hard to
1822  // hide the implication of the demand. This also avoids having
1823  // release candidates that are hardly doing any signature
1824  // verification at all in testing without having to artificially
1825  // set the default assumed verified block further back. The test
1826  // against nMinimumChainWork prevents the skipping when denied
1827  // access to any chain at least as good as the expected chain.
1828  fScriptChecks = (GetBlockProofEquivalentTime(
1829  *m_chainman.m_best_header, *pindex,
1830  *m_chainman.m_best_header,
1831  consensusParams) <= 60 * 60 * 24 * 7 * 2);
1832  }
1833  }
1834  }
1835 
1836  int64_t nTime1 = GetTimeMicros();
1837  nTimeCheck += nTime1 - nTimeStart;
1838  LogPrint(BCLog::BENCH, " - Sanity checks: %.2fms [%.2fs (%.2fms/blk)]\n",
1839  MILLI * (nTime1 - nTimeStart), nTimeCheck * MICRO,
1841 
1842  // Do not allow blocks that contain transactions which 'overwrite' older
1843  // transactions, unless those are already completely spent. If such
1844  // overwrites are allowed, coinbases and transactions depending upon those
1845  // can be duplicated to remove the ability to spend the first instance --
1846  // even after being sent to another address.
1847  // See BIP30, CVE-2012-1909, and http://r6.ca/blog/20120206T005236Z.html
1848  // for more information. This rule was originally applied to all blocks
1849  // with a timestamp after March 15, 2012, 0:00 UTC. Now that the whole
1850  // chain is irreversibly beyond that time it is applied to all blocks
1851  // except the two in the chain that violate it. This prevents exploiting
1852  // the issue against nodes during their initial block download.
1853  bool fEnforceBIP30 = !((pindex->nHeight == 91842 &&
1854  pindex->GetBlockHash() ==
1855  uint256S("0x00000000000a4d0a398161ffc163c503763"
1856  "b1f4360639393e0e4c8e300e0caec")) ||
1857  (pindex->nHeight == 91880 &&
1858  pindex->GetBlockHash() ==
1859  uint256S("0x00000000000743f190a18c5577a3c2d2a1f"
1860  "610ae9601ac046a38084ccb7cd721")));
1861 
1862  // Once BIP34 activated it was not possible to create new duplicate
1863  // coinbases and thus other than starting with the 2 existing duplicate
1864  // coinbase pairs, not possible to create overwriting txs. But by the time
1865  // BIP34 activated, in each of the existing pairs the duplicate coinbase had
1866  // overwritten the first before the first had been spent. Since those
1867  // coinbases are sufficiently buried it's no longer possible to create
1868  // further duplicate transactions descending from the known pairs either. If
1869  // we're on the known chain at height greater than where BIP34 activated, we
1870  // can save the db accesses needed for the BIP30 check.
1871 
1872  // BIP34 requires that a block at height X (block X) has its coinbase
1873  // scriptSig start with a CScriptNum of X (indicated height X). The above
1874  // logic of no longer requiring BIP30 once BIP34 activates is flawed in the
1875  // case that there is a block X before the BIP34 height of 227,931 which has
1876  // an indicated height Y where Y is greater than X. The coinbase for block
1877  // X would also be a valid coinbase for block Y, which could be a BIP30
1878  // violation. An exhaustive search of all mainnet coinbases before the
1879  // BIP34 height which have an indicated height greater than the block height
1880  // reveals many occurrences. The 3 lowest indicated heights found are
1881  // 209,921, 490,897, and 1,983,702 and thus coinbases for blocks at these 3
1882  // heights would be the first opportunity for BIP30 to be violated.
1883 
1884  // The search reveals a great many blocks which have an indicated height
1885  // greater than 1,983,702, so we simply remove the optimization to skip
1886  // BIP30 checking for blocks at height 1,983,702 or higher. Before we reach
1887  // that block in another 25 years or so, we should take advantage of a
1888  // future consensus change to do a new and improved version of BIP34 that
1889  // will actually prevent ever creating any duplicate coinbases in the
1890  // future.
1891  static constexpr int BIP34_IMPLIES_BIP30_LIMIT = 1983702;
1892 
1893  // There is no potential to create a duplicate coinbase at block 209,921
1894  // because this is still before the BIP34 height and so explicit BIP30
1895  // checking is still active.
1896 
1897  // The final case is block 176,684 which has an indicated height of
1898  // 490,897. Unfortunately, this issue was not discovered until about 2 weeks
1899  // before block 490,897 so there was not much opportunity to address this
1900  // case other than to carefully analyze it and determine it would not be a
1901  // problem. Block 490,897 was, in fact, mined with a different coinbase than
1902  // block 176,684, but it is important to note that even if it hadn't been or
1903  // is remined on an alternate fork with a duplicate coinbase, we would still
1904  // not run into a BIP30 violation. This is because the coinbase for 176,684
1905  // is spent in block 185,956 in transaction
1906  // d4f7fbbf92f4a3014a230b2dc70b8058d02eb36ac06b4a0736d9d60eaa9e8781. This
1907  // spending transaction can't be duplicated because it also spends coinbase
1908  // 0328dd85c331237f18e781d692c92de57649529bd5edf1d01036daea32ffde29. This
1909  // coinbase has an indicated height of over 4.2 billion, and wouldn't be
1910  // duplicatable until that height, and it's currently impossible to create a
1911  // chain that long. Nevertheless we may wish to consider a future soft fork
1912  // which retroactively prevents block 490,897 from creating a duplicate
1913  // coinbase. The two historical BIP30 violations often provide a confusing
1914  // edge case when manipulating the UTXO and it would be simpler not to have
1915  // another edge case to deal with.
1916 
1917  // testnet3 has no blocks before the BIP34 height with indicated heights
1918  // post BIP34 before approximately height 486,000,000 and presumably will
1919  // be reset before it reaches block 1,983,702 and starts doing unnecessary
1920  // BIP30 checking again.
1921  assert(pindex->pprev);
1922  CBlockIndex *pindexBIP34height =
1923  pindex->pprev->GetAncestor(consensusParams.BIP34Height);
1924  // Only continue to enforce if we're below BIP34 activation height or the
1925  // block hash at that height doesn't correspond.
1926  fEnforceBIP30 =
1927  fEnforceBIP30 &&
1928  (!pindexBIP34height ||
1929  !(pindexBIP34height->GetBlockHash() == consensusParams.BIP34Hash));
1930 
1931  // TODO: Remove BIP30 checking from block height 1,983,702 on, once we have
1932  // a consensus change that ensures coinbases at those heights can not
1933  // duplicate earlier coinbases.
1934  if (fEnforceBIP30 || pindex->nHeight >= BIP34_IMPLIES_BIP30_LIMIT) {
1935  for (const auto &tx : block.vtx) {
1936  for (size_t o = 0; o < tx->vout.size(); o++) {
1937  if (view.HaveCoin(COutPoint(tx->GetId(), o))) {
1938  LogPrintf("ERROR: ConnectBlock(): tried to overwrite "
1939  "transaction\n");
1941  "bad-txns-BIP30");
1942  }
1943  }
1944  }
1945  }
1946 
1947  // Enforce BIP68 (sequence locks).
1948  int nLockTimeFlags = 0;
1949  if (DeploymentActiveAt(*pindex, consensusParams,
1951  nLockTimeFlags |= LOCKTIME_VERIFY_SEQUENCE;
1952  }
1953 
1954  const uint32_t flags =
1955  GetNextBlockScriptFlags(consensusParams, pindex->pprev);
1956 
1957  int64_t nTime2 = GetTimeMicros();
1958  nTimeForks += nTime2 - nTime1;
1959  LogPrint(BCLog::BENCH, " - Fork checks: %.2fms [%.2fs (%.2fms/blk)]\n",
1960  MILLI * (nTime2 - nTime1), nTimeForks * MICRO,
1962 
1963  std::vector<int> prevheights;
1964  Amount nFees = Amount::zero();
1965  int nInputs = 0;
1966 
1967  // Limit the total executed signature operations in the block, a consensus
1968  // rule. Tracking during the CPU-consuming part (validation of uncached
1969  // inputs) is per-input atomic and validation in each thread stops very
1970  // quickly after the limit is exceeded, so an adversary cannot cause us to
1971  // exceed the limit by much at all.
1972  CheckInputsLimiter nSigChecksBlockLimiter(
1974 
1975  std::vector<TxSigCheckLimiter> nSigChecksTxLimiters;
1976  nSigChecksTxLimiters.resize(block.vtx.size() - 1);
1977 
1978  CBlockUndo blockundo;
1979  blockundo.vtxundo.resize(block.vtx.size() - 1);
1980 
1981  CCheckQueueControl<CScriptCheck> control(fScriptChecks ? &scriptcheckqueue
1982  : nullptr);
1983 
1984  // Add all outputs
1985  try {
1986  for (const auto &ptx : block.vtx) {
1987  AddCoins(view, *ptx, pindex->nHeight);
1988  }
1989  } catch (const std::logic_error &e) {
1990  // This error will be thrown from AddCoin if we try to connect a block
1991  // containing duplicate transactions. Such a thing should normally be
1992  // caught early nowadays (due to ContextualCheckBlock's CTOR
1993  // enforcement) however some edge cases can escape that:
1994  // - ContextualCheckBlock does not get re-run after saving the block to
1995  // disk, and older versions may have saved a weird block.
1996  // - its checks are not applied to pre-CTOR chains, which we might visit
1997  // with checkpointing off.
1998  LogPrintf("ERROR: ConnectBlock(): tried to overwrite transaction\n");
2000  "tx-duplicate");
2001  }
2002 
2003  size_t txIndex = 0;
2004  // nSigChecksRet may be accurate (found in cache) or 0 (checks were
2005  // deferred into vChecks).
2006  int nSigChecksRet;
2007  for (const auto &ptx : block.vtx) {
2008  const CTransaction &tx = *ptx;
2009  const bool isCoinBase = tx.IsCoinBase();
2010  nInputs += tx.vin.size();
2011 
2012  {
2013  Amount txfee = Amount::zero();
2014  TxValidationState tx_state;
2015  if (!isCoinBase &&
2016  !Consensus::CheckTxInputs(tx, tx_state, view, pindex->nHeight,
2017  txfee)) {
2018  // Any transaction validation failure in ConnectBlock is a block
2019  // consensus failure.
2021  tx_state.GetRejectReason(),
2022  tx_state.GetDebugMessage());
2023 
2024  return error("%s: Consensus::CheckTxInputs: %s, %s", __func__,
2025  tx.GetId().ToString(), state.ToString());
2026  }
2027  nFees += txfee;
2028  }
2029 
2030  if (!MoneyRange(nFees)) {
2031  LogPrintf("ERROR: %s: accumulated fee in the block out of range.\n",
2032  __func__);
2034  "bad-txns-accumulated-fee-outofrange");
2035  }
2036 
2037  // The following checks do not apply to the coinbase.
2038  if (isCoinBase) {
2039  continue;
2040  }
2041 
2042  // Check that the transaction is BIP68 final. BIP68 lock checks (as
2043  // opposed to nLockTime checks) must be in ConnectBlock because they
2044  // require the UTXO set.
2045  prevheights.resize(tx.vin.size());
2046  for (size_t j = 0; j < tx.vin.size(); j++) {
2047  prevheights[j] = view.AccessCoin(tx.vin[j].prevout).GetHeight();
2048  }
2049 
2050  if (!SequenceLocks(tx, nLockTimeFlags, prevheights, *pindex)) {
2051  LogPrintf("ERROR: %s: contains a non-BIP68-final transaction\n",
2052  __func__);
2054  "bad-txns-nonfinal");
2055  }
2056 
2057  // Don't cache results if we're actually connecting blocks (still
2058  // consult the cache, though).
2059  bool fCacheResults = fJustCheck;
2060 
2061  const bool fEnforceSigCheck = flags & SCRIPT_ENFORCE_SIGCHECKS;
2062  if (!fEnforceSigCheck) {
2063  // Historically, there has been transactions with a very high
2064  // sigcheck count, so we need to disable this check for such
2065  // transactions.
2066  nSigChecksTxLimiters[txIndex] = TxSigCheckLimiter::getDisabled();
2067  }
2068 
2069  std::vector<CScriptCheck> vChecks;
2070  TxValidationState tx_state;
2071  if (fScriptChecks &&
2072  !CheckInputScripts(tx, tx_state, view, flags, fCacheResults,
2073  fCacheResults, PrecomputedTransactionData(tx),
2074  nSigChecksRet, nSigChecksTxLimiters[txIndex],
2075  &nSigChecksBlockLimiter, &vChecks)) {
2076  // Any transaction validation failure in ConnectBlock is a block
2077  // consensus failure
2079  tx_state.GetRejectReason(),
2080  tx_state.GetDebugMessage());
2081  return error(
2082  "ConnectBlock(): CheckInputScripts on %s failed with %s",
2083  tx.GetId().ToString(), state.ToString());
2084  }
2085 
2086  control.Add(vChecks);
2087 
2088  // Note: this must execute in the same iteration as CheckTxInputs (not
2089  // in a separate loop) in order to detect double spends. However,
2090  // this does not prevent double-spending by duplicated transaction
2091  // inputs in the same transaction (cf. CVE-2018-17144) -- that check is
2092  // done in CheckBlock (CheckRegularTransaction).
2093  SpendCoins(view, tx, blockundo.vtxundo.at(txIndex), pindex->nHeight);
2094  txIndex++;
2095  }
2096 
2097  int64_t nTime3 = GetTimeMicros();
2098  nTimeConnect += nTime3 - nTime2;
2100  " - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) "
2101  "[%.2fs (%.2fms/blk)]\n",
2102  (unsigned)block.vtx.size(), MILLI * (nTime3 - nTime2),
2103  MILLI * (nTime3 - nTime2) / block.vtx.size(),
2104  nInputs <= 1 ? 0 : MILLI * (nTime3 - nTime2) / (nInputs - 1),
2106 
2107  const Amount blockReward =
2108  nFees + GetBlockSubsidy(pindex->nHeight, consensusParams);
2109  if (block.vtx[0]->GetValueOut() > blockReward) {
2110  LogPrintf("ERROR: ConnectBlock(): coinbase pays too much (actual=%d vs "
2111  "limit=%d)\n",
2112  block.vtx[0]->GetValueOut(), blockReward);
2114  "bad-cb-amount");
2115  }
2116 
2117  if (blockFees) {
2118  *blockFees = nFees;
2119  }
2120 
2121  if (!control.Wait()) {
2123  "blk-bad-inputs", "parallel script check failed");
2124  }
2125 
2126  int64_t nTime4 = GetTimeMicros();
2127  nTimeVerify += nTime4 - nTime2;
2128  LogPrint(
2129  BCLog::BENCH,
2130  " - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs (%.2fms/blk)]\n",
2131  nInputs - 1, MILLI * (nTime4 - nTime2),
2132  nInputs <= 1 ? 0 : MILLI * (nTime4 - nTime2) / (nInputs - 1),
2134 
2135  if (fJustCheck) {
2136  return true;
2137  }
2138 
2139  if (!m_blockman.WriteUndoDataForBlock(blockundo, state, pindex, params)) {
2140  return false;
2141  }
2142 
2143  if (!pindex->IsValid(BlockValidity::SCRIPTS)) {
2145  m_blockman.m_dirty_blockindex.insert(pindex);
2146  }
2147 
2148  assert(pindex->phashBlock);
2149  // add this block to the view's block chain
2150  view.SetBestBlock(pindex->GetBlockHash());
2151 
2152  int64_t nTime5 = GetTimeMicros();
2153  nTimeIndex += nTime5 - nTime4;
2154  LogPrint(BCLog::BENCH, " - Index writing: %.2fms [%.2fs (%.2fms/blk)]\n",
2155  MILLI * (nTime5 - nTime4), nTimeIndex * MICRO,
2157 
2158  TRACE6(validation, block_connected, block_hash.data(), pindex->nHeight,
2159  block.vtx.size(), nInputs, nSigChecksRet,
2160  // in microseconds (µs)
2161  nTime5 - nTimeStart);
2162 
2163  return true;
2164 }
2165 
// Convenience overload: classify the current coins-cache memory pressure
// using the configured coinstip cache budget, delegating to the two-argument
// overload below. Used by FlushStateToDisk to decide whether a flush is due.
// NOTE(review): the extraction dropped original line 2169 here (the second
// argument before the trailing ": 0" — presumably the mempool size budget
// when m_mempool is set). Verify against upstream validation.cpp.
2166 CoinsCacheSizeState Chainstate::GetCoinsCacheSizeState() {
2168  return this->GetCoinsCacheSizeState(m_coinstip_cache_size_bytes,
2170  : 0);
2171 }
2172 
// Classify coins-cache memory pressure as OK / LARGE / CRITICAL.
// - max_coins_cache_size_bytes: configured budget for the coins cache.
// - max_mempool_size_bytes: mempool budget; unused mempool headroom
//   (budget minus current usage, floored at 0) is lent to the cache.
// Returns CRITICAL when usage exceeds the total space, LARGE when within
// 10% / 10MB of the limit, OK otherwise.
// NOTE(review): the extraction dropped the return-type line (2173) and the
// return statements at original lines 2192/2194 (presumably
// CoinsCacheSizeState::CRITICAL and ::LARGE) — verify against upstream.
2174 Chainstate::GetCoinsCacheSizeState(size_t max_coins_cache_size_bytes,
2175  size_t max_mempool_size_bytes) {
2177  int64_t nMempoolUsage = m_mempool ? m_mempool->DynamicMemoryUsage() : 0;
2178  int64_t cacheSize = CoinsTip().DynamicMemoryUsage();
     // Total space available to the cache = its own budget plus whatever
     // headroom the mempool is not currently using (never negative).
2179  int64_t nTotalSpace =
2180  max_coins_cache_size_bytes +
2181  std::max<int64_t>(int64_t(max_mempool_size_bytes) - nMempoolUsage, 0);
2182 
     // "Large" means within 10% AND within 10MB of the limit, whichever
     // threshold is reached first.
2184  static constexpr int64_t MAX_BLOCK_COINSDB_USAGE_BYTES =
2185  10 * 1024 * 1024; // 10MB
2186  int64_t large_threshold = std::max(
2187  (9 * nTotalSpace) / 10, nTotalSpace - MAX_BLOCK_COINSDB_USAGE_BYTES);
2188 
2189  if (cacheSize > nTotalSpace) {
2190  LogPrintf("Cache size (%s) exceeds total space (%s)\n", cacheSize,
2191  nTotalSpace)
2193  } else if (cacheSize > large_threshold) {
2195  }
2196  return CoinsCacheSizeState::OK;
2197 }
2198 
// Flush chainstate (block index, undo data, coins cache) to disk and prune
// block files if requested. The flush policy depends on `mode`
// (ALWAYS / PERIODIC / IF_NEEDED / NONE) combined with cache pressure,
// elapsed time since the last write/flush, and any pending prune request.
// nManualPruneHeight > 0 forces a manual prune up to that height.
// Returns false only via AbortNode (disk full or database write failure).
// NOTE(review): the extraction dropped the signature line (2199,
// presumably "bool Chainstate::FlushStateToDisk(BlockValidationState &state,")
// and several interior lines (e.g. 2215, 2222, 2229-2231, 2238-2242,
// 2271, 2275, 2282, 2288, 2293, 2319, 2330, 2353) — verify against upstream.
2200  FlushStateMode mode, int nManualPruneHeight) {
2201  LOCK(cs_main);
2202  assert(this->CanFlushToDisk());
2203  std::set<int> setFilesToPrune;
2204  bool full_flush_completed = false;
2205 
     // Snapshot cache statistics up front for the bench log / tracepoint.
2206  const size_t coins_count = CoinsTip().GetCacheSize();
2207  const size_t coins_mem_usage = CoinsTip().DynamicMemoryUsage();
2208 
2209  try {
2210  {
2211  bool fFlushForPrune = false;
2212  bool fDoFullFlush = false;
2213 
2214  CoinsCacheSizeState cache_state = GetCoinsCacheSizeState();
     // Determine which block files can be pruned. Skipped during reindex.
2216  if (m_blockman.IsPruneMode() &&
2217  (m_blockman.m_check_for_pruning || nManualPruneHeight > 0) &&
2218  !fReindex) {
2219  // Make sure we don't prune above the blockfilterindexes
2220  // bestblocks. Pruning is height-based.
2221  int last_prune = m_chain.Height();
     // NOTE(review): dropped line 2222 presumably iterates the block
     // filter indexes (ForEachBlockFilterIndex) — verify upstream.
2223  last_prune = std::max(
2224  1, std::min(last_prune,
2225  index.GetSummary().best_block_height));
2226  });
2227 
2228  if (nManualPruneHeight > 0) {
2230  "find files to prune (manual)", BCLog::BENCH);
2232  setFilesToPrune,
2233  std::min(last_prune, nManualPruneHeight),
2234  m_chain.Height());
2235  } else {
2236  LOG_TIME_MILLIS_WITH_CATEGORY("find files to prune",
2237  BCLog::BENCH);
2239  setFilesToPrune,
2241  m_chain.Height(), last_prune, IsInitialBlockDownload());
2243  }
2244  if (!setFilesToPrune.empty()) {
2245  fFlushForPrune = true;
     // Record, once, that this node has pruned (affects future
     // startup behavior).
2246  if (!m_blockman.m_have_pruned) {
2247  m_blockman.m_block_tree_db->WriteFlag(
2248  "prunedblockfiles", true);
2249  m_blockman.m_have_pruned = true;
2250  }
2251  }
2252  }
2253  const auto nNow = GetTime<std::chrono::microseconds>();
2254  // Avoid writing/flushing immediately after startup.
2255  if (m_last_write.count() == 0) {
2256  m_last_write = nNow;
2257  }
2258  if (m_last_flush.count() == 0) {
2259  m_last_flush = nNow;
2260  }
2261  // The cache is large and we're within 10% and 10 MiB of the limit,
2262  // but we have time now (not in the middle of a block processing).
2263  bool fCacheLarge = mode == FlushStateMode::PERIODIC &&
2264  cache_state >= CoinsCacheSizeState::LARGE;
2265  // The cache is over the limit, we have to write now.
2266  bool fCacheCritical = mode == FlushStateMode::IF_NEEDED &&
2267  cache_state >= CoinsCacheSizeState::CRITICAL;
2268  // It's been a while since we wrote the block index to disk. Do this
2269  // frequently, so we don't need to redownload after a crash.
2270  bool fPeriodicWrite = mode == FlushStateMode::PERIODIC &&
2272  // It's been very long since we flushed the cache. Do this
2273  // infrequently, to optimize cache usage.
2274  bool fPeriodicFlush = mode == FlushStateMode::PERIODIC &&
2276  // Combine all conditions that result in a full cache flush.
2277  fDoFullFlush = (mode == FlushStateMode::ALWAYS) || fCacheLarge ||
2278  fCacheCritical || fPeriodicFlush || fFlushForPrune;
2279  // Write blocks and block index to disk.
2280  if (fDoFullFlush || fPeriodicWrite) {
2281  // Ensure we can write block index
2283  return AbortNode(state, "Disk space is too low!",
2284  _("Disk space is too low!"));
2285  }
2286 
2287  {
2289  "write block and undo data to disk", BCLog::BENCH);
2290 
2291  // First make sure all block and undo data is flushed to
2292  // disk.
2294  }
2295  // Then update all block file information (which may refer to
2296  // block and undo files).
2297  {
2298  LOG_TIME_MILLIS_WITH_CATEGORY("write block index to disk",
2299  BCLog::BENCH);
2300 
2301  if (!m_blockman.WriteBlockIndexDB()) {
2302  return AbortNode(
2303  state, "Failed to write to block index database");
2304  }
2305  }
2306 
2307  // Finally remove any pruned files
2308  if (fFlushForPrune) {
2309  LOG_TIME_MILLIS_WITH_CATEGORY("unlink pruned files",
2310  BCLog::BENCH);
2311 
2312  UnlinkPrunedFiles(setFilesToPrune);
2313  }
2314  m_last_write = nNow;
2315  }
2316  // Flush best chain related state. This can only be done if the
2317  // blocks / block index write was also done.
2318  if (fDoFullFlush && !CoinsTip().GetBestBlock().IsNull()) {
2320  strprintf("write coins cache to disk (%d coins, %.2fkB)",
2321  coins_count, coins_mem_usage / 1000),
2322  BCLog::BENCH);
2323 
2324  // Typical Coin structures on disk are around 48 bytes in size.
2325  // Pushing a new one to the database can cause it to be written
2326  // twice (once in the log, and once in the tables). This is
2327  // already an overestimation, as most will delete an existing
2328  // entry or overwrite one. Still, use a conservative safety
2329  // factor of 2.
2331  48 * 2 * 2 * CoinsTip().GetCacheSize())) {
2332  return AbortNode(state, "Disk space is too low!",
2333  _("Disk space is too low!"));
2334  }
2335 
2336  // Flush the chainstate (which may refer to block index
2337  // entries).
2338  if (!CoinsTip().Flush()) {
2339  return AbortNode(state, "Failed to write to coin database");
2340  }
2341  m_last_flush = nNow;
2342  full_flush_completed = true;
2343  }
2344 
     // Tracepoint for external observability of UTXO cache flushes.
2345  TRACE5(utxocache, flush,
2346  // in microseconds (µs)
2347  GetTimeMicros() - nNow.count(), uint32_t(mode), coins_count,
2348  uint64_t(coins_mem_usage), fFlushForPrune);
2349  }
2350 
2351  if (full_flush_completed) {
2352  // Update best block in wallet (so we can detect restored wallets).
2354  }
2355  } catch (const std::runtime_error &e) {
     // Any filesystem/database error becomes a node abort, not a crash.
2356  return AbortNode(state, std::string("System error while flushing: ") +
2357  e.what());
2358  }
2359  return true;
2360 }
2361 
// Body of Chainstate::ForceFlushStateToDisk (signature line 2362 was dropped
// by the extraction): unconditionally flush the full chainstate to disk
// (FlushStateMode::ALWAYS). Failure is logged but not propagated.
2363  BlockValidationState state;
2364  if (!this->FlushStateToDisk(state, FlushStateMode::ALWAYS)) {
2365  LogPrintf("%s: failed to flush state (%s)\n", __func__,
2366  state.ToString());
2367  }
2368 }
2369 
// Body of Chainstate::PruneAndFlush (signature line 2370 was dropped by the
// extraction): request pruning and flush state with FlushStateMode::NONE so
// only the prune work happens. Failure is logged but not propagated.
// NOTE(review): dropped line 2372 presumably sets
// m_blockman.m_check_for_pruning = true — verify against upstream.
2371  BlockValidationState state;
2373  if (!this->FlushStateToDisk(state, FlushStateMode::NONE)) {
2374  LogPrintf("%s: failed to flush state (%s)\n", __func__,
2375  state.ToString());
2376  }
2377 }
2378 
// Log a standard "new best tip" line: hash, height, version, log2 chain work,
// cumulative tx count, verification progress and coins-cache usage. `prefix`
// distinguishes callers (e.g. "[background validation] ").
// NOTE(review): lines 2383-2384 (presumably a cs_main lock annotation and the
// opening brace) and 2390 (the date argument for "date='%s'") were dropped by
// the extraction — verify against upstream.
2379 static void UpdateTipLog(const CCoinsViewCache &coins_tip,
2380  const CBlockIndex *tip, const CChainParams &params,
2381  const std::string &func_name,
2382  const std::string &prefix)
2385  LogPrintf("%s%s: new best=%s height=%d version=0x%08x log2_work=%f tx=%ld "
2386  "date='%s' progress=%f cache=%.1fMiB(%utxo)\n",
2387  prefix, func_name, tip->GetBlockHash().ToString(), tip->nHeight,
2388  tip->nVersion, log(tip->nChainWork.getdouble()) / log(2.0),
2389  tip->GetChainTxCount(),
2391  GuessVerificationProgress(params.TxData(), tip),
2392  coins_tip.DynamicMemoryUsage() * (1.0 / (1 << 20)),
2393  coins_tip.GetCacheSize());
2394 }
2395 
// Update internal state after the chain tip changed to pindexNew: for the
// active chainstate, publish the new best block (g_best_block + condvar) and
// log the tip; background chainstates only log periodically.
// NOTE(review): several lines were dropped by the extraction (2397 lock
// assertion, 2417 inside the m_mempool branch — presumably
// AddTransactionsUpdated — and 2421, presumably a lock on the g_best_block
// mutex) — verify against upstream.
2396 void Chainstate::UpdateTip(const CBlockIndex *pindexNew) {
2398  const auto &coins_tip = CoinsTip();
2399 
2400  const CChainParams &params{m_chainman.GetParams()};
2401 
2402  // The remainder of the function isn't relevant if we are not acting on
2403  // the active chainstate, so return if need be.
2404  if (this != &m_chainman.ActiveChainstate()) {
2405  // Only log every so often so that we don't bury log messages at the
2406  // tip.
2407  constexpr int BACKGROUND_LOG_INTERVAL = 2000;
2408  if (pindexNew->nHeight % BACKGROUND_LOG_INTERVAL == 0) {
2409  UpdateTipLog(coins_tip, pindexNew, params, __func__,
2410  "[background validation] ");
2411  }
2412  return;
2413  }
2414 
2415  // New best block
2416  if (m_mempool) {
2418  }
2419 
     // Publish the new tip hash and wake any waiters (e.g. RPC longpoll).
2420  {
2422  g_best_block = pindexNew->GetBlockHash();
2423  g_best_block_cv.notify_all();
2424  }
2425 
2426  UpdateTipLog(coins_tip, pindexNew, params, __func__, "");
2427 }
2428 
// Body of Chainstate::DisconnectTip (the signature and doc-comment lines
// 2429-2440 were dropped by the extraction): disconnect the current chain
// tip, undoing its transactions against the coins view, optionally stashing
// the block's transactions in `disconnectpool` for later re-add, flushing
// state and notifying listeners. Returns false on failure.
2441  DisconnectedBlockTransactions *disconnectpool) {
     // NOTE(review): dropped lines 2442/2444 presumably assert cs_main and
     // the mempool lock — verify against upstream.
2443  if (m_mempool) {
2445  }
2446 
2447  CBlockIndex *pindexDelete = m_chain.Tip();
2448  const Consensus::Params &consensusParams = m_chainman.GetConsensus();
2449 
2450  assert(pindexDelete);
2451 
2452  // Read block from disk.
2453  std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
2454  CBlock &block = *pblock;
2455  if (!ReadBlockFromDisk(block, pindexDelete, consensusParams)) {
2456  return error("DisconnectTip(): Failed to read block");
2457  }
2458 
2459  // Apply the block atomically to the chain state.
2460  int64_t nStart = GetTimeMicros();
2461  {
2462  CCoinsViewCache view(&CoinsTip());
2463  assert(view.GetBestBlock() == pindexDelete->GetBlockHash());
     // NOTE(review): dropped line 2465 presumably compares against
     // DisconnectResult::OK — verify against upstream.
2464  if (DisconnectBlock(block, pindexDelete, view) !=
2466  return error("DisconnectTip(): DisconnectBlock %s failed",
2467  pindexDelete->GetBlockHash().ToString());
2468  }
2469 
2470  bool flushed = view.Flush();
2471  assert(flushed);
2472  }
2473 
2474  LogPrint(BCLog::BENCH, "- Disconnect block: %.2fms\n",
2475  (GetTimeMicros() - nStart) * MILLI);
2476 
2477  // Write the chain state to disk, if necessary.
2478  return false;
2480  }
2481 
2482  if (m_mempool) {
2483  // If this block is deactivating a fork, we move all mempool
2484  // transactions in front of disconnectpool for reprocessing in a future
2485  // updateMempoolForReorg call
2486  if (pindexDelete->pprev != nullptr &&
2487  GetNextBlockScriptFlags(consensusParams, pindexDelete) !=
2488  GetNextBlockScriptFlags(consensusParams, pindexDelete->pprev)) {
2490  "Disconnecting mempool due to rewind of upgrade block\n");
2491  if (disconnectpool) {
2492  disconnectpool->importMempool(*m_mempool);
2493  }
2494  m_mempool->clear();
2495  }
2496 
2497  if (disconnectpool) {
2498  disconnectpool->addForBlock(block.vtx, *m_mempool);
2499  }
2500  }
2501 
2502  m_chain.SetTip(pindexDelete->pprev);
2503 
2504  UpdateTip(pindexDelete->pprev);
2505  // Let wallets know transactions went from 1-confirmed to
2506  // 0-confirmed or conflicted:
2507  GetMainSignals().BlockDisconnected(pblock, pindexDelete);
2508  return true;
2509 }
2510 
// Cumulative benchmark timers (microseconds) for the block-connection
// pipeline, reported via BCLog::BENCH in ConnectTip below. File-local,
// monotonically accumulated across all connected blocks.
2511 static int64_t nTimeReadFromDisk = 0;
2512 static int64_t nTimeConnectTotal = 0;
2513 static int64_t nTimeFlush = 0;
2514 static int64_t nTimeChainState = 0;
2515 static int64_t nTimePostConnect = 0;
2516 
// Members of PerBlockConnectTrace (the struct header line 2517 was dropped
// by the extraction): one (index, block) pair recorded by ConnectTrace for
// each block connected in a single activation step.
2518  CBlockIndex *pindex = nullptr;
2519  std::shared_ptr<const CBlock> pblock;
2521 };
2522 
// Interior of class ConnectTrace (the class header / doc-comment lines
// 2523-2530 were dropped by the extraction): accumulates the blocks
// connected during one ActivateBestChain step so listeners can be notified
// afterwards. Invariant: blocksConnected always ends with one empty
// sentinel entry awaiting the next block.
2531 private:
2532  std::vector<PerBlockConnectTrace> blocksConnected;
2533 
2534 public:
     // Start with the single empty sentinel entry.
2535  explicit ConnectTrace() : blocksConnected(1) {}
2536 
     // Record a newly connected block. Fills the sentinel entry and pushes a
     // fresh one. NOTE(review): the first line of this signature (2537,
     // presumably "void BlockConnected(CBlockIndex *pindex,") was dropped by
     // the extraction.
2538  std::shared_ptr<const CBlock> pblock) {
2539  assert(!blocksConnected.back().pindex);
2540  assert(pindex);
2541  assert(pblock);
2542  blocksConnected.back().pindex = pindex;
2543  blocksConnected.back().pblock = std::move(pblock);
2544  blocksConnected.emplace_back();
2545  }
2546 
     // Return the recorded blocks, dropping the trailing sentinel entry.
2547  std::vector<PerBlockConnectTrace> &GetBlocksConnected() {
2548  // We always keep one extra block at the end of our list because blocks
2549  // are added after all the conflicted transactions have been filled in.
2550  // Thus, the last entry should always be an empty one waiting for the
2551  // transactions from the next block. We pop the last entry here to make
2552  // sure the list we return is sane.
2553  assert(!blocksConnected.back().pindex);
2554  blocksConnected.pop_back();
2555  return blocksConnected;
2556  }
2557 };
2558 
// Tail of the Chainstate::ConnectTip signature plus its body (the leading
// lines 2559-2565, including the return type and first parameters —
// presumably "bool Chainstate::ConnectTip(BlockValidationState &state," —
// were dropped by the extraction). Connects the block `pblock` (read from
// disk if null) on top of the current tip: runs ConnectBlock, applies
// Bitcoin ABC block-parking policies (miner fund, avalanche staking
// rewards), flushes state, updates the mempool and chain tip, and records
// the block in `connectTrace`. Returns false on failure or parking.
2566  BlockPolicyValidationState &blockPolicyState,
2567  CBlockIndex *pindexNew,
2568  const std::shared_ptr<const CBlock> &pblock,
2569  ConnectTrace &connectTrace,
2570  DisconnectedBlockTransactions &disconnectpool) {
     // NOTE(review): dropped lines 2571/2573 presumably assert cs_main and
     // the mempool lock — verify against upstream.
2572  if (m_mempool) {
2574  }
2575 
2576  const Consensus::Params &consensusParams = m_chainman.GetConsensus();
2577 
     // ConnectTip only ever extends the current tip by one block.
2578  assert(pindexNew->pprev == m_chain.Tip());
2579  // Read block from disk.
2580  int64_t nTime1 = GetTimeMicros();
2581  std::shared_ptr<const CBlock> pthisBlock;
2582  if (!pblock) {
2583  std::shared_ptr<CBlock> pblockNew = std::make_shared<CBlock>();
2584  if (!ReadBlockFromDisk(*pblockNew, pindexNew, consensusParams)) {
2585  return AbortNode(state, "Failed to read block");
2586  }
2587  pthisBlock = pblockNew;
2588  } else {
2589  pthisBlock = pblock;
2590  }
2591 
2592  const CBlock &blockConnecting = *pthisBlock;
2593 
2594  // Apply the block atomically to the chain state.
2595  int64_t nTime2 = GetTimeMicros();
2596  nTimeReadFromDisk += nTime2 - nTime1;
2597  int64_t nTime3;
2598  LogPrint(BCLog::BENCH, " - Load block from disk: %.2fms [%.2fs]\n",
2599  (nTime2 - nTime1) * MILLI, nTimeReadFromDisk * MICRO);
2600  {
2601  Amount blockFees{Amount::zero()};
2602  CCoinsViewCache view(&CoinsTip());
2603  bool rv = ConnectBlock(blockConnecting, state, pindexNew, view,
2604  BlockValidationOptions(config), &blockFees);
     // BlockChecked fires for both success and failure so listeners see
     // the final validation state.
2605  GetMainSignals().BlockChecked(blockConnecting, state);
2606  if (!rv) {
2607  if (state.IsInvalid()) {
2608  InvalidBlockFound(pindexNew, state);
2609  }
2610 
2611  return error("%s: ConnectBlock %s failed, %s", __func__,
2612  pindexNew->GetBlockHash().ToString(),
2613  state.ToString());
2614  }
2615 
     // Park-policy checks (skipped during IBD). NOTE(review): lines
     // 2616-2626 and 2629-2630 (the doc comment and the rest of this `if`
     // condition) were dropped by the extraction — verify upstream.
2627  const BlockHash blockhash = pindexNew->GetBlockHash();
2628  if (!IsInitialBlockDownload() &&
2631 
2632  const Amount blockReward =
2633  blockFees +
2634  GetBlockSubsidy(pindexNew->nHeight, consensusParams);
2635 
2636  std::vector<std::unique_ptr<ParkingPolicy>> parkingPolicies;
2637  parkingPolicies.emplace_back(std::make_unique<MinerFundPolicy>(
2638  consensusParams, *pindexNew, blockConnecting, blockReward));
2639 
2640  if (g_avalanche) {
2641  parkingPolicies.emplace_back(
2642  std::make_unique<StakingRewardsPolicy>(
2643  consensusParams, *pindexNew, blockConnecting,
2644  blockReward));
2645  }
2646 
2647  // If any block policy is violated, bail on the first one found
2648  if (std::find_if_not(parkingPolicies.begin(), parkingPolicies.end(),
2649  [&](const auto &policy) {
2650  bool ret = (*policy)(blockPolicyState);
2651  if (!ret) {
2652  LogPrintf(
2653  "Park block because it "
2654  "violated a block policy: %s\n",
2655  blockPolicyState.ToString());
2656  }
2657  return ret;
2658  }) != parkingPolicies.end()) {
     // Mark the block parked (not invalid) and persist the status.
2659  pindexNew->nStatus = pindexNew->nStatus.withParked();
2660  m_blockman.m_dirty_blockindex.insert(pindexNew);
2661  return false;
2662  }
2663  }
2664 
2665  nTime3 = GetTimeMicros();
2666  nTimeConnectTotal += nTime3 - nTime2;
2667  assert(nBlocksTotal > 0);
2669  " - Connect total: %.2fms [%.2fs (%.2fms/blk)]\n",
2670  (nTime3 - nTime2) * MILLI, nTimeConnectTotal * MICRO,
     // Push the per-block coin changes into the chainstate cache.
2672  bool flushed = view.Flush();
2673  assert(flushed);
2674  }
2675 
2676  int64_t nTime4 = GetTimeMicros();
2677  nTimeFlush += nTime4 - nTime3;
2678  LogPrint(BCLog::BENCH, " - Flush: %.2fms [%.2fs (%.2fms/blk)]\n",
2679  (nTime4 - nTime3) * MILLI, nTimeFlush * MICRO,
2681 
2682  // Write the chain state to disk, if necessary.
2683  if (!FlushStateToDisk(state, FlushStateMode::IF_NEEDED)) {
2684  return false;
2685  }
2686 
2687  int64_t nTime5 = GetTimeMicros();
2688  nTimeChainState += nTime5 - nTime4;
2690  " - Writing chainstate: %.2fms [%.2fs (%.2fms/blk)]\n",
2691  (nTime5 - nTime4) * MILLI, nTimeChainState * MICRO,
2693 
2694  // Remove conflicting transactions from the mempool.
2695  if (m_mempool) {
2696  m_mempool->removeForBlock(blockConnecting.vtx);
2697  disconnectpool.removeForBlock(blockConnecting.vtx);
2698 
2699  // If this block is activating a fork, we move all mempool transactions
2700  // in front of disconnectpool for reprocessing in a future
2701  // updateMempoolForReorg call
2702  if (pindexNew->pprev != nullptr &&
2703  GetNextBlockScriptFlags(consensusParams, pindexNew) !=
2704  GetNextBlockScriptFlags(consensusParams, pindexNew->pprev)) {
2705  LogPrint(
2707  "Disconnecting mempool due to acceptance of upgrade block\n");
2708  disconnectpool.importMempool(*m_mempool);
2709  }
2710  }
2711 
2712  // Update m_chain & related variables.
2713  m_chain.SetTip(pindexNew);
2714  UpdateTip(pindexNew);
2715 
2716  int64_t nTime6 = GetTimeMicros();
2717  nTimePostConnect += nTime6 - nTime5;
2718  nTimeTotal += nTime6 - nTime1;
2720  " - Connect postprocess: %.2fms [%.2fs (%.2fms/blk)]\n",
2721  (nTime6 - nTime5) * MILLI, nTimePostConnect * MICRO,
2723  LogPrint(BCLog::BENCH, "- Connect block: %.2fms [%.2fs (%.2fms/blk)]\n",
2724  (nTime6 - nTime1) * MILLI, nTimeTotal * MICRO,
2726 
2727  // If we are the background validation chainstate, check to see if we are
2728  // done validating the snapshot (i.e. our tip has reached the snapshot's
2729  // base block).
2730  if (this != &m_chainman.ActiveChainstate()) {
2731  // This call may set `m_disabled`, which is referenced immediately
2732  // afterwards in ActivateBestChain, so that we stop connecting blocks
2733  // past the snapshot base.
2734  m_chainman.MaybeCompleteSnapshotValidation();
2735  }
2736 
2737  connectTrace.BlockConnected(pindexNew, std::move(pthisBlock));
2738  return true;
2739 }
2740 
// Return the best-work tip candidate whose whole path back to the fork point
// with the active chain is usable (valid, not parked, data present). Unusable
// candidates and their descendants are pruned from setBlockIndexCandidates as
// they are encountered, then the next-best candidate is tried.
// NOTE(review): the extraction elided some interior lines here (e.g. the
// function header at 2745, presumably lock assertions at 2747/2764, the
// m_blocks_unlinked insert call at 2885 and a warning-check call at 2895) —
// confirm against the repository before relying on this listing.
2746  std::vector<const CBlockIndex *> &blocksToReconcile) {
2748  do {
2749  CBlockIndex *pindexNew = nullptr;
2750 
2751  // Find the best candidate header.
2752  {
2753  std::set<CBlockIndex *, CBlockIndexWorkComparator>::reverse_iterator
2754  it = setBlockIndexCandidates.rbegin();
2755  if (it == setBlockIndexCandidates.rend()) {
2756  return nullptr;
2757  }
2758  pindexNew = *it;
2759  }
2760 
2761  // If this block will cause an avalanche finalized block to be reorged,
2762  // then we park it.
2763  {
2765  if (m_avalancheFinalizedBlockIndex &&
2766  !AreOnTheSameFork(pindexNew, m_avalancheFinalizedBlockIndex)) {
2767  LogPrintf("Park block %s because it forks prior to the "
2768  "avalanche finalized chaintip.\n",
2769  pindexNew->GetBlockHash().ToString());
2770  pindexNew->nStatus = pindexNew->nStatus.withParked();
2771  m_blockman.m_dirty_blockindex.insert(pindexNew);
2772  }
2773  }
2774 
2775  const bool fAvalancheEnabled = isAvalancheEnabled(gArgs);
2776  const bool fAutoUnpark =
2777  gArgs.GetBoolArg("-automaticunparking", !fAvalancheEnabled);
2778 
2779  const CBlockIndex *pindexFork = m_chain.FindFork(pindexNew);
2780 
2781  // Check whether all blocks on the path between the currently active
2782  // chain and the candidate are valid. Just going until the active chain
2783  // is an optimization, as we know all blocks in it are valid already.
2784  CBlockIndex *pindexTest = pindexNew;
2785  bool hasValidAncestor = true;
2786  while (hasValidAncestor && pindexTest && pindexTest != pindexFork) {
2787  assert(pindexTest->HaveTxsDownloaded() || pindexTest->nHeight == 0);
2788 
2789  // If this is a parked chain, but it has enough PoW, clear the park
2790  // state.
2791  bool fParkedChain = pindexTest->nStatus.isOnParkedChain();
2792  if (fAutoUnpark && fParkedChain) {
2793  const CBlockIndex *pindexTip = m_chain.Tip();
2794 
2795  // During initialization, pindexTip and/or pindexFork may be
2796  // null. In this case, we just ignore the fact that the chain is
2797  // parked.
2798  if (!pindexTip || !pindexFork) {
2799  UnparkBlock(pindexTest);
2800  continue;
2801  }
2802 
2803  // A parked chain can be unparked if it has twice as much PoW
2804  // accumulated as the main chain has since the fork block.
2805  CBlockIndex const *pindexExtraPow = pindexTip;
2806  arith_uint256 requiredWork = pindexTip->nChainWork;
2807  switch (pindexTip->nHeight - pindexFork->nHeight) {
2808  // Limit the penalty for depth 1, 2 and 3 to half a block
2809  // worth of work to ensure we don't fork accidentally.
2810  case 3:
2811  case 2:
2812  pindexExtraPow = pindexExtraPow->pprev;
2813  // FALLTHROUGH
2814  case 1: {
2815  const arith_uint256 deltaWork =
2816  pindexExtraPow->nChainWork - pindexFork->nChainWork;
2817  requiredWork += (deltaWork >> 1);
2818  break;
2819  }
2820  default:
2821  requiredWork +=
2822  pindexExtraPow->nChainWork - pindexFork->nChainWork;
2823  break;
2824  }
2825 
2826  if (pindexNew->nChainWork > requiredWork) {
2827  // We have enough, clear the parked state.
2828  LogPrintf("Unpark chain up to block %s as it has "
2829  "accumulated enough PoW.\n",
2830  pindexNew->GetBlockHash().ToString());
2831  fParkedChain = false;
2832  UnparkBlock(pindexTest);
2833  }
2834  }
2835 
2836  // Pruned nodes may have entries in setBlockIndexCandidates for
2837  // which block files have been deleted. Remove those as candidates
2838  // for the most work chain if we come across them; we can't switch
2839  // to a chain unless we have all the non-active-chain parent blocks.
2840  bool fInvalidChain = pindexTest->nStatus.isInvalid();
2841  bool fMissingData = !pindexTest->nStatus.hasData();
2842  if (!(fInvalidChain || fParkedChain || fMissingData)) {
2843  // The current block is acceptable, move to the parent, up to
2844  // the fork point.
2845  pindexTest = pindexTest->pprev;
2846  continue;
2847  }
2848 
2849  // Candidate chain is not usable (either invalid or parked or
2850  // missing data)
2851  hasValidAncestor = false;
2852  setBlockIndexCandidates.erase(pindexTest);
2853 
2854  if (fInvalidChain && (m_chainman.m_best_invalid == nullptr ||
2855  pindexNew->nChainWork >
2856  m_chainman.m_best_invalid->nChainWork)) {
2857  m_chainman.m_best_invalid = pindexNew;
2858  }
2859 
2860  if (fParkedChain && (m_chainman.m_best_parked == nullptr ||
2861  pindexNew->nChainWork >
2862  m_chainman.m_best_parked->nChainWork)) {
2863  m_chainman.m_best_parked = pindexNew;
2864  }
2865 
2866  LogPrintf("Considered switching to better tip %s but that chain "
2867  "contains a%s%s%s block.\n",
2868  pindexNew->GetBlockHash().ToString(),
2869  fInvalidChain ? "n invalid" : "",
2870  fParkedChain ? " parked" : "",
2871  fMissingData ? " missing-data" : "");
2872 
2873  CBlockIndex *pindexFailed = pindexNew;
2874  // Remove the entire chain from the set.
2875  while (pindexTest != pindexFailed) {
2876  if (fInvalidChain || fParkedChain) {
2877  pindexFailed->nStatus =
2878  pindexFailed->nStatus.withFailedParent(fInvalidChain)
2879  .withParkedParent(fParkedChain);
2880  } else if (fMissingData) {
2881  // If we're missing data, then add back to
2882  // m_blocks_unlinked, so that if the block arrives in the
2883  // future we can try adding to setBlockIndexCandidates
2884  // again.
2886  std::make_pair(pindexFailed->pprev, pindexFailed));
2887  }
2888  setBlockIndexCandidates.erase(pindexFailed);
2889  pindexFailed = pindexFailed->pprev;
2890  }
2891 
2892  if (fInvalidChain || fParkedChain) {
2893  // We discovered a new chain tip that is either parked or
2894  // invalid, we may want to warn.
2896  }
2897  }
2898 
2899  if (fAvalancheEnabled && g_avalanche) {
2900  blocksToReconcile.push_back(pindexNew);
2901  }
2902 
2903  // We found a candidate that has valid ancestors. This is our guy.
2904  if (hasValidAncestor) {
2905  return pindexNew;
2906  }
2907  } while (true);
2908 }
2909 
// Trim setBlockIndexCandidates: erase every candidate that ranks strictly
// below the current tip under the work comparator — they can never become
// the best chain — while keeping the tip itself and anything better.
// NOTE(review): the function header was elided by extraction; presumably
// void Chainstate::PruneBlockIndexCandidates() — confirm against repository.
2915  // Note that we can't delete the current block itself, as we may need to
2916  // return to it later in case a reorganization to a better block fails.
2917  auto it = setBlockIndexCandidates.begin();
2918  while (it != setBlockIndexCandidates.end() &&
2919  setBlockIndexCandidates.value_comp()(*it, m_chain.Tip())) {
2920  setBlockIndexCandidates.erase(it++);
2921  }
2922 
2923  // Either the current tip or a successor of it we're working towards is left
2924  // in setBlockIndexCandidates.
2925  assert(!setBlockIndexCandidates.empty());
2926 }
2927 
// One step of best-chain activation: disconnect active blocks that are no
// longer on the path to pindexMostWork, then connect up to 32 blocks toward
// it, keeping the mempool consistent throughout via disconnectpool. Sets
// fInvalidFound when a candidate block fails consensus validation.
// NOTE(review): the extraction elided interior lines (the function header at
// 2935, mempool lock assertion around 2940-2942, the result-code check at
// 3007, and log/assert lines at 3031, 3049, 3063) — confirm upstream.
2936  const Config &config, BlockValidationState &state,
2937  CBlockIndex *pindexMostWork, const std::shared_ptr<const CBlock> &pblock,
2938  bool &fInvalidFound, ConnectTrace &connectTrace) {
2940  if (m_mempool) {
2942  }
2943 
2944  const CBlockIndex *pindexOldTip = m_chain.Tip();
2945  const CBlockIndex *pindexFork = m_chain.FindFork(pindexMostWork);
2946 
2947  // Disconnect active blocks which are no longer in the best chain.
2948  bool fBlocksDisconnected = false;
2949  DisconnectedBlockTransactions disconnectpool;
2950  while (m_chain.Tip() && m_chain.Tip() != pindexFork) {
2951  if (!fBlocksDisconnected) {
2952  // Import and clear mempool; we must do this to preserve
2953  // topological ordering in the mempool index. This is ok since
2954  // inserts into the mempool are very fast now in our new
2955  // implementation.
2956  disconnectpool.importMempool(*m_mempool);
2957  }
2958 
2959  if (!DisconnectTip(state, &disconnectpool)) {
2960  // This is likely a fatal error, but keep the mempool consistent,
2961  // just in case. Only remove from the mempool in this case.
2962  if (m_mempool) {
2963  disconnectpool.updateMempoolForReorg(config, *this, false,
2964  *m_mempool);
2965  }
2966 
2967  // If we're unable to disconnect a block during normal operation,
2968  // then that is a failure of our local system -- we should abort
2969  // rather than stay on a less work chain.
2970  AbortNode(state,
2971  "Failed to disconnect block; see debug.log for details");
2972  return false;
2973  }
2974 
2975  fBlocksDisconnected = true;
2976  }
2977 
2978  // Build list of new blocks to connect.
2979  std::vector<CBlockIndex *> vpindexToConnect;
2980  bool fContinue = true;
2981  int nHeight = pindexFork ? pindexFork->nHeight : -1;
2982  while (fContinue && nHeight != pindexMostWork->nHeight) {
2983  // Don't iterate the entire list of potential improvements toward the
2984  // best tip, as we likely only need a few blocks along the way.
2985  int nTargetHeight = std::min(nHeight + 32, pindexMostWork->nHeight);
2986  vpindexToConnect.clear();
2987  vpindexToConnect.reserve(nTargetHeight - nHeight);
2988  CBlockIndex *pindexIter = pindexMostWork->GetAncestor(nTargetHeight);
2989  while (pindexIter && pindexIter->nHeight != nHeight) {
2990  vpindexToConnect.push_back(pindexIter);
2991  pindexIter = pindexIter->pprev;
2992  }
2993 
2994  nHeight = nTargetHeight;
2995 
2996  // Connect new blocks.
2997  for (CBlockIndex *pindexConnect : reverse_iterate(vpindexToConnect)) {
2998  BlockPolicyValidationState blockPolicyState;
2999  if (!ConnectTip(config, state, blockPolicyState, pindexConnect,
3000  pindexConnect == pindexMostWork
3001  ? pblock
3002  : std::shared_ptr<const CBlock>(),
3003  connectTrace, disconnectpool)) {
3004  if (state.IsInvalid()) {
3005  // The block violates a consensus rule.
3006  if (state.GetResult() !=
3008  InvalidChainFound(vpindexToConnect.back());
3009  }
3010  state = BlockValidationState();
3011  fInvalidFound = true;
3012  fContinue = false;
3013  break;
3014  }
3015 
3016  if (blockPolicyState.IsInvalid()) {
3017  // The block violates a policy rule.
3018  fContinue = false;
3019  break;
3020  }
3021 
3022  // A system error occurred (disk space, database error, ...).
3023  // Make the mempool consistent with the current tip, just in
3024  // case any observers try to use it before shutdown.
3025  if (m_mempool) {
3026  disconnectpool.updateMempoolForReorg(config, *this, false,
3027  *m_mempool);
3028  }
3029  return false;
3030  } else {
3032  if (!pindexOldTip ||
3033  m_chain.Tip()->nChainWork > pindexOldTip->nChainWork) {
3034  // We're in a better position than we were. Return
3035  // temporarily to release the lock.
3036  fContinue = false;
3037  break;
3038  }
3039  }
3040  }
3041  }
3042 
3043  if (m_mempool) {
3044  if (fBlocksDisconnected || !disconnectpool.isEmpty()) {
3045  // If any blocks were disconnected, we need to update the mempool
3046  // even if disconnectpool is empty. The disconnectpool may also be
3047  // non-empty if the mempool was imported due to new validation rules
3048  // being in effect.
3050  "Updating mempool due to reorganization or "
3051  "rules upgrade/downgrade\n");
3052  disconnectpool.updateMempoolForReorg(config, *this, true,
3053  *m_mempool);
3054  }
3055 
3056  m_mempool->check(this->CoinsTip(), this->m_chain.Height() + 1);
3057  }
3058 
3059  // Callbacks/notifications for a new best chain.
3060  if (fInvalidFound) {
3061  CheckForkWarningConditionsOnNewFork(pindexMostWork);
3062  } else {
3064  }
3065 
3066  return true;
3067 }
3068 
// NOTE(review): extraction elided this function's header and its return
// statements. From the visible branches it distinguishes three states:
// not in initial sync, reindexing (::fReindex), and the remaining case —
// presumably GetSynchronizationState(bool init). Confirm against repository.
3070  if (!init) {
3072  }
3073  if (::fReindex) {
3075  }
3077 }
3078 
// Fire a UI NotifyHeaderTip signal when the best-known header has changed
// since the previous call. Returns whether a notification was sent.
// A function-local static remembers the last announced header; the state is
// sampled under cs_main but the notification itself is deliberately sent
// after the lock is released.
3080  bool fNotify = false;
3081  bool fInitialBlockDownload = false;
3082  static CBlockIndex *pindexHeaderOld = nullptr;
3083  CBlockIndex *pindexHeader = nullptr;
3084  {
3085  LOCK(cs_main);
3086  pindexHeader = chainstate.m_chainman.m_best_header;
3087 
3088  if (pindexHeader != pindexHeaderOld) {
3089  fNotify = true;
3090  fInitialBlockDownload = chainstate.IsInitialBlockDownload();
3091  pindexHeaderOld = pindexHeader;
3092  }
3093  }
3094 
3095  // Send block tip changed notifications without cs_main
3096  if (fNotify) {
3097  uiInterface.NotifyHeaderTip(
3098  GetSynchronizationState(fInitialBlockDownload),
3099  pindexHeader->nHeight, pindexHeader->nTime, false);
3100  }
3101  return fNotify;
3102 }
3103 
// Backpressure helper: when more than 10 validation callbacks are pending,
// wait for the queue to drain before producing more work.
// NOTE(review): extraction elided the function header and the statement
// inside the if-body (presumably SyncWithValidationInterfaceQueue()) —
// confirm against the repository.
3106 
3107  if (GetMainSignals().CallbacksPending() > 10) {
3109  }
3110 }
3111 
// Make the best-known chain active: repeatedly find the most-work usable
// candidate (FindMostWorkChain) and step toward it (ActivateBestChainStep),
// periodically releasing cs_main so callbacks can drain. Also queues newly
// considered tips for avalanche reconciliation and honors -stopatheight.
// NOTE(review): extraction elided interior lines (the function header,
// lock/assert lines around 3115-3130, the queue-limit call at 3150, and the
// periodic FlushStateToDisk condition at 3280) — confirm upstream.
3113  BlockValidationState &state,
3114  std::shared_ptr<const CBlock> pblock) {
3116 
3117  // Note that while we're often called here from ProcessNewBlock, this is
3118  // far from a guarantee. Things in the P2P/RPC will often end up calling
3119  // us in the middle of ProcessNewBlock - do not assume pblock is set
3120  // sanely for performance or correctness!
3122 
3123  // ABC maintains a fair degree of expensive-to-calculate internal state
3124  // because this function periodically releases cs_main so that it does not
3125  // lock up other threads for too long during large connects - and to allow
3126  // for e.g. the callback queue to drain we use m_chainstate_mutex to enforce
3127  // mutual exclusion so that only one caller may execute this function at a
3128  // time
3130 
3131  // Belt-and-suspenders check that we aren't attempting to advance the
3132  // background chainstate past the snapshot base block.
3133  if (WITH_LOCK(::cs_main, return m_disabled)) {
3134  LogPrintf("m_disabled is set - this chainstate should not be in "
3135  "operation. Please report this as a bug. %s\n",
3136  PACKAGE_BUGREPORT);
3137  return false;
3138  }
3139 
3140  CBlockIndex *pindexMostWork = nullptr;
3141  CBlockIndex *pindexNewTip = nullptr;
3142  int nStopAtHeight = gArgs.GetIntArg("-stopatheight", DEFAULT_STOPATHEIGHT);
3143  do {
3144  // Block until the validation queue drains. This should largely
3145  // never happen in normal operation, however may happen during
3146  // reindex, causing memory blowup if we run too far ahead.
3147  // Note that if a validationinterface callback ends up calling
3148  // ActivateBestChain this may lead to a deadlock! We should
3149  // probably have a DEBUG_LOCKORDER test for this in the future.
3151 
3152  std::vector<const CBlockIndex *> blocksToReconcile;
3153  bool blocks_connected = false;
3154 
3155  {
3156  LOCK(cs_main);
3157  // Lock transaction pool for at least as long as it takes for
3158  // connectTrace to be consumed
3159  LOCK(MempoolMutex());
3160  CBlockIndex *starting_tip = m_chain.Tip();
3161  do {
3162  // We absolutely may not unlock cs_main until we've made forward
3163  // progress (with the exception of shutdown due to hardware
3164  // issues, low disk space, etc).
3165 
3166  // Destructed before cs_main is unlocked
3167  ConnectTrace connectTrace;
3168 
3169  if (pindexMostWork == nullptr) {
3170  pindexMostWork = FindMostWorkChain(blocksToReconcile);
3171  }
3172 
3173  // Whether we have anything to do at all.
3174  if (pindexMostWork == nullptr ||
3175  pindexMostWork == m_chain.Tip()) {
3176  break;
3177  }
3178 
3179  bool fInvalidFound = false;
3180  std::shared_ptr<const CBlock> nullBlockPtr;
3181  if (!ActivateBestChainStep(
3182  config, state, pindexMostWork,
3183  pblock && pblock->GetHash() ==
3184  pindexMostWork->GetBlockHash()
3185  ? pblock
3186  : nullBlockPtr,
3187  fInvalidFound, connectTrace)) {
3188  // A system error occurred
3189  return false;
3190  }
3191  blocks_connected = true;
3192 
3193  if (fInvalidFound ||
3194  (pindexMostWork && pindexMostWork->nStatus.isParked())) {
3195  // Wipe cache, we may need another branch now.
3196  pindexMostWork = nullptr;
3197  }
3198 
3199  pindexNewTip = m_chain.Tip();
3200  for (const PerBlockConnectTrace &trace :
3201  connectTrace.GetBlocksConnected()) {
3202  assert(trace.pblock && trace.pindex);
3203  GetMainSignals().BlockConnected(trace.pblock, trace.pindex);
3204  }
3205 
3206  // This will have been toggled in
3207  // ActivateBestChainStep -> ConnectTip ->
3208  // MaybeCompleteSnapshotValidation, if at all, so we should
3209  // catch it here.
3210  //
3211  // Break this do-while to ensure we don't advance past the base
3212  // snapshot.
3213  if (m_disabled) {
3214  break;
3215  }
3216  } while (!m_chain.Tip() ||
3217  (starting_tip && CBlockIndexWorkComparator()(
3218  m_chain.Tip(), starting_tip)));
3219 
3220  // Check the index once we're done with the above loop, since
3221  // we're going to release cs_main soon. If the index is in a bad
3222  // state now, then it's better to know immediately rather than
3223  // randomly have it cause a problem in a race.
3224  CheckBlockIndex();
3225 
3226  if (blocks_connected) {
3227  const CBlockIndex *pindexFork = m_chain.FindFork(starting_tip);
3228  bool fInitialDownload = IsInitialBlockDownload();
3229 
3230  // Notify external listeners about the new tip.
3231  // Enqueue while holding cs_main to ensure that UpdatedBlockTip
3232  // is called in the order in which blocks are connected
3233  if (pindexFork != pindexNewTip) {
3234  // Notify ValidationInterface subscribers
3235  GetMainSignals().UpdatedBlockTip(pindexNewTip, pindexFork,
3236  fInitialDownload);
3237 
3238  // Always notify the UI if a new block tip was connected
3239  uiInterface.NotifyBlockTip(
3240  GetSynchronizationState(fInitialDownload),
3241  pindexNewTip);
3242  }
3243  }
3244  }
3245  // When we reach this point, we switched to a new tip (stored in
3246  // pindexNewTip).
3247  if (g_avalanche) {
3248  for (const CBlockIndex *pindex : blocksToReconcile) {
3249  g_avalanche->addToReconcile(pindex);
3250  g_avalanche->computeStakingReward(pindex);
3251  }
3252  }
3253 
3254  if (!blocks_connected) {
3255  return true;
3256  }
3257 
3258  if (nStopAtHeight && pindexNewTip &&
3259  pindexNewTip->nHeight >= nStopAtHeight) {
3260  StartShutdown();
3261  }
3262 
3263  if (WITH_LOCK(::cs_main, return m_disabled)) {
3264  // Background chainstate has reached the snapshot base block, so
3265  // exit.
3266  break;
3267  }
3268 
3269  // We check shutdown only after giving ActivateBestChainStep a chance to
3270  // run once so that we never shutdown before connecting the genesis
3271  // block during LoadChainTip(). Previously this caused an assert()
3272  // failure during shutdown in such cases as the UTXO DB flushing checks
3273  // that the best block hash is non-null.
3274  if (ShutdownRequested()) {
3275  break;
3276  }
3277  } while (pindexNewTip != pindexMostWork);
3278 
3279  // Write changes periodically to disk, after relay.
3281  return false;
3282  }
3283 
3284  return true;
3285 }
3286 
// Treat pindex as "precious": as if it were received before any competing
// block of equal work, so the node prefers its chain on ties. Implemented by
// handing out decreasing sequence ids (visible via nBlockReverseSequenceId),
// unparking the block, re-inserting it as a candidate, and re-running
// ActivateBestChain.
// NOTE(review): extraction elided interior lines (the function header, the
// chain-extension check around 3299-3303, and the sequence-id assignments at
// 3305/3307/3311/3321) — confirm against the repository.
3288  BlockValidationState &state,
3289  CBlockIndex *pindex) {
3292  {
3293  LOCK(cs_main);
3294  if (pindex->nChainWork < m_chain.Tip()->nChainWork) {
3295  // Nothing to do, this block is not at the tip.
3296  return true;
3297  }
3298 
3300  // The chain has been extended since the last call, reset the
3301  // counter.
3303  }
3304 
3306  setBlockIndexCandidates.erase(pindex);
3308  if (nBlockReverseSequenceId > std::numeric_limits<int32_t>::min()) {
3309  // We can't keep reducing the counter if somebody really wants to
3310  // call preciousblock 2**31-1 times on the same set of tips...
3312  }
3313 
3314  // In case this was parked, unpark it.
3315  UnparkBlock(pindex);
3316 
3317  // Make sure it is added to the candidate list if appropriate.
3318  if (pindex->IsValid(BlockValidity::TRANSACTIONS) &&
3319  pindex->HaveTxsDownloaded()) {
3320  setBlockIndexCandidates.insert(pindex);
3322  }
3323  }
3324 
3325  return ActivateBestChain(config, state);
3326 }
3327 
namespace {
/**
 * Minimal scope guard (RAII): stores a callable and invokes it from the
 * destructor, so the deferred action runs on every exit path of the
 * enclosing scope — including early returns.
 */
template <typename Func> struct Defer {
    Func m_fn;

    Defer(Func &&fn) : m_fn(std::move(fn)) {}
    ~Defer() { m_fn(); }
};
} // namespace
3336 
// Disconnect pindex and all of its descendants from the active chain,
// marking them failed (invalidate == true) or parked (invalidate == false).
// Precomputes non-active candidate blocks by work so that replacement tip
// candidates can be inserted as blocks are disconnected; mempool consistency
// is restored by a deferred updateMempoolForReorg on scope exit.
// NOTE(review): extraction elided interior lines (the function header at
// 3337, a lock assertion around 3354, and lock-annotation lines at
// 3401/3403) — confirm against the repository before relying on this
// listing. The statement ordering below is consensus-critical; do not
// restructure without the full source.
3338  CBlockIndex *pindex, bool invalidate) {
3339  // Genesis block can't be invalidated or parked
3340  assert(pindex);
3341  if (pindex->nHeight == 0) {
3342  return false;
3343  }
3344 
3345  CBlockIndex *to_mark_failed_or_parked = pindex;
3346  bool pindex_was_in_chain = false;
3347  int disconnected = 0;
3348 
3349  // We do not allow ActivateBestChain() to run while UnwindBlock() is
3350  // running, as that could cause the tip to change while we disconnect
3351  // blocks. (Note for backport of Core PR16849: we acquire
3352  // LOCK(m_chainstate_mutex) in the Park, Invalidate and FinalizeBlock
3353  // functions due to differences in our code)
3355 
3356  // We'll be acquiring and releasing cs_main below, to allow the validation
3357  // callbacks to run. However, we should keep the block index in a
3358  // consistent state as we disconnect blocks -- in particular we need to
3359  // add equal-work blocks to setBlockIndexCandidates as we disconnect.
3360  // To avoid walking the block index repeatedly in search of candidates,
3361  // build a map once so that we can look up candidate blocks by chain
3362  // work as we go.
3363  std::multimap<const arith_uint256, CBlockIndex *> candidate_blocks_by_work;
3364 
3365  {
3366  LOCK(cs_main);
3367  for (auto &entry : m_blockman.m_block_index) {
3368  CBlockIndex *candidate = &entry.second;
3369  // We don't need to put anything in our active chain into the
3370  // multimap, because those candidates will be found and considered
3371  // as we disconnect.
3372  // Instead, consider only non-active-chain blocks that have at
3373  // least as much work as where we expect the new tip to end up.
3374  if (!m_chain.Contains(candidate) &&
3375  !CBlockIndexWorkComparator()(candidate, pindex->pprev) &&
3376  candidate->IsValid(BlockValidity::TRANSACTIONS) &&
3377  candidate->HaveTxsDownloaded()) {
3378  candidate_blocks_by_work.insert(
3379  std::make_pair(candidate->nChainWork, candidate));
3380  }
3381  }
3382  }
3383 
3384  {
3385  LOCK(cs_main);
3386  // Lock for as long as disconnectpool is in scope to make sure
3387  // UpdateMempoolForReorg is called after DisconnectTip without unlocking
3388  // in between
3389  LOCK(MempoolMutex());
3390 
3391  constexpr int maxDisconnectPoolBlocks = 10;
3392  bool ret = false;
3393  DisconnectedBlockTransactions disconnectpool;
3394  // After 10 blocks this becomes nullptr, so that DisconnectTip will
3395  // stop giving us unwound block txs if we are doing a deep unwind.
3396  DisconnectedBlockTransactions *optDisconnectPool = &disconnectpool;
3397 
3398  // Disable thread safety analysis because we can't require m_mempool->cs
3399  // as m_mempool can be null. We keep the runtime analysis though.
3400  Defer deferred([&]() NO_THREAD_SAFETY_ANALYSIS {
3402  if (m_mempool && !disconnectpool.isEmpty()) {
3404  // DisconnectTip will add transactions to disconnectpool.
3405  // When all unwinding is done and we are on a new tip, we must
3406  // add all transactions back to the mempool against the new tip.
3407  disconnectpool.updateMempoolForReorg(config, *this,
3408  /* fAddToMempool = */ ret,
3409  *m_mempool);
3410  }
3411  });
3412 
3413  // Disconnect (descendants of) pindex, and mark them invalid.
3414  while (true) {
3415  if (ShutdownRequested()) {
3416  break;
3417  }
3418 
3419  // Make sure the queue of validation callbacks doesn't grow
3420  // unboundedly.
3421  // FIXME this commented code is a regression and could cause OOM if
3422  // a very old block is invalidated via the invalidateblock RPC.
3423  // This can be uncommented if the main signals are moved away from
3424  // cs_main or this code is refactored so that cs_main can be
3425  // released at this point.
3426  //
3427  // LimitValidationInterfaceQueue();
3428 
3429  if (!m_chain.Contains(pindex)) {
3430  break;
3431  }
3432 
3433  if (m_mempool && disconnected == 0) {
3434  // On first iteration, we grab all the mempool txs to preserve
3435  // topological ordering. This has the side-effect of temporarily
3436  // clearing the mempool, but we will re-add later in
3437  // updateMempoolForReorg() (above). This technique guarantees
3438  // mempool consistency as well as ensures that our topological
3439  // entry_id index is always correct.
3440  disconnectpool.importMempool(*m_mempool);
3441  }
3442 
3443  pindex_was_in_chain = true;
3444  CBlockIndex *invalid_walk_tip = m_chain.Tip();
3445 
3446  // ActivateBestChain considers blocks already in m_chain
3447  // unconditionally valid already, so force disconnect away from it.
3448 
3449  ret = DisconnectTip(state, optDisconnectPool);
3450  ++disconnected;
3451 
3452  if (optDisconnectPool && disconnected > maxDisconnectPoolBlocks) {
3453  // Stop using the disconnect pool after 10 blocks. After 10
3454  // blocks we no longer add block tx's to the disconnectpool.
3455  // However, when this scope ends we will reconcile what's
3456  // in the pool with the new tip (in the deferred d'tor above).
3457  optDisconnectPool = nullptr;
3458  }
3459 
3460  if (!ret) {
3461  return false;
3462  }
3463 
3464  assert(invalid_walk_tip->pprev == m_chain.Tip());
3465 
3466  // We immediately mark the disconnected blocks as invalid.
3467  // This prevents a case where pruned nodes may fail to
3468  // invalidateblock and be left unable to start as they have no tip
3469  // candidates (as there are no blocks that meet the "have data and
3470  // are not invalid per nStatus" criteria for inclusion in
3471  // setBlockIndexCandidates).
3472 
3473  invalid_walk_tip->nStatus =
3474  invalidate ? invalid_walk_tip->nStatus.withFailed()
3475  : invalid_walk_tip->nStatus.withParked();
3476 
3477  m_blockman.m_dirty_blockindex.insert(invalid_walk_tip);
3478  setBlockIndexCandidates.insert(invalid_walk_tip->pprev);
3479 
3480  if (invalid_walk_tip == to_mark_failed_or_parked->pprev &&
3481  (invalidate ? to_mark_failed_or_parked->nStatus.hasFailed()
3482  : to_mark_failed_or_parked->nStatus.isParked())) {
3483  // We only want to mark the last disconnected block as
3484  // Failed (or Parked); its children need to be FailedParent (or
3485  // ParkedParent) instead.
3486  to_mark_failed_or_parked->nStatus =
3487  (invalidate
3488  ? to_mark_failed_or_parked->nStatus.withFailed(false)
3489  .withFailedParent()
3490  : to_mark_failed_or_parked->nStatus.withParked(false)
3491  .withParkedParent());
3492 
3493  m_blockman.m_dirty_blockindex.insert(to_mark_failed_or_parked);
3494  }
3495 
3496  // Add any equal or more work headers to setBlockIndexCandidates
3497  auto candidate_it = candidate_blocks_by_work.lower_bound(
3498  invalid_walk_tip->pprev->nChainWork);
3499  while (candidate_it != candidate_blocks_by_work.end()) {
3500  if (!CBlockIndexWorkComparator()(candidate_it->second,
3501  invalid_walk_tip->pprev)) {
3502  setBlockIndexCandidates.insert(candidate_it->second);
3503  candidate_it = candidate_blocks_by_work.erase(candidate_it);
3504  } else {
3505  ++candidate_it;
3506  }
3507  }
3508 
3509  // Track the last disconnected block, so we can correct its
3510  // FailedParent (or ParkedParent) status in future iterations, or,
3511  // if it's the last one, call InvalidChainFound on it.
3512  to_mark_failed_or_parked = invalid_walk_tip;
3513  }
3514  }
3515 
3516  CheckBlockIndex();
3517 
3518  {
3519  LOCK(cs_main);
3520  if (m_chain.Contains(to_mark_failed_or_parked)) {
3521  // If the to-be-marked invalid block is in the active chain,
3522  // something is interfering and we can't proceed.
3523  return false;
3524  }
3525 
3526  // Mark pindex (or the last disconnected block) as invalid (or parked),
3527  // even when it never was in the main chain.
3528  to_mark_failed_or_parked->nStatus =
3529  invalidate ? to_mark_failed_or_parked->nStatus.withFailed()
3530  : to_mark_failed_or_parked->nStatus.withParked();
3531  m_blockman.m_dirty_blockindex.insert(to_mark_failed_or_parked);
3532  if (invalidate) {
3533  m_chainman.m_failed_blocks.insert(to_mark_failed_or_parked);
3534  }
3535 
3536  // If any new blocks somehow arrived while we were disconnecting
3537  // (above), then the pre-calculation of what should go into
3538  // setBlockIndexCandidates may have missed entries. This would
3539  // technically be an inconsistency in the block index, but if we clean
3540  // it up here, this should be an essentially unobservable error.
3541  // Loop back over all block index entries and add any missing entries
3542  // to setBlockIndexCandidates.
3543  for (auto &[_, block_index] : m_blockman.m_block_index) {
3544  if (block_index.IsValid(BlockValidity::TRANSACTIONS) &&
3545  block_index.HaveTxsDownloaded() &&
3546  !setBlockIndexCandidates.value_comp()(&block_index,
3547  m_chain.Tip())) {
3548  setBlockIndexCandidates.insert(&block_index);
3549  }
3550  }
3551 
3552  if (invalidate) {
3553  InvalidChainFound(to_mark_failed_or_parked);
3554  }
3555  }
3556 
3557  // Only notify about a new block tip if the active chain was modified.
3558  if (pindex_was_in_chain) {
3559  uiInterface.NotifyBlockTip(
3561  to_mark_failed_or_parked->pprev);
3562  }
3563  return true;
3564 }
3565 
// Permanently mark pindex as invalid and disconnect any active chain built
// on it — thin wrapper over UnwindBlock with invalidate = true.
// NOTE(review): the function header and lock acquisitions (lines 3566 and
// 3569-3572) were elided by extraction.
3567  BlockValidationState &state,
3568  CBlockIndex *pindex) {
3571  // See 'Note for backport of Core PR16849' in Chainstate::UnwindBlock
3573 
3574  return UnwindBlock(config, state, pindex, true);
3575 }
3576 
// Park pindex: like invalidation but reversible (UnparkBlock* can undo it)
// — thin wrapper over UnwindBlock with invalidate = false.
// NOTE(review): the function header and lock acquisitions (lines 3577 and
// 3579-3582) were elided by extraction.
3578  CBlockIndex *pindex) {
3581  // See 'Note for backport of Core PR16849' in Chainstate::UnwindBlock
3583 
3584  return UnwindBlock(config, state, pindex, false);
3585 }
3586 
// Apply the status-transforming functor f to a single block. The new status
// is only committed when it differs from the current one AND the block
// descends from pindexBase (or pindexBase is null). On change: marks the
// index entry dirty, drops it from m_failed_blocks if it became valid, and
// re-adds it to setBlockIndexCandidates when it outranks the current tip.
// Returns whether the block's status was changed.
// NOTE(review): the signature line (3588, with the pindexBase parameter) was
// elided by extraction.
3587 template <typename F>
3589  CBlockIndex *pindex, F f) {
3590  BlockStatus newStatus = f(pindex->nStatus);
3591  if (pindex->nStatus != newStatus &&
3592  (!pindexBase ||
3593  pindex->GetAncestor(pindexBase->nHeight) == pindexBase)) {
3594  pindex->nStatus = newStatus;
3595  m_blockman.m_dirty_blockindex.insert(pindex);
3596  if (newStatus.isValid()) {
3597  m_chainman.m_failed_blocks.erase(pindex);
3598  }
3599 
3600  if (pindex->IsValid(BlockValidity::TRANSACTIONS) &&
3601  pindex->HaveTxsDownloaded() &&
3602  setBlockIndexCandidates.value_comp()(m_chain.Tip(), pindex)) {
3603  setBlockIndexCandidates.insert(pindex);
3604  }
3605  return true;
3606  }
3607  return false;
3608 }
3609 
// Propagate a status change across the block tree: apply f to pindex and all
// its ancestors (tracking the deepest one actually changed), clear the
// pindexReset pointer if one of its ancestors changed, then sweep the whole
// block index applying fChild to descendants of pindex and
// fAncestorWasChanged to descendants of the deepest changed ancestor.
// NOTE(review): the signature line (3611, with the pindexReset parameter)
// and a lock assertion (3613) were elided by extraction.
3610 template <typename F, typename C, typename AC>
3612  F f, C fChild, AC fAncestorWasChanged) {
3614 
3615  // Update the current block and ancestors; while we're doing this, identify
3616  // which was the deepest ancestor we changed.
3617  CBlockIndex *pindexDeepestChanged = pindex;
3618  for (auto pindexAncestor = pindex; pindexAncestor != nullptr;
3619  pindexAncestor = pindexAncestor->pprev) {
3620  if (UpdateFlagsForBlock(nullptr, pindexAncestor, f)) {
3621  pindexDeepestChanged = pindexAncestor;
3622  }
3623  }
3624 
3625  if (pindexReset &&
3626  pindexReset->GetAncestor(pindexDeepestChanged->nHeight) ==
3627  pindexDeepestChanged) {
3628  // reset pindexReset if it had a modified ancestor.
3629  pindexReset = nullptr;
3630  }
3631 
3632  // Update all blocks under modified blocks.
3633  for (auto &[_, block_index] : m_blockman.m_block_index) {
3634  UpdateFlagsForBlock(pindex, &block_index, fChild);
3635  UpdateFlagsForBlock(pindexDeepestChanged, &block_index,
3636  fAncestorWasChanged);
3637  }
3638 }
3639 
// Clear failure flags on pindex, its ancestors, and descendants via
// UpdateFlags: ancestors/self get withClearedFailureFlags(), descendants of
// a changed ancestor lose only the FailedParent marker. Also resets
// m_chainman.m_best_invalid when its ancestry was modified.
// NOTE(review): the function header (3640-3641) was elided by extraction.
3642 
3643  UpdateFlags(
3644  pindex, m_chainman.m_best_invalid,
3645  [](const BlockStatus status) {
3646  return status.withClearedFailureFlags();
3647  },
3648  [](const BlockStatus status) {
3649  return status.withClearedFailureFlags();
3650  },
3651  [](const BlockStatus status) {
3652  return status.withFailedParent(false);
3653  });
3654 }
3655 
// Shared implementation for UnparkBlock/UnparkBlockAndChildren: clears
// parked flags on pindex and its ancestors; for descendants, either clears
// all parked flags (fClearChildren) or only the ParkedParent marker. Resets
// m_chainman.m_best_parked when its ancestry was modified.
// NOTE(review): line 3657 (presumably a lock assertion) was elided by
// extraction.
3656 void Chainstate::UnparkBlockImpl(CBlockIndex *pindex, bool fClearChildren) {
3658 
3659  UpdateFlags(
3660  pindex, m_chainman.m_best_parked,
3661  [](const BlockStatus status) {
3662  return status.withClearedParkedFlags();
3663  },
3664  [fClearChildren](const BlockStatus status) {
3665  return fClearChildren ? status.withClearedParkedFlags()
3666  : status.withParkedParent(false);
3667  },
3668  [](const BlockStatus status) {
3669  return status.withParkedParent(false);
3670  });
3671 }
3672 
// Unpark pindex and fully clear parked flags on its children as well
// (fClearChildren = true). Header line (3673) elided by extraction.
3674  return UnparkBlockImpl(pindex, true);
3675 }
3676 
// Unpark pindex only; children keep their own parked flags and merely lose
// the ParkedParent marker (fClearChildren = false). Header line (3677)
// elided by extraction.
3678  return UnparkBlockImpl(pindex, false);
3679 }
3680 
// Record pindex as the avalanche-finalized block: refuses null or
// non-active-chain blocks, lets avalanche clean up staking rewards below
// this height, and fires the BlockFinalized signal unless the block was
// already covered by the current finalized tip.
// NOTE(review): extraction elided the function header (3681), the log
// function name before the message at 3687, and the lock line at 3703 —
// confirm against the repository.
3682  if (!pindex) {
3683  return false;
3684  }
3685 
3686  if (!m_chain.Contains(pindex)) {
3688  "The block to mark finalized by avalanche is not on the "
3689  "active chain: %s\n",
3690  pindex->GetBlockHash().ToString());
3691  return false;
3692  }
3693 
3694  if (g_avalanche) {
3695  g_avalanche->cleanupStakingRewards(pindex->nHeight);
3696  }
3697 
3698  if (IsBlockAvalancheFinalized(pindex)) {
3699  return true;
3700  }
3701 
3702  {
3704  m_avalancheFinalizedBlockIndex = pindex;
3705  }
3706 
3707  GetMainSignals().BlockFinalized(pindex);
3708 
3709  return true;
3710 }
3711 
// Reset the avalanche-finalized block pointer so no block is considered
// finalized. NOTE(review): signature and lock lines (original 3712-3713)
// were lost in extraction.
3714  m_avalancheFinalizedBlockIndex = nullptr;
3715 }
3716 
// A block is avalanche-finalized iff it is an ancestor of (or equal to) the
// recorded finalized block. NOTE(review): signature/lock lines (original
// 3717-3718) were lost in extraction.
3719  return pindex && m_avalancheFinalizedBlockIndex &&
3720  m_avalancheFinalizedBlockIndex->GetAncestor(pindex->nHeight) ==
3721  pindex;
3722 }
3723 
// Record that the full transaction data for a block has been received and
// stored at `pos`, then link the block (and any descendants waiting on it in
// m_blocks_unlinked) into setBlockIndexCandidates when their full chains of
// transactions are available. NOTE(review): the start of the signature
// (original line 3728) was lost in extraction.
3729  CBlockIndex *pindexNew,
3730  const FlatFilePos &pos) {
 // Record per-block metadata: tx count, serialized size, and disk position.
3731  pindexNew->nTx = block.vtx.size();
3732  pindexNew->nSize = ::GetSerializeSize(block, PROTOCOL_VERSION);
3733  pindexNew->nFile = pos.nFile;
3734  pindexNew->nDataPos = pos.nPos;
3735  pindexNew->nUndoPos = 0;
3736  pindexNew->nStatus = pindexNew->nStatus.withData();
3738  m_blockman.m_dirty_blockindex.insert(pindexNew);
3739 
3740  if (pindexNew->UpdateChainStats()) {
3741  // If pindexNew is the genesis block or all parents are
3742  // BLOCK_VALID_TRANSACTIONS.
3743  std::deque<CBlockIndex *> queue;
3744  queue.push_back(pindexNew);
3745 
3746  // Recursively process any descendant blocks that now may be eligible to
3747  // be connected.
3748  while (!queue.empty()) {
3749  CBlockIndex *pindex = queue.front();
3750  queue.pop_front();
3751  pindex->UpdateChainStats();
3752  if (pindex->nSequenceId == 0) {
3753  // We assign a sequence id when transactions are received to
3754  // prevent a miner from being able to broadcast a block but not
3755  // its content. However, a sequence id may have been set
3756  // manually, for instance via PreciousBlock, in which case, we
3757  // don't need to assign one.
3758  pindex->nSequenceId = nBlockSequenceId++;
3759  }
3760 
 // Only add as a candidate if it could improve on the current tip.
3761  if (m_chain.Tip() == nullptr ||
3762  !setBlockIndexCandidates.value_comp()(pindex, m_chain.Tip())) {
3763  setBlockIndexCandidates.insert(pindex);
3764  }
3765 
 // Pull any blocks that were waiting on this one out of the
 // unlinked map and process them too.
3766  std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
3767  std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
3768  range = m_blockman.m_blocks_unlinked.equal_range(pindex);
3769  while (range.first != range.second) {
3770  std::multimap<CBlockIndex *, CBlockIndex *>::iterator it =
3771  range.first;
3772  queue.push_back(it->second);
3773  range.first++;
3774  m_blockman.m_blocks_unlinked.erase(it);
3775  }
3776  }
 // Parent's tx data is not yet available: park this block in the
 // unlinked map until the parent's transactions arrive.
3777  } else if (pindexNew->pprev &&
3778  pindexNew->pprev->IsValid(BlockValidity::TREE)) {
3780  std::make_pair(pindexNew->pprev, pindexNew));
3781  }
3782 }
3783 
// Context-free header check: verify the header's proof of work against its
// claimed nBits (skipped when validationOptions disables PoW validation).
// NOTE(review): the state.Invalid(...) result-code line (original 3799) was
// lost in extraction.
3792 static bool CheckBlockHeader(const CBlockHeader &block,
3793  BlockValidationState &state,
3794  const Consensus::Params &params,
3795  BlockValidationOptions validationOptions) {
3796  // Check proof of work matches claimed amount
3797  if (validationOptions.shouldValidatePoW() &&
3798  !CheckProofOfWork(block.GetHash(), block.nBits, params)) {
3800  "high-hash", "proof of work failed");
3801  }
3802 
3803  return true;
3804 }
3805 
// Context-free block checks: header PoW, merkle root (incl. CVE-2012-2459
// mutation detection), size limits, coinbase-first, and per-transaction
// regularity. Caches success in block.fChecked when both PoW and merkle
// validation were performed. NOTE(review): several state.Invalid(...) result
// code lines were lost in extraction throughout this function.
3806 bool CheckBlock(const CBlock &block, BlockValidationState &state,
3807  const Consensus::Params &params,
3808  BlockValidationOptions validationOptions) {
3809  // These are checks that are independent of context.
3810  if (block.fChecked) {
3811  return true;
3812  }
3813 
3814  // Check that the header is valid (particularly PoW). This is mostly
3815  // redundant with the call in AcceptBlockHeader.
3816  if (!CheckBlockHeader(block, state, params, validationOptions)) {
3817  return false;
3818  }
3819 
3820  // Check the merkle root.
3821  if (validationOptions.shouldValidateMerkleRoot()) {
3822  bool mutated;
3823  uint256 hashMerkleRoot2 = BlockMerkleRoot(block, &mutated);
3824  if (block.hashMerkleRoot != hashMerkleRoot2) {
3826  "bad-txnmrklroot", "hashMerkleRoot mismatch");
3827  }
3828 
3829  // Check for merkle tree malleability (CVE-2012-2459): repeating
3830  // sequences of transactions in a block without affecting the merkle
3831  // root of a block, while still invalidating it.
3832  if (mutated) {
3834  "bad-txns-duplicate", "duplicate transaction");
3835  }
3836  }
3837 
3838  // All potential-corruption validation must be done before we do any
3839  // transaction validation, as otherwise we may mark the header as invalid
3840  // because we receive the wrong transactions for it.
3841 
3842  // First transaction must be coinbase.
3843  if (block.vtx.empty()) {
3845  "bad-cb-missing", "first tx is not coinbase");
3846  }
3847 
3848  // Size limits.
3849  auto nMaxBlockSize = validationOptions.getExcessiveBlockSize();
3850 
3851  // Bail early if there is no way this block is of reasonable size.
 // Cheap lower bound: every tx is at least MIN_TRANSACTION_SIZE bytes, so
 // this rejects absurd blocks without serializing them.
3852  if ((block.vtx.size() * MIN_TRANSACTION_SIZE) > nMaxBlockSize) {
3854  "bad-blk-length", "size limits failed");
3855  }
3856 
3857  auto currentBlockSize = ::GetSerializeSize(block, PROTOCOL_VERSION);
3858  if (currentBlockSize > nMaxBlockSize) {
3860  "bad-blk-length", "size limits failed");
3861  }
3862 
3863  // And a valid coinbase.
3864  TxValidationState tx_state;
3865  if (!CheckCoinbase(*block.vtx[0], tx_state)) {
3867  tx_state.GetRejectReason(),
3868  strprintf("Coinbase check failed (txid %s) %s",
3869  block.vtx[0]->GetId().ToString(),
3870  tx_state.GetDebugMessage()));
3871  }
3872 
3873  // Check transactions for regularity, skipping the first. Note that this
3874  // is the first time we check that all after the first are !IsCoinBase.
3875  for (size_t i = 1; i < block.vtx.size(); i++) {
3876  auto *tx = block.vtx[i].get();
3877  if (!CheckRegularTransaction(*tx, tx_state)) {
3878  return state.Invalid(
3880  tx_state.GetRejectReason(),
3881  strprintf("Transaction check failed (txid %s) %s",
3882  tx->GetId().ToString(), tx_state.GetDebugMessage()));
3883  }
3884  }
3885 
 // Only cache the result when the full set of checks actually ran;
 // otherwise a later call with stricter options must re-validate.
3886  if (validationOptions.shouldValidatePoW() &&
3887  validationOptions.shouldValidateMerkleRoot()) {
3888  block.fChecked = true;
3889  }
3890 
3891  return true;
3892 }
3893 
// Return true iff every header in the batch satisfies its own claimed
// proof-of-work target (nBits); an empty vector trivially passes.
3894 bool HasValidProofOfWork(const std::vector<CBlockHeader> &headers,
3895  const Consensus::Params &consensusParams) {
3896  return std::all_of(headers.cbegin(), headers.cend(),
3897  [&](const auto &header) {
3898  return CheckProofOfWork(
3899  header.GetHash(), header.nBits, consensusParams);
3900  });
3901 }
3902 
// Sum the chain-work contribution of each header (as derived from its nBits
// via GetBlockProof). Used for anti-DoS checks on header batches.
3903 arith_uint256 CalculateHeadersWork(const std::vector<CBlockHeader> &headers) {
3904  arith_uint256 total_work{0};
3905  for (const CBlockHeader &header : headers) {
 // A throwaway CBlockIndex is enough: GetBlockProof only reads nBits.
3906  CBlockIndex dummy(header);
3907  total_work += GetBlockProof(dummy);
3908  }
3909  return total_work;
3910 }
3911 
// Contextual header checks against the previous block: difficulty (nBits),
// checkpoints, timestamp bounds, and outdated block versions.
// NOTE(review): the final parameter(s) of the signature (original lines
// 3927-3929, presumably the current adjusted time `now`) and several
// state.Invalid(...) result-code lines were lost in extraction.
3922 static bool ContextualCheckBlockHeader(const CChainParams &params,
3923  const CBlockHeader &block,
3924  BlockValidationState &state,
3925  BlockManager &blockman,
3926  const CBlockIndex *pindexPrev,
3930  assert(pindexPrev != nullptr);
3931  const int nHeight = pindexPrev->nHeight + 1;
3932 
3933  // Check proof of work
3934  if (block.nBits != GetNextWorkRequired(pindexPrev, &block, params)) {
3935  LogPrintf("bad bits after height: %d\n", pindexPrev->nHeight);
3937  "bad-diffbits", "incorrect proof of work");
3938  }
3939 
3940  // Check against checkpoints
3941  if (fCheckpointsEnabled) {
3942  const CCheckpointData &checkpoints = params.Checkpoints();
3943 
3944  // Check that the block chain matches the known block chain up to a
3945  // checkpoint.
3946  if (!Checkpoints::CheckBlock(checkpoints, nHeight, block.GetHash())) {
3948  "ERROR: %s: rejected by checkpoint lock-in at %d\n",
3949  __func__, nHeight);
3951  "checkpoint mismatch");
3952  }
3953 
3954  // Don't accept any forks from the main chain prior to last checkpoint.
3955  // GetLastCheckpoint finds the last checkpoint in MapCheckpoints that's
3956  // in our BlockIndex().
3957 
3958  const CBlockIndex *pcheckpoint =
3959  blockman.GetLastCheckpoint(checkpoints);
3960  if (pcheckpoint && nHeight < pcheckpoint->nHeight) {
3962  "ERROR: %s: forked chain older than last checkpoint "
3963  "(height %d)\n",
3964  __func__, nHeight);
3966  "bad-fork-prior-to-checkpoint");
3967  }
3968  }
3969 
3970  // Check timestamp against prev
 // Consensus rule: a block's time must be strictly greater than the median
 // time of the previous 11 blocks.
3971  if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast()) {
3973  "time-too-old", "block's timestamp is too early");
3974  }
3975 
3976  // Check timestamp
 // Reject blocks timestamped more than MAX_FUTURE_BLOCK_TIME past `now`.
3977  if (block.Time() > now + std::chrono::seconds{MAX_FUTURE_BLOCK_TIME}) {
3979  "time-too-new",
3980  "block timestamp too far in the future");
3981  }
3982 
3983  const Consensus::Params &consensusParams = params.GetConsensus();
3984  // Reject blocks with outdated version
 // NOTE(review): the deployment-name arguments (original 3987/3990/3993,
 // presumably DEPLOYMENT_HEIGHTINCB/DERSIG/CLTV) were lost in extraction.
3985  if ((block.nVersion < 2 &&
3986  DeploymentActiveAfter(pindexPrev, consensusParams,
3988  (block.nVersion < 3 &&
3989  DeploymentActiveAfter(pindexPrev, consensusParams,
3991  (block.nVersion < 4 &&
3992  DeploymentActiveAfter(pindexPrev, consensusParams,
3994  return state.Invalid(
3996  strprintf("bad-version(0x%08x)", block.nVersion),
3997  strprintf("rejected nVersion=0x%08x block", block.nVersion));
3998  }
3999 
4000  return true;
4001 }
4002 
// Check whether a transaction would be final in the *next* block built on
// active_chain_tip, using height+1 and the tip's median-time-past (BIP113).
// NOTE(review): the first signature line (original 4003) was lost in
// extraction.
4004  const CBlockIndex *active_chain_tip, const Consensus::Params &params,
4005  const CTransaction &tx, TxValidationState &state) {
4007  // TODO: Make active_chain_tip a reference
4008  assert(active_chain_tip);
4009 
4010  // ContextualCheckTransactionForCurrentBlock() uses
4011  // active_chain_tip.Height()+1 to evaluate nLockTime because when
4012  // IsFinalTx() is called within AcceptBlock(), the height of the
4013  // block *being* evaluated is what is used. Thus if we want to know if a
4014  // transaction can be part of the *next* block, we need to call
4015  // ContextualCheckTransaction() with one more than
4016  // active_chain_tip.Height().
4017  const int nBlockHeight = active_chain_tip->nHeight + 1;
4018 
4019  // BIP113 will require that time-locked transactions have nLockTime set to
4020  // less than the median time of the previous block they're contained in.
4021  // When the next block is created its previous block will be the current
4022  // chain tip, so we use that to calculate the median time passed to
4023  // ContextualCheckTransaction().
4024  // This time can also be used for consensus upgrades.
4025  const int64_t nMedianTimePast{active_chain_tip->GetMedianTimePast()};
4026 
4027  return ContextualCheckTransaction(params, tx, state, nBlockHeight,
4028  nMedianTimePast);
4029 }
4030 
// Contextual full-block checks: canonical transaction ordering (once
// Magnetic Anomaly is active), transaction finality (BIP113 cutoff), and the
// BIP34 coinbase-height rule. NOTE(review): several state.Invalid(...)
// result-code lines and the deployment-name argument at original line 4100
// were lost in extraction.
4038 static bool ContextualCheckBlock(const CBlock &block,
4039  BlockValidationState &state,
4040  const Consensus::Params &params,
4041  const CBlockIndex *pindexPrev) {
 // pindexPrev == nullptr means this is the genesis block (height 0).
4042  const int nHeight = pindexPrev == nullptr ? 0 : pindexPrev->nHeight + 1;
4043 
4044  // Enforce BIP113 (Median Time Past).
4045  bool enforce_locktime_median_time_past{false};
4046  if (DeploymentActiveAfter(pindexPrev, params, Consensus::DEPLOYMENT_CSV)) {
4047  assert(pindexPrev != nullptr);
4048  enforce_locktime_median_time_past = true;
4049  }
4050 
4051  const int64_t nMedianTimePast =
4052  pindexPrev == nullptr ? 0 : pindexPrev->GetMedianTimePast();
4053 
 // Pre-BIP113 the lock-time cutoff is the block's own timestamp.
4054  const int64_t nLockTimeCutoff{enforce_locktime_median_time_past
4055  ? nMedianTimePast
4056  : block.GetBlockTime()};
4057 
4058  const bool fIsMagneticAnomalyEnabled =
4059  IsMagneticAnomalyEnabled(params, pindexPrev);
4060 
4061  // Check transactions:
4062  // - canonical ordering
4063  // - ensure they are finalized
4064  // - check they have the minimum size
4065  const CTransaction *prevTx = nullptr;
4066  for (const auto &ptx : block.vtx) {
4067  const CTransaction &tx = *ptx;
4068  if (fIsMagneticAnomalyEnabled) {
 // CTOR: txids (after the coinbase) must be strictly increasing.
4069  if (prevTx && (tx.GetId() <= prevTx->GetId())) {
4070  if (tx.GetId() == prevTx->GetId()) {
4072  "tx-duplicate",
4073  strprintf("Duplicated transaction %s",
4074  tx.GetId().ToString()));
4075  }
4076 
4077  return state.Invalid(
4079  strprintf("Transaction order is invalid (%s < %s)",
4080  tx.GetId().ToString(),
4081  prevTx->GetId().ToString()));
4082  }
4083 
 // Skip the coinbase when seeding the ordering comparison.
4084  if (prevTx || !tx.IsCoinBase()) {
4085  prevTx = &tx;
4086  }
4087  }
4088 
4089  TxValidationState tx_state;
4090  if (!ContextualCheckTransaction(params, tx, tx_state, nHeight,
4091  nLockTimeCutoff)) {
4093  tx_state.GetRejectReason(),
4094  tx_state.GetDebugMessage());
4095  }
4096  }
4097 
4098  // Enforce rule that the coinbase starts with serialized block height
4099  if (DeploymentActiveAfter(pindexPrev, params,
4101  CScript expect = CScript() << nHeight;
4102  if (block.vtx[0]->vin[0].scriptSig.size() < expect.size() ||
4103  !std::equal(expect.begin(), expect.end(),
4104  block.vtx[0]->vin[0].scriptSig.begin())) {
4106  "bad-cb-height",
4107  "block height mismatch in coinbase");
4108  }
4109  }
4110 
4111  return true;
4112 }
4113 
// Validate a header and add it to the block index. Handles: duplicate
// headers, context-free and contextual header checks, unknown/invalid
// parents, propagation of failure flags from known-failed ancestors, and the
// anti-DoS min_pow_checked gate. On success *ppindex points at the (new or
// existing) index entry. NOTE(review): the leading signature line (original
// 4119, ChainstateManager::AcceptBlockHeader) and several LogPrint /
// state.Invalid result-code lines were lost in extraction.
4120  const CBlockHeader &block,
4121  BlockValidationState &state,
4122  CBlockIndex **ppindex,
4123  bool min_pow_checked) {
4125  const CChainParams &chainparams = config.GetChainParams();
4126 
4127  // Check for duplicate
4128  BlockHash hash = block.GetHash();
4129  BlockMap::iterator miSelf{m_blockman.m_block_index.find(hash)};
 // The genesis block skips all of these checks and is indexed directly.
4130  if (hash != chainparams.GetConsensus().hashGenesisBlock) {
4131  if (miSelf != m_blockman.m_block_index.end()) {
4132  // Block header is already known.
4133  CBlockIndex *pindex = &(miSelf->second);
4134  if (ppindex) {
4135  *ppindex = pindex;
4136  }
4137 
4138  if (pindex->nStatus.isInvalid()) {
4139  LogPrint(BCLog::VALIDATION, "%s: block %s is marked invalid\n",
4140  __func__, hash.ToString());
4141  return state.Invalid(
4143  }
4144 
4145  return true;
4146  }
4147 
4148  if (!CheckBlockHeader(block, state, chainparams.GetConsensus(),
4149  BlockValidationOptions(config))) {
4151  "%s: Consensus::CheckBlockHeader: %s, %s\n", __func__,
4152  hash.ToString(), state.ToString());
4153  return false;
4154  }
4155 
4156  // Get prev block index
4157  BlockMap::iterator mi{
4158  m_blockman.m_block_index.find(block.hashPrevBlock)};
4159  if (mi == m_blockman.m_block_index.end()) {
4161  "header %s has prev block not found: %s\n",
4162  hash.ToString(), block.hashPrevBlock.ToString());
4164  "prev-blk-not-found");
4165  }
4166 
4167  CBlockIndex *pindexPrev = &((*mi).second);
4168  assert(pindexPrev);
4169  if (pindexPrev->nStatus.isInvalid()) {
4171  "header %s has prev block invalid: %s\n", hash.ToString(),
4172  block.hashPrevBlock.ToString());
4174  "bad-prevblk");
4175  }
4176 
4177  if (!ContextualCheckBlockHeader(chainparams, block, state, m_blockman,
4178  pindexPrev,
4181  "%s: Consensus::ContextualCheckBlockHeader: %s, %s\n",
4182  __func__, hash.ToString(), state.ToString());
4183  return false;
4184  }
4185 
4186  /* Determine if this block descends from any block which has been found
4187  * invalid (m_failed_blocks), then mark pindexPrev and any blocks
4188  * between them as failed. For example:
4189  *
4190  * D3
4191  * /
4192  * B2 - C2
4193  * / \
4194  * A D2 - E2 - F2
4195  * \
4196  * B1 - C1 - D1 - E1
4197  *
4198  * In the case that we attempted to reorg from E1 to F2, only to find
4199  * C2 to be invalid, we would mark D2, E2, and F2 as BLOCK_FAILED_CHILD
4200  * but NOT D3 (it was not in any of our candidate sets at the time).
4201  *
4202  * In any case D3 will also be marked as BLOCK_FAILED_CHILD at restart
4203  * in LoadBlockIndex.
4204  */
4205  if (!pindexPrev->IsValid(BlockValidity::SCRIPTS)) {
4206  // The above does not mean "invalid": it checks if the previous
4207  // block hasn't been validated up to BlockValidity::SCRIPTS. This is
4208  // a performance optimization, in the common case of adding a new
4209  // block to the tip, we don't need to iterate over the failed blocks
4210  // list.
4211  for (const CBlockIndex *failedit : m_failed_blocks) {
4212  if (pindexPrev->GetAncestor(failedit->nHeight) == failedit) {
4213  assert(failedit->nStatus.hasFailed());
 // Walk from pindexPrev down to the failed ancestor,
 // tagging every block in between as failed-parent.
4214  CBlockIndex *invalid_walk = pindexPrev;
4215  while (invalid_walk != failedit) {
4216  invalid_walk->nStatus =
4217  invalid_walk->nStatus.withFailedParent();
4218  m_blockman.m_dirty_blockindex.insert(invalid_walk);
4219  invalid_walk = invalid_walk->pprev;
4220  }
4222  "header %s has prev block invalid: %s\n",
4223  hash.ToString(), block.hashPrevBlock.ToString());
4224  return state.Invalid(
4226  "bad-prevblk");
4227  }
4228  }
4229  }
4230  }
 // Anti-DoS: the caller must have vouched that this header chain carries
 // enough work before we are willing to allocate an index entry for it.
4231  if (!min_pow_checked) {
4233  "%s: not adding new block header %s, missing anti-dos "
4234  "proof-of-work validation\n",
4235  __func__, hash.ToString());
4237  "too-little-chainwork");
4238  }
4239  CBlockIndex *pindex{m_blockman.AddToBlockIndex(block, m_best_header)};
4240 
4241  if (ppindex) {
4242  *ppindex = pindex;
4243  }
4244 
4245  return true;
4246 }
4247 
4248 // Exposed wrapper for AcceptBlockHeader
// Accept a batch of headers under cs_main, reporting the last accepted header
// via *ppindex and logging header-sync progress during IBD. Returns false on
// the first rejected header. NOTE(review): the signature line (original 4249,
// ChainstateManager::ProcessNewBlockHeaders) and the divisor of blocks_left
// (original 4279, presumably the chain's target spacing) were lost in
// extraction.
4250  const Config &config, const std::vector<CBlockHeader> &headers,
4251  bool min_pow_checked, BlockValidationState &state,
4252  const CBlockIndex **ppindex) {
4254  {
4255  LOCK(cs_main);
4256  for (const CBlockHeader &header : headers) {
4257  // Use a temp pindex instead of ppindex to avoid a const_cast
4258  CBlockIndex *pindex = nullptr;
4259  bool accepted = AcceptBlockHeader(config, header, state, &pindex,
4260  min_pow_checked);
4261  ActiveChainstate().CheckBlockIndex();
4262 
4263  if (!accepted) {
4264  return false;
4265  }
4266 
4267  if (ppindex) {
4268  *ppindex = pindex;
4269  }
4270  }
4271  }
4272 
 // Progress reporting happens outside cs_main.
4273  if (NotifyHeaderTip(ActiveChainstate())) {
4274  if (ActiveChainstate().IsInitialBlockDownload() && ppindex &&
4275  *ppindex) {
4276  const CBlockIndex &last_accepted{**ppindex};
4277  const int64_t blocks_left{
4278  (GetTime() - last_accepted.GetBlockTime()) /
4280  const double progress{100.0 * last_accepted.nHeight /
4281  (last_accepted.nHeight + blocks_left)};
4282  LogPrintf("Synchronizing blockheaders, height: %d (~%.2f%%)\n",
4283  last_accepted.nHeight, progress);
4284  }
4285  }
4286  return true;
4287 }
4288 
// Report headers-presync progress to the UI, rate-limited to 4 updates per
// second, and suppressed once our best header already exceeds the
// minimum-chain-work threshold (so spammy low-work headers cannot fill the
// logs). NOTE(review): the comparison operand at original line 4302
// (presumably nMinimumChainWork) and the blocks_left divisor at 4318 were
// lost in extraction.
4289 void ChainstateManager::ReportHeadersPresync(const arith_uint256 &work,
4290  int64_t height,
4291  int64_t timestamp) {
4293  const auto &chainstate = ActiveChainstate();
4294  {
4295  LOCK(cs_main);
4296  // Don't report headers presync progress if we already have a
4297  // post-minchainwork header chain.
4298  // This means we lose reporting for potentially legimate, but unlikely,
4299  // deep reorgs, but prevent attackers that spam low-work headers from
4300  // filling our logs.
4301  if (m_best_header->nChainWork >=
4303  return;
4304  }
4305  // Rate limit headers presync updates to 4 per second, as these are not
4306  // subject to DoS protection.
4307  auto now = Now<SteadyMilliseconds>();
4308  if (now < m_last_presync_update + 250ms) {
4309  return;
4310  }
4311  m_last_presync_update = now;
4312  }
 // UI notification and logging happen outside cs_main.
4313  bool initial_download = chainstate.IsInitialBlockDownload();
4314  uiInterface.NotifyHeaderTip(GetSynchronizationState(initial_download),
4315  height, timestamp, /*presync=*/true);
4316  if (initial_download) {
4317  const int64_t blocks_left{(GetTime() - timestamp) /
4319  const double progress{100.0 * height / (height + blocks_left)};
4320  LogPrintf("Pre-synchronizing blockheaders, height: %d (~%.2f%%)\n",
4321  height, progress);
4322  }
4323 }
4324 
// Store a full block to disk if it passes all relevant checks, after first
// accepting its header. Unrequested blocks are only processed when they are
// new, carry enough work, and are not too far ahead of the tip. Deep-reorg
// blocks may be parked (-parkdeepreorg). Returns true for already-known or
// deliberately skipped blocks as well as successfully stored ones.
// NOTE(review): an AssertLockHeld line (original 4343) and a
// GetMainSignals().BlockStored-style call (original 4491) appear to have been
// lost in extraction.
4338 bool Chainstate::AcceptBlock(const Config &config,
4339  const std::shared_ptr<const CBlock> &pblock,
4340  BlockValidationState &state, bool fRequested,
4341  const FlatFilePos *dbp, bool *fNewBlock,
4342  bool min_pow_checked) {
4344 
4345  const CBlock &block = *pblock;
4346  if (fNewBlock) {
4347  *fNewBlock = false;
4348  }
4349 
4350  CBlockIndex *pindex = nullptr;
4351 
4352  bool accepted_header{m_chainman.AcceptBlockHeader(
4353  config, block, state, &pindex, min_pow_checked)};
4354  CheckBlockIndex();
4355 
4356  if (!accepted_header) {
4357  return false;
4358  }
4359 
4360  // Try to process all requested blocks that we don't have, but only
4361  // process an unrequested block if it's new and has enough work to
4362  // advance our tip, and isn't too many blocks ahead.
4363  bool fAlreadyHave = pindex->nStatus.hasData();
4364 
4365  // TODO: deal better with return value and error conditions for duplicate
4366  // and unrequested blocks.
4367  if (fAlreadyHave) {
4368  return true;
4369  }
4370 
4371  // Compare block header timestamps and received times of the block and the
4372  // chaintip. If they have the same chain height, use these diffs as a
4373  // tie-breaker, attempting to pick the more honestly-mined block.
 // NOTE(review): despite the comment above, the code below compares
 // nChainWork, not height -- confirm intent against the original file.
4374  int64_t newBlockTimeDiff = std::llabs(pindex->GetReceivedTimeDiff());
4375  int64_t chainTipTimeDiff =
4376  m_chain.Tip() ? std::llabs(m_chain.Tip()->GetReceivedTimeDiff()) : 0;
4377 
4378  bool isSameHeight =
4379  m_chain.Tip() && (pindex->nChainWork == m_chain.Tip()->nChainWork);
4380  if (isSameHeight) {
4381  LogPrintf("Chain tip timestamp-to-received-time difference: hash=%s, "
4382  "diff=%d\n",
4383  m_chain.Tip()->GetBlockHash().ToString(), chainTipTimeDiff);
4384  LogPrintf("New block timestamp-to-received-time difference: hash=%s, "
4385  "diff=%d\n",
4386  pindex->GetBlockHash().ToString(), newBlockTimeDiff);
4387  }
4388 
4389  bool fHasMoreOrSameWork =
4390  (m_chain.Tip() ? pindex->nChainWork >= m_chain.Tip()->nChainWork
4391  : true);
4392 
4393  // Blocks that are too out-of-order needlessly limit the effectiveness of
4394  // pruning, because pruning will not delete block files that contain any
4395  // blocks which are too close in height to the tip. Apply this test
4396  // regardless of whether pruning is enabled; it should generally be safe to
4397  // not process unrequested blocks.
4398  bool fTooFarAhead{pindex->nHeight >
4399  m_chain.Height() + int(MIN_BLOCKS_TO_KEEP)};
4400 
4401  // TODO: Decouple this function from the block download logic by removing
4402  // fRequested
4403  // This requires some new chain data structure to efficiently look up if a
4404  // block is in a chain leading to a candidate for best tip, despite not
4405  // being such a candidate itself.
4406  // Note that this would break the getblockfrompeer RPC
4407 
4408  // If we didn't ask for it:
4409  if (!fRequested) {
4410  // This is a previously-processed block that was pruned.
4411  if (pindex->nTx != 0) {
4412  return true;
4413  }
4414 
4415  // Don't process less-work chains.
4416  if (!fHasMoreOrSameWork) {
4417  return true;
4418  }
4419 
4420  // Block height is too high.
4421  if (fTooFarAhead) {
4422  return true;
4423  }
4424 
4425  // Protect against DoS attacks from low-work chains.
4426  // If our tip is behind, a peer could try to send us
4427  // low-work blocks on a fake chain that we would never
4428  // request; don't process these.
4429  if (pindex->nChainWork < nMinimumChainWork) {
4430  return true;
4431  }
4432  }
4433 
4434  const CChainParams &params{m_chainman.GetParams()};
4435  const Consensus::Params &consensusParams = params.GetConsensus();
4436 
4437  if (!CheckBlock(block, state, consensusParams,
4438  BlockValidationOptions(config)) ||
4439  !ContextualCheckBlock(block, state, consensusParams, pindex->pprev)) {
 // NOTE(review): the condition continuation (original 4441, presumably
 // excluding BLOCK_MUTATED results) was lost in extraction.
4440  if (state.IsInvalid() &&
4442  pindex->nStatus = pindex->nStatus.withFailed();
4443  m_blockman.m_dirty_blockindex.insert(pindex);
4444  }
4445 
4446  return error("%s: %s (block %s)", __func__, state.ToString(),
4447  block.GetHash().ToString());
4448  }
4449 
4450  // If connecting the new block would require rewinding more than one block
4451  // from the active chain (i.e., a "deep reorg"), then mark the new block as
4452  // parked. If it has enough work then it will be automatically unparked
4453  // later, during FindMostWorkChain. We mark the block as parked at the very
4454  // last minute so we can make sure everything is ready to be reorged if
4455  // needed.
4456  if (gArgs.GetBoolArg("-parkdeepreorg", true)) {
4457  const CBlockIndex *pindexFork = m_chain.FindFork(pindex);
4458  if (pindexFork && pindexFork->nHeight + 1 < m_chain.Height()) {
4459  LogPrintf("Park block %s as it would cause a deep reorg.\n",
4460  pindex->GetBlockHash().ToString());
4461  pindex->nStatus = pindex->nStatus.withParked();
4462  m_blockman.m_dirty_blockindex.insert(pindex);
4463  }
4464  }
4465 
4466  // Header is valid/has work and the merkle tree is good.
4467  // Relay now, but if it does not build on our best tip, let the
4468  // SendMessages loop relay it.
4469  if (!IsInitialBlockDownload() && m_chain.Tip() == pindex->pprev) {
4470  GetMainSignals().NewPoWValidBlock(pindex, pblock);
4471  }
4472 
4473  // Write block to history file
4474  if (fNewBlock) {
4475  *fNewBlock = true;
4476  }
4477  try {
4478  FlatFilePos blockPos{m_blockman.SaveBlockToDisk(block, pindex->nHeight,
4479  m_chain, params, dbp)};
4480  if (blockPos.IsNull()) {
4481  state.Error(strprintf(
4482  "%s: Failed to find position to write new block to disk",
4483  __func__));
4484  return false;
4485  }
4486  ReceivedBlockTransactions(block, pindex, blockPos);
4487  } catch (const std::runtime_error &e) {
4488  return AbortNode(state, std::string("System error: ") + e.what());
4489  }
4490 
4492 
4493  CheckBlockIndex();
4494 
4495  return true;
4496 }
4497 
// Entry point for new blocks: CheckBlock + AcceptBlock under a single
// cs_main critical section (CBlock::fChecked is not thread-safe), then
// ActivateBestChain outside the lock. NOTE(review): the signature line
// (original 4498, ChainstateManager::ProcessNewBlock) was lost in
// extraction.
4499  const Config &config, const std::shared_ptr<const CBlock> &block,
4500  bool force_processing, bool min_pow_checked, bool *new_block) {
4502 
4503  {
4504  if (new_block) {
4505  *new_block = false;
4506  }
4507 
4508  BlockValidationState state;
4509 
4510  // CheckBlock() does not support multi-threaded block validation
4511  // because CBlock::fChecked can cause data race.
4512  // Therefore, the following critical section must include the
4513  // CheckBlock() call as well.
4514  LOCK(cs_main);
4515 
4516  // Skipping AcceptBlock() for CheckBlock() failures means that we will
4517  // never mark a block as invalid if CheckBlock() fails. This is
4518  // protective against consensus failure if there are any unknown
4519  // forms of block malleability that cause CheckBlock() to fail; see e.g.
4520  // CVE-2012-2459 and
4521  // https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2019-February/016697.html.
4522  // Because CheckBlock() is not very expensive, the anti-DoS benefits of
4523  // caching failure (of a definitely-invalid block) are not substantial.
4524  bool ret =
4525  CheckBlock(*block, state, config.GetChainParams().GetConsensus(),
4526  BlockValidationOptions(config));
4527  if (ret) {
4528  // Store to disk
4529  ret = ActiveChainstate().AcceptBlock(config, block, state,
4530  force_processing, nullptr,
4531  new_block, min_pow_checked);
4532  }
4533 
4534  if (!ret) {
4535  GetMainSignals().BlockChecked(*block, state);
4536  return error("%s: AcceptBlock FAILED (%s)", __func__,
4537  state.ToString());
4538  }
4539  }
4540 
4541  NotifyHeaderTip(ActiveChainstate());
4542 
4543  // Only used to report errors, not invalidity - ignore it
4544  BlockValidationState state;
4545  if (!ActiveChainstate().ActivateBestChain(config, state, block)) {
4546  return error("%s: ActivateBestChain failed (%s)", __func__,
4547  state.ToString());
4548  }
4549 
4550  return true;
4551 }
4552 
// Submit a transaction to the active chainstate's mempool (or dry-run it when
// test_accept is set), then sanity-check the mempool. Fails with
// TX_NO_MEMPOOL when the chainstate has no mempool. NOTE(review): the first
// signature lines (original 4553-4554, ChainstateManager::ProcessTransaction)
// were lost in extraction.
4555  bool test_accept) {
4557  Chainstate &active_chainstate = ActiveChainstate();
4558  if (!active_chainstate.GetMempool()) {
4559  TxValidationState state;
4560  state.Invalid(TxValidationResult::TX_NO_MEMPOOL, "no-mempool");
4561  return MempoolAcceptResult::Failure(state);
4562  }
4563  // Use GetConfig() temporarily. It will be removed in a follow-up by
4564  // making AcceptToMemoryPool take a CChainParams instead of a Config.
4565  // This avoids passing an extra Config argument to this function that will
4566  // be removed soon.
4567  auto result =
4568  AcceptToMemoryPool(::GetConfig(), active_chainstate, tx, GetTime(),
4569  /*bypass_limits=*/false, test_accept);
 // Consistency check of the mempool against the next block's height/UTXOs.
4570  active_chainstate.GetMempool()->check(
4571  active_chainstate.CoinsTip(), active_chainstate.m_chain.Height() + 1);
4572  return result;
4573 }
4574 
// Dry-run full validation of a candidate block on top of the current tip
// without committing any state: contextual header checks, CheckBlock,
// ContextualCheckBlock, and a just-checking ConnectBlock against a throwaway
// coins view. NOTE(review): the first signature line (original 4575,
// presumably `bool TestBlockValidity(`) was lost in extraction.
4576  BlockValidationState &state, const CChainParams &params,
4577  Chainstate &chainstate, const CBlock &block, CBlockIndex *pindexPrev,
4578  const std::function<NodeClock::time_point()> &adjusted_time_callback,
4579  BlockValidationOptions validationOptions) {
4581  assert(pindexPrev && pindexPrev == chainstate.m_chain.Tip());
 // Throwaway view and index entry: nothing here is flushed or stored.
4582  CCoinsViewCache viewNew(&chainstate.CoinsTip());
4583  BlockHash block_hash(block.GetHash());
4584  CBlockIndex indexDummy(block);
4585  indexDummy.pprev = pindexPrev;
4586  indexDummy.nHeight = pindexPrev->nHeight + 1;
4587  indexDummy.phashBlock = &block_hash;
4588 
4589  // NOTE: CheckBlockHeader is called by CheckBlock
4590  if (!ContextualCheckBlockHeader(params, block, state, chainstate.m_blockman,
4591  pindexPrev, adjusted_time_callback())) {
4592  return error("%s: Consensus::ContextualCheckBlockHeader: %s", __func__,
4593  state.ToString());
4594  }
4595 
4596  if (!CheckBlock(block, state, params.GetConsensus(), validationOptions)) {
4597  return error("%s: Consensus::CheckBlock: %s", __func__,
4598  state.ToString());
4599  }
4600 
4601  if (!ContextualCheckBlock(block, state, params.GetConsensus(),
4602  pindexPrev)) {
4603  return error("%s: Consensus::ContextualCheckBlock: %s", __func__,
4604  state.ToString());
4605  }
4606 
 // fJustCheck=true: validate scripts/UTXOs but do not write anything.
4607  if (!chainstate.ConnectBlock(block, state, &indexDummy, viewNew,
4608  validationOptions, nullptr, true)) {
4609  return false;
4610  }
4611 
4612  assert(state.IsValid());
4613  return true;
4614 }
4615 
4616 /* This function is called from the RPC code for pruneblockchain */
4617 void PruneBlockFilesManual(Chainstate &active_chainstate,
4618  int nManualPruneHeight) {
4619  BlockValidationState state;
4620  if (active_chainstate.FlushStateToDisk(state, FlushStateMode::NONE,
4621  nManualPruneHeight)) {
4622  LogPrintf("%s: failed to flush state (%s)\n", __func__,
4623  state.ToString());
4624  }
4625 }
4626 
// Load the persisted mempool from disk into m_mempool when -persistmempool is
// enabled; no-op when this chainstate has no mempool. NOTE(review): a line
// between the closing brace and the end (original 4634, presumably
// m_mempool->SetLoadTried(...)) was lost in extraction.
4627 void Chainstate::LoadMempool(const Config &config, const ArgsManager &args) {
4628  if (!m_mempool) {
4629  return;
4630  }
4631  if (args.GetBoolArg("-persistmempool", DEFAULT_PERSIST_MEMPOOL)) {
4632  ::LoadMempool(config, *m_mempool, *this);
4633  }
4635 }
4636 
// Point m_chain at the block matching the coins database's best block hash.
// Returns false when that hash is unknown to the block index. NOTE(review):
// the signature line (original 4637-4638, Chainstate::LoadChainTip) and some
// trailing LogPrintf arguments (original 4661-4662) were lost in extraction.
4639  const CCoinsViewCache &coins_cache = CoinsTip();
4640  // Never called when the coins view is empty
4641  assert(!coins_cache.GetBestBlock().IsNull());
4642  const CBlockIndex *tip = m_chain.Tip();
4643 
 // Fast path: chain already points at the coins DB's best block.
4644  if (tip && tip->GetBlockHash() == coins_cache.GetBestBlock()) {
4645  return true;
4646  }
4647 
4648  // Load pointer to end of best chain
4649  CBlockIndex *pindex =
4650  m_blockman.LookupBlockIndex(coins_cache.GetBestBlock());
4651  if (!pindex) {
4652  return false;
4653  }
4654  m_chain.SetTip(pindex);
4656 
4657  tip = m_chain.Tip();
4658  LogPrintf(
4659  "Loaded best chain: hashBestChain=%s height=%d date=%s progress=%f\n",
4660  tip->GetBlockHash().ToString(), m_chain.Height(),
4663  return true;
4664 }
4665 
// Show the "Verifying blocks..." progress dialog at 0%. NOTE(review): the
// signature line (original 4666, presumably the CVerifyDB constructor) was
// lost in extraction.
4667  uiInterface.ShowProgress(_("Verifying blocks...").translated, 0, false);
4668 }
4669 
// Dismiss the verification progress dialog (100%, empty title).
// NOTE(review): the signature line (original 4670, presumably the CVerifyDB
// destructor) was lost in extraction.
4671  uiInterface.ShowProgress("", 100, false);
4672 }
4673 
// CVerifyDB::VerifyDB (continuation — the opening line carrying the function
// name and the chainstate/config parameters is missing from this listing).
// Walks backwards from the chain tip over the last nCheckDepth blocks and
// verifies them at increasing strictness:
//   level 0: block readable from disk
//   level 1: CheckBlock passes
//   level 2: undo data readable
//   level 3: in-memory disconnect of tip blocks is consistent (if the coins
//            cache budget allows)
//   level 4: blocks can be reconnected
// Returns VerifyDBResult::SUCCESS on a clean run.
// NOTE(review): several statements — most visibly the error/interrupt return
// statements that should follow the LogPrintf calls and the
// ShutdownRequested() checks (original lines 4746, 4756, 4768, 4787, 4801,
// 4810, 4840, 4848, 4851, 4861, 4864) — were dropped by the documentation
// extractor. Compare against the upstream validation.cpp before relying on
// this text.
4675  CCoinsView &coinsview, int nCheckLevel,
4676  int nCheckDepth) {
4678 
4679  const CChainParams &params = config.GetChainParams();
4680  const Consensus::Params &consensusParams = params.GetConsensus();
4681 
// Nothing to verify on an empty chain or a chain with only the genesis block.
4682  if (chainstate.m_chain.Tip() == nullptr ||
4683  chainstate.m_chain.Tip()->pprev == nullptr) {
4684  return VerifyDBResult::SUCCESS;
4685  }
4686 
4687  // Verify blocks in the best chain
4688  if (nCheckDepth <= 0 || nCheckDepth > chainstate.m_chain.Height()) {
4689  nCheckDepth = chainstate.m_chain.Height();
4690  }
4691 
// Clamp the requested level to the supported range [0, 4].
4692  nCheckLevel = std::max(0, std::min(4, nCheckLevel));
4693  LogPrintf("Verifying last %i blocks at level %i\n", nCheckDepth,
4694  nCheckLevel);
4695 
4696  CCoinsViewCache coins(&coinsview);
4697  CBlockIndex *pindex;
4698  CBlockIndex *pindexFailure = nullptr;
4699  int nGoodTransactions = 0;
4700  BlockValidationState state;
4701  int reportDone = 0;
4702  bool skipped_no_block_data{false};
4703  bool skipped_l3_checks{false};
4704  LogPrintf("Verification progress: 0%%\n");
4705 
4706  const bool is_snapshot_cs{!chainstate.m_from_snapshot_blockhash};
4707 
// Backwards pass from the tip: levels 0-3.
4708  for (pindex = chainstate.m_chain.Tip(); pindex && pindex->pprev;
4709  pindex = pindex->pprev) {
// At level >= 4 the backwards pass only accounts for half the work (the
// reconnect pass below covers the other half), hence the 50/100 factor.
4710  const int percentageDone = std::max(
4711  1, std::min(99, (int)(((double)(chainstate.m_chain.Height() -
4712  pindex->nHeight)) /
4713  (double)nCheckDepth *
4714  (nCheckLevel >= 4 ? 50 : 100))));
4715  if (reportDone < percentageDone / 10) {
4716  // report every 10% step
4717  LogPrintf("Verification progress: %d%%\n", percentageDone);
4718  reportDone = percentageDone / 10;
4719  }
4720 
4721  uiInterface.ShowProgress(_("Verifying blocks...").translated,
4722  percentageDone, false);
4723  if (pindex->nHeight <= chainstate.m_chain.Height() - nCheckDepth) {
4724  break;
4725  }
4726 
4727  if ((chainstate.m_blockman.IsPruneMode() || is_snapshot_cs) &&
4728  !pindex->nStatus.hasData()) {
4729  // If pruning or running under an assumeutxo snapshot, only go
4730  // back as far as we have data.
4731  LogPrintf("VerifyDB(): block verification stopping at height %d "
4732  "(no data). This could be due to pruning or use of an "
4733  "assumeutxo snapshot.\n",
4734  pindex->nHeight);
4735  skipped_no_block_data = true;
4736  break;
4737  }
4738 
4739  CBlock block;
4740 
4741  // check level 0: read from disk
4742  if (!ReadBlockFromDisk(block, pindex, consensusParams)) {
4743  LogPrintf(
4744  "Verification error: ReadBlockFromDisk failed at %d, hash=%s\n",
4745  pindex->nHeight, pindex->GetBlockHash().ToString());
// NOTE(review): error-return statement dropped here by the extractor.
4747  }
4748 
4749  // check level 1: verify block validity
4750  if (nCheckLevel >= 1 && !CheckBlock(block, state, consensusParams,
4751  BlockValidationOptions(config))) {
4752  LogPrintf(
4753  "Verification error: found bad block at %d, hash=%s (%s)\n",
4754  pindex->nHeight, pindex->GetBlockHash().ToString(),
4755  state.ToString());
// NOTE(review): error-return statement dropped here by the extractor.
4757  }
4758 
4759  // check level 2: verify undo validity
4760  if (nCheckLevel >= 2 && pindex) {
4761  CBlockUndo undo;
4762  if (!pindex->GetUndoPos().IsNull()) {
4763  if (!UndoReadFromDisk(undo, pindex)) {
4764  LogPrintf("Verification error: found bad undo data at %d, "
4765  "hash=%s\n",
4766  pindex->nHeight,
4767  pindex->GetBlockHash().ToString());
// NOTE(review): error-return statement dropped here by the extractor.
4769  }
4770  }
4771  }
4772  // check level 3: check for inconsistencies during memory-only
4773  // disconnect of tip blocks
4774  size_t curr_coins_usage = coins.DynamicMemoryUsage() +
4775  chainstate.CoinsTip().DynamicMemoryUsage();
4776 
4777  if (nCheckLevel >= 3) {
// Only attempt the in-memory disconnect while the combined cache usage
// stays within the configured -dbcache budget for the coins tip cache.
4778  if (curr_coins_usage <= chainstate.m_coinstip_cache_size_bytes) {
4779  assert(coins.GetBestBlock() == pindex->GetBlockHash());
4780  DisconnectResult res =
4781  chainstate.DisconnectBlock(block, pindex, coins);
4782  if (res == DisconnectResult::FAILED) {
4783  LogPrintf("Verification error: irrecoverable inconsistency "
4784  "in block data at %d, hash=%s\n",
4785  pindex->nHeight,
4786  pindex->GetBlockHash().ToString());
// NOTE(review): error-return statement dropped here by the extractor.
4788  }
// An UNCLEAN disconnect invalidates the good-transaction tally so far
// and records the failing block for the summary below.
4789  if (res == DisconnectResult::UNCLEAN) {
4790  nGoodTransactions = 0;
4791  pindexFailure = pindex;
4792  } else {
4793  nGoodTransactions += block.vtx.size();
4794  }
4795  } else {
4796  skipped_l3_checks = true;
4797  }
4798  }
4799 
4800  if (ShutdownRequested()) {
// NOTE(review): the early-return on shutdown was dropped by the extractor.
4802  }
4803  }
4804 
4805  if (pindexFailure) {
4806  LogPrintf("Verification error: coin database inconsistencies found "
4807  "(last %i blocks, %i good transactions before that)\n",
4808  chainstate.m_chain.Height() - pindexFailure->nHeight + 1,
4809  nGoodTransactions);
// NOTE(review): error-return statement dropped here by the extractor.
4811  }
4812  if (skipped_l3_checks) {
4813  LogPrintf("Skipped verification of level >=3 (insufficient database "
4814  "cache size). Consider increasing -dbcache.\n");
4815  }
4816 
4817  // store block count as we move pindex at check level >= 4
4818  int block_count = chainstate.m_chain.Height() - pindex->nHeight;
4819 
4820  // check level 4: try reconnecting blocks
// Level 3 leaves `coins` at the state of the deepest checked block, which
// level 4 relies on — hence it is skipped when level 3 was skipped.
4821  if (nCheckLevel >= 4 && !skipped_l3_checks) {
4822  while (pindex != chainstate.m_chain.Tip()) {
4823  const int percentageDone = std::max(
4824  1, std::min(99, 100 - int(double(chainstate.m_chain.Height() -
4825  pindex->nHeight) /
4826  double(nCheckDepth) * 50)));
4827  if (reportDone < percentageDone / 10) {
4828  // report every 10% step
4829  LogPrintf("Verification progress: %d%%\n", percentageDone);
4830  reportDone = percentageDone / 10;
4831  }
4832  uiInterface.ShowProgress(_("Verifying blocks...").translated,
4833  percentageDone, false);
4834  pindex = chainstate.m_chain.Next(pindex);
4835  CBlock block;
4836  if (!ReadBlockFromDisk(block, pindex, consensusParams)) {
4837  LogPrintf("Verification error: ReadBlockFromDisk failed at %d, "
4838  "hash=%s\n",
4839  pindex->nHeight, pindex->GetBlockHash().ToString());
// NOTE(review): error-return statement dropped here by the extractor.
4841  }
4842  if (!chainstate.ConnectBlock(block, state, pindex, coins,
4843  BlockValidationOptions(config))) {
4844  LogPrintf("Verification error: found unconnectable block at "
4845  "%d, hash=%s (%s)\n",
4846  pindex->nHeight, pindex->GetBlockHash().ToString(),
4847  state.ToString());
// NOTE(review): error-return statement dropped here by the extractor.
4849  }
4850  if (ShutdownRequested()) {
// NOTE(review): the early-return on shutdown was dropped by the extractor.
4852  }
4853  }
4854  }
4855 
4856  LogPrintf("Verification: No coin database inconsistencies in last %i "
4857  "blocks (%i transactions)\n",
4858  block_count, nGoodTransactions);
4859 
// Skipped checks downgrade the result — NOTE(review): the specific return
// values (original lines 4861 and 4864) were dropped by the extractor.
4860  if (skipped_l3_checks) {
4862  }
4863  if (skipped_no_block_data) {
4865  }
4866  return VerifyDBResult::SUCCESS;
4867 }
4868 
// Chainstate::RollforwardBlock (continuation — the opening line carrying the
// function name and the `const CBlockIndex *pindex` parameter is missing from
// this listing). Re-applies the coin effects of one block on top of `view`
// while replaying blocks after an interrupted flush: first add every output
// of every transaction, then spend the inputs of the non-coinbase
// transactions. Returns false (via error()) only if the block cannot be read
// from disk.
4874  CCoinsViewCache &view) {
4876  // TODO: merge with ConnectBlock
4877  CBlock block;
4878  if (!ReadBlockFromDisk(block, pindex, m_chainman.GetConsensus())) {
4879  return error("ReplayBlock(): ReadBlockFromDisk failed at %d, hash=%s",
4880  pindex->nHeight, pindex->GetBlockHash().ToString());
4881  }
4882 
// Add all outputs first so inputs spent within the same block resolve.
4883  for (const CTransactionRef &tx : block.vtx) {
4884  // Pass check = true as every addition may be an overwrite.
4885  AddCoins(view, *tx, pindex->nHeight, true);
4886  }
4887 
// Then spend every input of every non-coinbase transaction.
4888  for (const CTransactionRef &tx : block.vtx) {
4889  if (tx->IsCoinBase()) {
4890  continue;
4891  }
4892 
4893  for (const CTxIn &txin : tx->vin) {
4894  view.SpendCoin(txin.prevout);
4895  }
4896  }
4897 
4898  return true;
4899 }
4900 
// Chainstate::ReplayBlocks (the opening signature line is missing from this
// listing). Restores the coins database to a consistent state after an
// interrupted flush: db.GetHeadBlocks() reports the two tips recorded
// mid-flush ([0] = new tip, [1] = old tip); the old branch is disconnected
// down to the last common ancestor, then blocks are rolled forward to the
// new tip. Returns true on success or when no replay is needed.
4902  LOCK(cs_main);
4903 
4904  CCoinsView &db = this->CoinsDB();
4905  CCoinsViewCache cache(&db);
4906 
4907  std::vector<BlockHash> hashHeads = db.GetHeadBlocks();
4908  if (hashHeads.empty()) {
4909  // We're already in a consistent state.
4910  return true;
4911  }
4912  if (hashHeads.size() != 2) {
4913  return error("ReplayBlocks(): unknown inconsistent state");
4914  }
4915 
4916  uiInterface.ShowProgress(_("Replaying blocks...").translated, 0, false);
4917  LogPrintf("Replaying blocks\n");
4918 
4919  // Old tip during the interrupted flush.
4920  const CBlockIndex *pindexOld = nullptr;
4921  // New tip during the interrupted flush.
4922  const CBlockIndex *pindexNew;
4923  // Latest block common to both the old and the new tip.
4924  const CBlockIndex *pindexFork = nullptr;
4925 
4926  if (m_blockman.m_block_index.count(hashHeads[0]) == 0) {
4927  return error(
4928  "ReplayBlocks(): reorganization to unknown block requested");
4929  }
4930 
4931  pindexNew = &(m_blockman.m_block_index[hashHeads[0]]);
4932 
4933  if (!hashHeads[1].IsNull()) {
4934  // The old tip is allowed to be 0, indicating it's the first flush.
4935  if (m_blockman.m_block_index.count(hashHeads[1]) == 0) {
4936  return error(
4937  "ReplayBlocks(): reorganization from unknown block requested");
4938  }
4939 
4940  pindexOld = &(m_blockman.m_block_index[hashHeads[1]]);
4941  pindexFork = LastCommonAncestor(pindexOld, pindexNew);
4942  assert(pindexFork != nullptr);
4943  }
4944 
4945  // Rollback along the old branch.
4946  while (pindexOld != pindexFork) {
4947  if (pindexOld->nHeight > 0) {
4948  // Never disconnect the genesis block.
4949  CBlock block;
4950  if (!ReadBlockFromDisk(block, pindexOld,
4951  m_chainman.GetConsensus())) {
4952  return error("RollbackBlock(): ReadBlockFromDisk() failed at "
4953  "%d, hash=%s",
4954  pindexOld->nHeight,
4955  pindexOld->GetBlockHash().ToString());
4956  }
4957 
4958  LogPrintf("Rolling back %s (%i)\n",
4959  pindexOld->GetBlockHash().ToString(), pindexOld->nHeight);
4960  DisconnectResult res = DisconnectBlock(block, pindexOld, cache);
4961  if (res == DisconnectResult::FAILED) {
4962  return error(
4963  "RollbackBlock(): DisconnectBlock failed at %d, hash=%s",
4964  pindexOld->nHeight, pindexOld->GetBlockHash().ToString());
4965  }
4966 
4967  // If DisconnectResult::UNCLEAN is returned, it means a non-existing
4968  // UTXO was deleted, or an existing UTXO was overwritten. It
4969  // corresponds to cases where the block-to-be-disconnect never had
4970  // all its operations applied to the UTXO set. However, as both
4971  // writing a UTXO and deleting a UTXO are idempotent operations, the
4972  // result is still a version of the UTXO set with the effects of
4973  // that block undone.
4974  }
4975  pindexOld = pindexOld->pprev;
4976  }
4977 
4978  // Roll forward from the forking point to the new tip.
4979  int nForkHeight = pindexFork ? pindexFork->nHeight : 0;
4980  for (int nHeight = nForkHeight + 1; nHeight <= pindexNew->nHeight;
4981  ++nHeight) {
4982  const CBlockIndex &pindex{*Assert(pindexNew->GetAncestor(nHeight))};
4983  LogPrintf("Rolling forward %s (%i)\n", pindex.GetBlockHash().ToString(),
4984  nHeight);
4985  uiInterface.ShowProgress(_("Replaying blocks...").translated,
4986  (int)((nHeight - nForkHeight) * 100.0 /
4987  (pindexNew->nHeight - nForkHeight)),
4988  false);
4989  if (!RollforwardBlock(&pindex, cache)) {
4990  return false;
4991  }
4992  }
4993 
// Record the new tip and persist the replayed state.
4994  cache.SetBestBlock(pindexNew->GetBlockHash());
4995  cache.Flush();
4996  uiInterface.ShowProgress("", 100, false);
4997  return true;
4998 }
4999 
5000 // May NOT be used after any connections are up as much of the peer-processing
5001 // logic assumes a consistent block index state
// Body of what appears to be Chainstate::UnloadBlockIndex — NOTE(review): the
// signature lines (original 5002-5003) were dropped by the documentation
// extractor. Resets per-chainstate bookkeeping: the block sequence counter,
// the best-fork tracking pointers, and the set of candidate tips.
5004  nBlockSequenceId = 1;
5005  m_best_fork_tip = nullptr;
5006  m_best_fork_base = nullptr;
5007  setBlockIndexCandidates.clear();
5008 }
5009 
// ChainstateManager::LoadBlockIndex (the opening signature lines, original
// 5010-5011, are missing from this listing). Loads the block index from the
// database (unless reindexing), seeds each chainstate's candidate-tip set —
// honoring assumed-valid (assumeutxo) semantics — and refreshes the
// best-invalid / best-parked / best-header trackers. Returns false on DB
// failure or shutdown.
// NOTE(review): the comparator argument to std::sort (original line 5023) and
// part of the candidate-eligibility condition (original line 5060) were
// dropped by the documentation extractor.
5012  // Load block index from databases
5013  bool needs_init = fReindex;
5014  if (!fReindex) {
5015  bool ret = m_blockman.LoadBlockIndexDB(GetConsensus());
5016  if (!ret) {
5017  return false;
5018  }
5019 
5020  std::vector<CBlockIndex *> vSortedByHeight{
5021  m_blockman.GetAllBlockIndices()};
5022  std::sort(vSortedByHeight.begin(), vSortedByHeight.end(),
5024 
5025  // Find start of assumed-valid region.
5026  int first_assumed_valid_height = std::numeric_limits<int>::max();
5027 
5028  for (const CBlockIndex *block : vSortedByHeight) {
5029  if (block->IsAssumedValid()) {
5030  auto chainstates = GetAll();
5031 
5032  // If we encounter an assumed-valid block index entry, ensure
5033  // that we have one chainstate that tolerates assumed-valid
5034  // entries and another that does not (i.e. the background
5035  // validation chainstate), since assumed-valid entries should
5036  // always be pending validation by a fully-validated chainstate.
5037  auto any_chain = [&](auto fnc) {
5038  return std::any_of(chainstates.cbegin(), chainstates.cend(),
5039  fnc);
5040  };
5041  assert(any_chain([](auto chainstate) {
5042  return chainstate->reliesOnAssumedValid();
5043  }));
5044  assert(any_chain([](auto chainstate) {
5045  return !chainstate->reliesOnAssumedValid();
5046  }));
5047 
5048  first_assumed_valid_height = block->nHeight;
5049  LogPrintf("Saw first assumedvalid block at height %d (%s)\n",
5050  first_assumed_valid_height, block->ToString());
5051  break;
5052  }
5053  }
5054 
5055  for (CBlockIndex *pindex : vSortedByHeight) {
5056  if (ShutdownRequested()) {
5057  return false;
5058  }
5059  if (pindex->IsAssumedValid() ||
5061  (pindex->HaveTxsDownloaded() || pindex->pprev == nullptr))) {
5062  // Fill each chainstate's block candidate set. Only add
5063  // assumed-valid blocks to the tip candidate set if the
5064  // chainstate is allowed to rely on assumed-valid blocks.
5065  //
5066  // If all setBlockIndexCandidates contained the assumed-valid
5067  // blocks, the background chainstate's ActivateBestChain() call
5068  // would add assumed-valid blocks to the chain (based on how
5069  // FindMostWorkChain() works). Obviously we don't want this
5070  // since the purpose of the background validation chain is to
5071  // validate assumed-valid blocks.
5072  //
5073  // Note: This is considering all blocks whose height is greater
5074  // or equal to the first assumed-valid block to be assumed-valid
5075  // blocks, and excluding them from the background chainstate's
5076  // setBlockIndexCandidates set. This does mean that some blocks
5077  // which are not technically assumed-valid (later blocks on a
5078  // fork beginning before the first assumed-valid block) might
5079  // not get added to the background chainstate, but this is ok,
5080  // because they will still be attached to the active chainstate
5081  // if they actually contain more work.
5082  //
5083  // Instead of this height-based approach, an earlier attempt was
5084  // made at detecting "holistically" whether the block index
5085  // under consideration relied on an assumed-valid ancestor, but
5086  // this proved to be too slow to be practical.
5087  for (Chainstate *chainstate : GetAll()) {
5088  if (chainstate->reliesOnAssumedValid() ||
5089  pindex->nHeight < first_assumed_valid_height) {
5090  chainstate->setBlockIndexCandidates.insert(pindex);
5091  }
5092  }
5093  }
5094 
// Track the highest-work invalid block seen.
5095  if (pindex->nStatus.isInvalid() &&
5096  (!m_best_invalid ||
5097  pindex->nChainWork > m_best_invalid->nChainWork)) {
5098  m_best_invalid = pindex;
5099  }
5100 
// Track the highest-work parked block seen.
5101  if (pindex->nStatus.isOnParkedChain() &&
5102  (!m_best_parked ||
5103  pindex->nChainWork > m_best_parked->nChainWork)) {
5104  m_best_parked = pindex;
5105  }
5106 
// Track the best header known (TREE-valid, most work).
5107  if (pindex->IsValid(BlockValidity::TREE) &&
5108  (m_best_header == nullptr ||
5109  CBlockIndexWorkComparator()(m_best_header, pindex))) {
5110  m_best_header = pindex;
5111  }
5112  }
5113 
5114  needs_init = m_blockman.m_block_index.empty();
5115  }
5116 
5117  if (needs_init) {
5118  // Everything here is for *new* reindex/DBs. Thus, though
5119  // LoadBlockIndexDB may have set fReindex if we shut down
5120  // mid-reindex previously, we don't check fReindex and
5121  // instead only check it prior to LoadBlockIndexDB to set
5122  // needs_init.
5123 
5124  LogPrintf("Initializing databases...\n");
5125  }
5126  return true;
5127 }
5128 
// Chainstate::LoadGenesisBlock (the opening signature line, original 5129, is
// missing from this listing). Writes the genesis block to disk and registers
// it in the block index, unless it is already present. Returns true on
// success or if already initialized; false (via error()) on write failure.
5130  LOCK(cs_main);
5131 
5132  const CChainParams &params{m_chainman.GetParams()};
5133 
5134  // Check whether we're already initialized by checking for genesis in
5135  // m_blockman.m_block_index. Note that we can't use m_chain here, since it
5136  // is set based on the coins db, not the block index db, which is the only
5137  // thing loaded at this point.
5138  if (m_blockman.m_block_index.count(params.GenesisBlock().GetHash())) {
5139  return true;
5140  }
5141 
5142  try {
5143  const CBlock &block = params.GenesisBlock();
// Height 0, no known position (nullptr) — a fresh write.
5144  FlatFilePos blockPos{
5145  m_blockman.SaveBlockToDisk(block, 0, m_chain, params, nullptr)};
5146  if (blockPos.IsNull()) {
5147  return error("%s: writing genesis block to disk failed", __func__);
5148  }
5149  CBlockIndex *pindex =
5150  m_blockman.AddToBlockIndex(block, m_chainman.m_best_header);
5151  ReceivedBlockTransactions(block, pindex, blockPos);
5152  } catch (const std::runtime_error &e) {
5153  return error("%s: failed to write genesis block: %s", __func__,
5154  e.what());
5155  }
5156 
5157  return true;
5158 }
5159 
// Imports blocks from an external file (-reindex / -loadblock): scans for the
// network's disk magic, deserializes each {magic, size, block} record,
// accepts blocks whose parent is known, and stashes out-of-order blocks in
// `blocks_with_unknown_parent` to be retried once their parent arrives.
// `dbp` (the file position, updated per block) and the multimap must be both
// present (-reindex) or both absent (-loadblock).
// NOTE(review): several lines were dropped by the documentation extractor —
// most visibly the `buf` declaration read at original line 5195, the second
// memcmp argument (5197), the LookupBlockIndex call assigned to `pindex`
// (5243), and the LogPrint/LogPrintf category arguments (5230, 5256, 5282,
// 5311, 5348). Compare against the upstream validation.cpp.
5160 void Chainstate::LoadExternalBlockFile(
5161  const Config &config, FILE *fileIn, FlatFilePos *dbp,
5162  std::multimap<BlockHash, FlatFilePos> *blocks_with_unknown_parent) {
5164 
5165  // Either both should be specified (-reindex), or neither (-loadblock).
5166  assert(!dbp == !blocks_with_unknown_parent);
5167 
5168  int64_t nStart = GetTimeMillis();
5169  const CChainParams &params{m_chainman.GetParams()};
5170 
5171  int nLoaded = 0;
5172  try {
5173  // This takes over fileIn and calls fclose() on it in the CBufferedFile
5174  // destructor. Make sure we have at least 2*MAX_TX_SIZE space in there
5175  // so any transaction can fit in the buffer.
5176  CBufferedFile blkdat(fileIn, 2 * MAX_TX_SIZE, MAX_TX_SIZE + 8, SER_DISK,
5177  CLIENT_VERSION);
5178  uint64_t nRewind = blkdat.GetPos();
5179  while (!blkdat.eof()) {
5180  if (ShutdownRequested()) {
5181  return;
5182  }
5183 
5184  blkdat.SetPos(nRewind);
5185  // Start one byte further next time, in case of failure.
5186  nRewind++;
5187  // Remove former limit.
5188  blkdat.SetLimit();
5189  unsigned int nSize = 0;
5190  try {
5191  // Locate a header.
5193  blkdat.FindByte(char(params.DiskMagic()[0]));
5194  nRewind = blkdat.GetPos() + 1;
5195  blkdat >> buf;
5196  if (memcmp(buf, params.DiskMagic().data(),
5198  continue;
5199  }
5200 
5201  // Read size.
// A serialized block is at least 80 bytes (the header size).
5202  blkdat >> nSize;
5203  if (nSize < 80) {
5204  continue;
5205  }
5206  } catch (const std::exception &) {
5207  // No valid block header found; don't complain.
5208  break;
5209  }
5210 
5211  try {
5212  // read block
5213  uint64_t nBlockPos = blkdat.GetPos();
5214  if (dbp) {
5215  dbp->nPos = nBlockPos;
5216  }
5217  blkdat.SetLimit(nBlockPos + nSize);
5218  std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
5219  CBlock &block = *pblock;
5220  blkdat >> block;
5221  nRewind = blkdat.GetPos();
5222 
5223  const BlockHash hash = block.GetHash();
5224  {
5225  LOCK(cs_main);
5226  // detect out of order blocks, and store them for later
5227  if (hash != params.GetConsensus().hashGenesisBlock &&
5229  LogPrint(
5231  "%s: Out of order block %s, parent %s not known\n",
5232  __func__, hash.ToString(),
5233  block.hashPrevBlock.ToString());
5234  if (dbp && blocks_with_unknown_parent) {
5235  blocks_with_unknown_parent->emplace(
5236  block.hashPrevBlock, *dbp);
5237  }
5238  continue;
5239  }
5240 
5241  // process in case the block isn't known yet
5242  const CBlockIndex *pindex =
5244  if (!pindex || !pindex->nStatus.hasData()) {
5245  BlockValidationState state;
5246  if (AcceptBlock(config, pblock, state, true, dbp,
5247  nullptr, true)) {
5248  nLoaded++;
5249  }
5250  if (state.IsError()) {
5251  break;
5252  }
5253  } else if (hash != params.GetConsensus().hashGenesisBlock &&
5254  pindex->nHeight % 1000 == 0) {
5255  LogPrint(
5257  "Block Import: already had block %s at height %d\n",
5258  hash.ToString(), pindex->nHeight);
5259  }
5260  }
5261 
5262  // Activate the genesis block so normal node progress can
5263  // continue
5264  if (hash == params.GetConsensus().hashGenesisBlock) {
5265  BlockValidationState state;
5266  if (!ActivateBestChain(config, state, nullptr)) {
5267  break;
5268  }
5269  }
5270 
5271  if (m_blockman.IsPruneMode() && !fReindex && pblock) {
5272  // Must update the tip for pruning to work while importing
5273  // with -loadblock. This is a tradeoff to conserve disk
5274  // space at the expense of time spent updating the tip to be
5275  // able to prune. Otherwise, ActivateBestChain won't be
5276  // called by the import process until after all of the block
5277  // files are loaded. ActivateBestChain can be called by
5278  // concurrent network message processing, but that is not
5279  // reliable for the purpose of pruning while importing.
5280  BlockValidationState state;
5281  if (!ActivateBestChain(config, state, pblock)) {
5283  "failed to activate chain (%s)\n",
5284  state.ToString());
5285  break;
5286  }
5287  }
5288 
5289  NotifyHeaderTip(*this);
5290 
5291  if (!blocks_with_unknown_parent) {
5292  continue;
5293  }
5294 
5295  // Recursively process earlier encountered successors of this
5296  // block
5297  std::deque<BlockHash> queue;
5298  queue.push_back(hash);
5299  while (!queue.empty()) {
5300  BlockHash head = queue.front();
5301  queue.pop_front();
5302  auto range = blocks_with_unknown_parent->equal_range(head);
5303  while (range.first != range.second) {
5304  std::multimap<BlockHash, FlatFilePos>::iterator it =
5305  range.first;
5306  std::shared_ptr<CBlock> pblockrecursive =
5307  std::make_shared<CBlock>();
5308  if (ReadBlockFromDisk(*pblockrecursive, it->second,
5309  params.GetConsensus())) {
5310  LogPrint(
5312  "%s: Processing out of order child %s of %s\n",
5313  __func__, pblockrecursive->GetHash().ToString(),
5314  head.ToString());
5315  LOCK(cs_main);
5316  BlockValidationState dummy;
5317  if (AcceptBlock(config, pblockrecursive, dummy,
5318  true, &it->second, nullptr, true)) {
5319  nLoaded++;
5320  queue.push_back(pblockrecursive->GetHash());
5321  }
5322  }
5323  range.first++;
5324  blocks_with_unknown_parent->erase(it);
5325  NotifyHeaderTip(*this);
5326  }
5327  }
5328  } catch (const std::exception &e) {
5329  // Historical bugs added extra data to the block files that does
5330  // not deserialize cleanly. Commonly this data is between
5331  // readable blocks, but it does not really matter. Such data is
5332  // not fatal to the import process. The code that reads the
5333  // block files deals with invalid data by simply ignoring it. It
5334  // continues to search for the next {4 byte magic message start
5335  // bytes + 4 byte length + block} that does deserialize cleanly
5336  // and passes all of the other block validation checks dealing
5337  // with POW and the merkle root, etc... We merely note with this
5338  // informational log message when unexpected data is
5339  // encountered. We could also be experiencing a storage system
5340  // read error, or a read of a previous bad write. These are
5341  // possible, but less likely scenarios. We don't have enough
5342  // information to tell a difference here. The reindex process is
5343  // not the place to attempt to clean and/or compact the block
5344  // files. If so desired, a studious node operator may use
5345  // knowledge of the fact that the block files are not entirely
5346  // pristine in order to prepare a set of pristine, and perhaps
5347  // ordered, block files for later reindexing.
5349  "%s: unexpected data at file offset 0x%x - %s. "
5350  "continuing\n",
5351  __func__, (nRewind - 1), e.what());
5352  }
5353  }
5354  } catch (const std::runtime_error &e) {
5355  AbortNode(std::string("System error: ") + e.what());
5356  }
5357 
5358  LogPrintf("Loaded %i blocks from external file in %dms\n", nLoaded,
5359  GetTimeMillis() - nStart);
5360 }
5361 
5363  if (!fCheckBlockIndex) {
5364  return;
5365  }
5366 
5367  LOCK(cs_main);
5368 
5369  // During a reindex, we read the genesis block and call CheckBlockIndex
5370  // before ActivateBestChain, so we have the genesis block in
5371  // m_blockman.m_block_index but no active chain. (A few of the tests when
5372  // iterating the block tree require that m_chain has been initialized.)
5373  if (m_chain.Height() < 0) {
5374  assert(m_blockman.m_block_index.size() <= 1);
5375  return;
5376  }
5377 
5378  // Build forward-pointing map of the entire block tree.
5379  std::multimap<CBlockIndex *, CBlockIndex *> forward;
5380  for (auto &[_, block_index] : m_blockman.m_block_index) {
5381  forward.emplace(block_index.pprev, &block_index);
5382  }
5383 
5384  assert(forward.size() == m_blockman.m_block_index.size());
5385 
5386  std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
5387  std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
5388  rangeGenesis = forward.equal_range(nullptr);
5389  CBlockIndex *pindex = rangeGenesis.first->second;
5390  rangeGenesis.first++;
5391  // There is only one index entry with parent nullptr.
5392  assert(rangeGenesis.first == rangeGenesis.second);
5393 
5394  // Iterate over the entire block tree, using depth-first search.
5395  // Along the way, remember whether there are blocks on the path from genesis
5396  // block being explored which are the first to have certain properties.
5397  size_t nNodes = 0;
5398  int nHeight = 0;
5399  // Oldest ancestor of pindex which is invalid.
5400  CBlockIndex *pindexFirstInvalid = nullptr;
5401  // Oldest ancestor of pindex which is parked.
5402  CBlockIndex *pindexFirstParked = nullptr;
5403  // Oldest ancestor of pindex which does not have data available.
5404  CBlockIndex *pindexFirstMissing = nullptr;
5405  // Oldest ancestor of pindex for which nTx == 0.
5406  CBlockIndex *pindexFirstNeverProcessed = nullptr;
5407  // Oldest ancestor of pindex which does not have BLOCK_VALID_TREE
5408  // (regardless of being valid or not).
5409  CBlockIndex *pindexFirstNotTreeValid = nullptr;
5410  // Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS
5411  // (regardless of being valid or not).
5412  CBlockIndex *pindexFirstNotTransactionsValid = nullptr;
5413  // Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN
5414  // (regardless of being valid or not).
5415  CBlockIndex *pindexFirstNotChainValid = nullptr;
5416  // Oldest ancestor of pindex which does not have BLOCK_VALID_SCRIPTS
5417  // (regardless of being valid or not).
5418  CBlockIndex *pindexFirstNotScriptsValid = nullptr;
5419  while (pindex != nullptr) {
5420  nNodes++;
5421  if (pindexFirstInvalid == nullptr && pindex->nStatus.hasFailed()) {
5422  pindexFirstInvalid = pindex;
5423  }
5424  if (pindexFirstParked == nullptr && pindex->nStatus.isParked()) {
5425  pindexFirstParked = pindex;
5426  }
5427  // Assumed-valid index entries will not have data since we haven't
5428  // downloaded the full block yet.
5429  if (pindexFirstMissing == nullptr && !pindex->nStatus.hasData() &&
5430  !pindex->IsAssumedValid()) {
5431  pindexFirstMissing = pindex;
5432  }
5433  if (pindexFirstNeverProcessed == nullptr && pindex->nTx == 0) {
5434  pindexFirstNeverProcessed = pindex;
5435  }
5436  if (pindex->pprev != nullptr && pindexFirstNotTreeValid == nullptr &&
5437  pindex->nStatus.getValidity() < BlockValidity::TREE) {
5438  pindexFirstNotTreeValid = pindex;
5439  }
5440  if (pindex->pprev != nullptr && !pindex->IsAssumedValid()) {
5441  if (pindexFirstNotTransactionsValid == nullptr &&
5442  pindex->nStatus.getValidity() < BlockValidity::TRANSACTIONS) {
5443  pindexFirstNotTransactionsValid = pindex;
5444  }
5445  if (pindexFirstNotChainValid == nullptr &&
5446  pindex->nStatus.getValidity() < BlockValidity::CHAIN) {
5447  pindexFirstNotChainValid = pindex;
5448  }
5449  if (pindexFirstNotScriptsValid == nullptr &&
5450  pindex->nStatus.getValidity() < BlockValidity::SCRIPTS) {
5451  pindexFirstNotScriptsValid = pindex;
5452  }
5453  }
5454 
5455  // Begin: actual consistency checks.
5456  if (pindex->pprev == nullptr) {
5457  // Genesis block checks.
5458  // Genesis block's hash must match.
5459  assert(pindex->GetBlockHash() ==
5461  // The current active chain's genesis block must be this block.
5462  assert(pindex == m_chain.Genesis());
5463  }
5464  if (!pindex->HaveTxsDownloaded()) {
5465  // nSequenceId can't be set positive for blocks that aren't linked
5466  // (negative is used for preciousblock)
5467  assert(pindex->nSequenceId <= 0);
5468  }
5469  // VALID_TRANSACTIONS is equivalent to nTx > 0 for all nodes (whether or
5470  // not pruning has occurred). HAVE_DATA is only equivalent to nTx > 0
5471  // (or VALID_TRANSACTIONS) if no pruning has occurred.
5472  // Unless these indexes are assumed valid and pending block download on
5473  // a background chainstate.
5474  if (!m_blockman.m_have_pruned && !pindex->IsAssumedValid()) {
5475  // If we've never pruned, then HAVE_DATA should be equivalent to nTx
5476  // > 0
5477  assert(pindex->nStatus.hasData() == (pindex->nTx > 0));
5478  assert(pindexFirstMissing == pindexFirstNeverProcessed);
5479  } else if (pindex->nStatus.hasData()) {
5480  // If we have pruned, then we can only say that HAVE_DATA implies
5481  // nTx > 0
5482  assert(pindex->nTx > 0);
5483  }
5484  if (pindex->nStatus.hasUndo()) {
5485  assert(pindex->nStatus.hasData());
5486  }
5487  if (pindex->IsAssumedValid()) {
5488  // Assumed-valid blocks should have some nTx value.
5489  assert(pindex->nTx > 0);
5490  // Assumed-valid blocks should connect to the main chain.
5491  assert(pindex->nStatus.getValidity() >= BlockValidity::TREE);
5492  } else {
5493  // Otherwise there should only be an nTx value if we have
5494  // actually seen a block's transactions.
5495  // This is pruning-independent.
5496  assert((pindex->nStatus.getValidity() >=
5497  BlockValidity::TRANSACTIONS) == (pindex->nTx > 0));
5498  }
5499  // All parents having had data (at some point) is equivalent to all
5500  // parents being VALID_TRANSACTIONS, which is equivalent to
5501  // HaveTxsDownloaded(). All parents having had data (at some point) is
5502  // equivalent to all parents being VALID_TRANSACTIONS, which is
5503  // equivalent to HaveTxsDownloaded().
5504  assert((pindexFirstNeverProcessed == nullptr) ==
5505  (pindex->HaveTxsDownloaded()));
5506  assert((pindexFirstNotTransactionsValid == nullptr) ==
5507  (pindex->HaveTxsDownloaded()));
5508  // nHeight must be consistent.
5509  assert(pindex->nHeight == nHeight);
5510  // For every block except the genesis block, the chainwork must be
5511  // larger than the parent's.
5512  assert(pindex->pprev == nullptr ||
5513  pindex->nChainWork >= pindex->pprev->nChainWork);
5514  // The pskip pointer must point back for all but the first 2 blocks.
5515  assert(nHeight < 2 ||
5516  (pindex->pskip && (pindex->pskip->nHeight < nHeight)));
5517  // All m_blockman.m_block_index entries must at least be TREE valid
5518  assert(pindexFirstNotTreeValid == nullptr);
5519  if (pindex->nStatus.getValidity() >= BlockValidity::TREE) {
5520  // TREE valid implies all parents are TREE valid
5521  assert(pindexFirstNotTreeValid == nullptr);
5522  }
5523  if (pindex->nStatus.getValidity() >= BlockValidity::CHAIN) {
5524  // CHAIN valid implies all parents are CHAIN valid
5525  assert(pindexFirstNotChainValid == nullptr);
5526  }
5527  if (pindex->nStatus.getValidity() >= BlockValidity::SCRIPTS) {
5528  // SCRIPTS valid implies all parents are SCRIPTS valid
5529  assert(pindexFirstNotScriptsValid == nullptr);
5530  }
5531  if (pindexFirstInvalid == nullptr) {
5532  // Checks for not-invalid blocks.
5533  // The failed mask cannot be set for blocks without invalid parents.
5534  assert(!pindex->nStatus.isInvalid());
5535  }
5536  if (pindexFirstParked == nullptr) {
5537  // Checks for not-parked blocks.
5538  // The parked mask cannot be set for blocks without parked parents.
5539  // (i.e., hasParkedParent only if an ancestor is properly parked).
5540  assert(!pindex->nStatus.isOnParkedChain());
5541  }
5542  if (!CBlockIndexWorkComparator()(pindex, m_chain.Tip()) &&
5543  pindexFirstNeverProcessed == nullptr) {
5544  if (pindexFirstInvalid == nullptr) {
5545  // Don't perform this check for the background chainstate since
5546  // its setBlockIndexCandidates shouldn't have some entries (i.e.
5547  // those past the snapshot block) which do exist in the block
5548  // index for the active chainstate.
5549  if (this == &m_chainman.ActiveChainstate()) {
5550  // If this block sorts at least as good as the current tip
5551  // and is valid and we have all data for its parents, it
5552  // must be in setBlockIndexCandidates or be parked.
5553  if (pindexFirstMissing == nullptr) {
5554  assert(pindex->nStatus.isOnParkedChain() ||
5555  setBlockIndexCandidates.count(pindex));
5556  }
5557  // m_chain.Tip() must also be there even if some data has
5558  // been pruned.
5559  if (pindex == m_chain.Tip()) {
5560  assert(setBlockIndexCandidates.count(pindex));
5561  }
5562  }
5563  // If some parent is missing, then it could be that this block
5564  // was in setBlockIndexCandidates but had to be removed because
5565  // of the missing data. In this case it must be in
5566  // m_blocks_unlinked -- see test below.
5567  }
5568  } else {
5569  // If this block sorts worse than the current tip or some ancestor's
5570  // block has never been seen, it cannot be in
5571  // setBlockIndexCandidates.
5572  assert(setBlockIndexCandidates.count(pindex) == 0);
5573  }
5574  // Check whether this block is in m_blocks_unlinked.
5575  std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
5576  std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
5577  rangeUnlinked =
5578  m_blockman.m_blocks_unlinked.equal_range(pindex->pprev);
5579  bool foundInUnlinked = false;
5580  while (rangeUnlinked.first != rangeUnlinked.second) {
5581  assert(rangeUnlinked.first->first == pindex->pprev);
5582  if (rangeUnlinked.first->second == pindex) {
5583  foundInUnlinked = true;
5584  break;
5585  }
5586  rangeUnlinked.first++;
5587  }
5588  if (pindex->pprev && pindex->nStatus.hasData() &&
5589  pindexFirstNeverProcessed != nullptr &&
5590  pindexFirstInvalid == nullptr) {
5591  // If this block has block data available, some parent was never
5592  // received, and has no invalid parents, it must be in
5593  // m_blocks_unlinked.
5594  assert(foundInUnlinked);
5595  }
5596  if (!pindex->nStatus.hasData()) {
5597  // Can't be in m_blocks_unlinked if we don't HAVE_DATA
5598  assert(!foundInUnlinked);
5599  }
5600  if (pindexFirstMissing == nullptr) {
5601  // We aren't missing data for any parent -- cannot be in
5602  // m_blocks_unlinked.
5603  assert(!foundInUnlinked);
5604  }
5605  if (pindex->pprev && pindex->nStatus.hasData() &&
5606  pindexFirstNeverProcessed == nullptr &&
5607  pindexF