Bitcoin ABC 0.24.7
validation.cpp
1 // Copyright (c) 2009-2010 Satoshi Nakamoto
2 // Copyright (c) 2009-2018 The Bitcoin Core developers
3 // Copyright (c) 2017-2020 The Bitcoin developers
4 // Distributed under the MIT software license, see the accompanying
5 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
6 
7 #include <validation.h>
8 
9 #include <arith_uint256.h>
10 #include <avalanche/avalanche.h>
11 #include <avalanche/processor.h>
12 #include <blockdb.h>
13 #include <blockvalidity.h>
14 #include <chainparams.h>
15 #include <checkpoints.h>
16 #include <checkqueue.h>
17 #include <config.h>
18 #include <consensus/activation.h>
19 #include <consensus/merkle.h>
20 #include <consensus/tx_check.h>
21 #include <consensus/tx_verify.h>
22 #include <consensus/validation.h>
23 #include <hash.h>
24 #include <index/txindex.h>
25 #include <logging.h>
26 #include <logging/timer.h>
27 #include <minerfund.h>
28 #include <node/ui_interface.h>
29 #include <policy/fees.h>
30 #include <policy/mempool.h>
31 #include <policy/policy.h>
32 #include <policy/settings.h>
33 #include <pow/aserti32d.h> // For ResetASERTAnchorBlockCache
34 #include <pow/pow.h>
35 #include <primitives/block.h>
36 #include <primitives/transaction.h>
37 #include <random.h>
38 #include <reverse_iterator.h>
39 #include <script/script.h>
40 #include <script/scriptcache.h>
41 #include <script/sigcache.h>
42 #include <shutdown.h>
43 #include <timedata.h>
44 #include <tinyformat.h>
45 #include <txdb.h>
46 #include <txmempool.h>
47 #include <undo.h>
48 #include <util/check.h> // For NDEBUG compile time check
49 #include <util/moneystr.h>
50 #include <util/strencodings.h>
51 #include <util/system.h>
52 #include <util/translation.h>
53 #include <validationinterface.h>
54 #include <warnings.h>
55 
56 #include <boost/algorithm/string/replace.hpp>
57 
58 #include <optional>
59 #include <string>
60 #include <thread>
61 
62 #define MICRO 0.000001
63 #define MILLI 0.001
64 
66 static constexpr std::chrono::hours DATABASE_WRITE_INTERVAL{1};
68 static constexpr std::chrono::hours DATABASE_FLUSH_INTERVAL{24};
69 const std::vector<std::string> CHECKLEVEL_DOC{
70  "level 0 reads the blocks from disk",
71  "level 1 verifies block validity",
72  "level 2 verifies undo data",
73  "level 3 checks disconnection of tip blocks",
74  "level 4 tries to reconnect the blocks",
75  "each level includes the checks of the previous levels",
76 };
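// Illustrative sketch, assuming the standard -checkblocks/-checklevel startup
// options: the strings above describe the levels accepted by -checklevel,
// which is applied to the most recent -checkblocks blocks when the database
// is verified at startup, e.g.
//
//   bitcoind -checkblocks=200 -checklevel=3
//
// reads the last 200 blocks, verifies their validity and undo data, and
// checks tip-block disconnection (each level includes the previous ones).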
77 
79 
81  LOCK(::cs_main);
84 }
85 
87  LOCK(::cs_main);
89 }
90 
104 
107 std::condition_variable g_best_block_cv;
109 std::atomic_bool fImporting(false);
110 std::atomic_bool fReindex(false);
111 bool fHavePruned = false;
112 bool fPruneMode = false;
113 bool fRequireStandard = true;
114 bool fCheckBlockIndex = false;
116 uint64_t nPruneTarget = 0;
118 
121 
123 
124 // Internal stuff
125 namespace {
126 CBlockIndex *pindexBestInvalid = nullptr;
127 CBlockIndex *pindexBestParked = nullptr;
128 
129 RecursiveMutex cs_LastBlockFile;
130 std::vector<CBlockFileInfo> vinfoBlockFile;
131 int nLastBlockFile = 0;
137 bool fCheckForPruning = false;
138 
140 std::set<const CBlockIndex *> setDirtyBlockIndex;
141 
143 std::set<int> setDirtyFileInfo;
144 } // namespace
145 
147  : excessiveBlockSize(config.GetMaxBlockSize()), checkPoW(true),
148  checkMerkleRoot(true) {}
149 
152  BlockMap::const_iterator it = g_chainman.BlockIndex().find(hash);
153  return it == g_chainman.BlockIndex().end() ? nullptr : it->second;
154 }
155 
157  const CBlockLocator &locator) {
159 
160  // Find the latest block common to locator and chain - we expect that
161  // locator.vHave is sorted descending by height.
162  for (const BlockHash &hash : locator.vHave) {
163  CBlockIndex *pindex = LookupBlockIndex(hash);
164  if (pindex) {
165  if (chain.Contains(pindex)) {
166  return pindex;
167  }
168  if (pindex->GetAncestor(chain.Height()) == chain.Tip()) {
169  return chain.Tip();
170  }
171  }
172  }
173  return chain.Genesis();
174 }
175 
176 std::unique_ptr<CBlockTreeDB> pblocktree;
177 
178 static uint32_t GetNextBlockScriptFlags(const Consensus::Params &params,
179  const CBlockIndex *pindex);
180 
183  assert(lp);
 184  // If there are relative lock times then the maxInputBlock will be set.
185  // If there are no relative lock times, the LockPoints don't depend on the
186  // chain
187  if (lp->maxInputBlock) {
188  // Check whether ::ChainActive() is an extension of the block at which
 189  // the LockPoints calculation was valid. If not, LockPoints are no longer
190  // valid.
191  if (!::ChainActive().Contains(lp->maxInputBlock)) {
192  return false;
193  }
194  }
195 
196  // LockPoints still valid
197  return true;
198 }
199 
200 bool CheckSequenceLocks(const CTxMemPool &pool, const CTransaction &tx,
201  int flags, LockPoints *lp, bool useExistingLockPoints) {
203  AssertLockHeld(pool.cs);
204 
205  CBlockIndex *tip = ::ChainActive().Tip();
206  assert(tip != nullptr);
207 
208  CBlockIndex index;
209  index.pprev = tip;
210  // CheckSequenceLocks() uses ::ChainActive().Height()+1 to evaluate height
211  // based locks because when SequenceLocks() is called within ConnectBlock(),
212  // the height of the block *being* evaluated is what is used. Thus if we
213  // want to know if a transaction can be part of the *next* block, we need to
 214  // use one more than ::ChainActive().Height().
215  index.nHeight = tip->nHeight + 1;
216 
217  std::pair<int, int64_t> lockPair;
218  if (useExistingLockPoints) {
219  assert(lp);
220  lockPair.first = lp->height;
221  lockPair.second = lp->time;
222  } else {
223  // CoinsTip() contains the UTXO set for ::ChainActive().Tip()
224  CCoinsViewMemPool viewMemPool(&::ChainstateActive().CoinsTip(), pool);
225  std::vector<int> prevheights;
226  prevheights.resize(tx.vin.size());
227  for (size_t txinIndex = 0; txinIndex < tx.vin.size(); txinIndex++) {
228  const CTxIn &txin = tx.vin[txinIndex];
229  Coin coin;
230  if (!viewMemPool.GetCoin(txin.prevout, coin)) {
231  return error("%s: Missing input", __func__);
232  }
233  if (coin.GetHeight() == MEMPOOL_HEIGHT) {
 234  // Assume all mempool transactions confirm in the next block
235  prevheights[txinIndex] = tip->nHeight + 1;
236  } else {
237  prevheights[txinIndex] = coin.GetHeight();
238  }
239  }
240  lockPair = CalculateSequenceLocks(tx, flags, prevheights, index);
241  if (lp) {
242  lp->height = lockPair.first;
243  lp->time = lockPair.second;
244  // Also store the hash of the block with the highest height of all
245  // the blocks which have sequence locked prevouts. This hash needs
246  // to still be on the chain for these LockPoint calculations to be
247  // valid.
248  // Note: It is impossible to correctly calculate a maxInputBlock if
249  // any of the sequence locked inputs depend on unconfirmed txs,
250  // except in the special case where the relative lock time/height is
251  // 0, which is equivalent to no sequence lock. Since we assume input
252  // height of tip+1 for mempool txs and test the resulting lockPair
 253  // from CalculateSequenceLocks against tip+1, we know
254  // EvaluateSequenceLocks will fail if there was a non-zero sequence
255  // lock on a mempool input, so we can use the return value of
256  // CheckSequenceLocks to indicate the LockPoints validity.
257  int maxInputHeight = 0;
258  for (const int height : prevheights) {
259  // Can ignore mempool inputs since we'll fail if they had
260  // non-zero locks.
261  if (height != tip->nHeight + 1) {
262  maxInputHeight = std::max(maxInputHeight, height);
263  }
264  }
265  lp->maxInputBlock = tip->GetAncestor(maxInputHeight);
266  }
267  }
268  return EvaluateSequenceLocks(index, lockPair);
269 }
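// Illustrative sketch of a mempool-side call, evaluating the transaction
// against the *next* block as described above. STANDARD_LOCKTIME_VERIFY_FLAGS
// and the locking pattern are assumptions about the caller, not requirements
// imposed by this function:
//
//   LockPoints lp;
//   {
//       LOCK2(cs_main, pool.cs);
//       if (!CheckSequenceLocks(pool, tx, STANDARD_LOCKTIME_VERIFY_FLAGS, &lp,
//                               /* useExistingLockPoints */ false)) {
//           // Not BIP68-final for the next block; reject for now.
//       }
//   }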
270 
271 // Command-line argument "-replayprotectionactivationtime=<timestamp>" will
272 // cause the node to switch to replay protected SigHash ForkID value when the
273 // median timestamp of the previous 11 blocks is greater than or equal to
274 // <timestamp>. Defaults to the pre-defined timestamp when not set.
276  int64_t nMedianTimePast) {
277  return nMedianTimePast >= gArgs.GetArg("-replayprotectionactivationtime",
278  params.gluonActivationTime);
279 }
280 
282  const CBlockIndex *pindexPrev) {
283  if (pindexPrev == nullptr) {
284  return false;
285  }
286 
287  return IsReplayProtectionEnabled(params, pindexPrev->GetMedianTimePast());
288 }
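// Illustrative sketch of how the two overloads compose: activation for the
// block that builds on pindexPrev is decided from pindexPrev's
// median-time-past, with the threshold taken from
// -replayprotectionactivationtime if set, else params.gluonActivationTime:
//
//   const CBlockIndex *pindexPrev = ::ChainActive().Tip();
//   const bool replayProtected =
//       IsReplayProtectionEnabled(params, pindexPrev);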
289 
 290 // Used to avoid mempool polluting consensus critical paths if CCoinsViewMemPool
291 // were somehow broken and returning the wrong scriptPubKeys
293  const CTransaction &tx, TxValidationState &state,
294  const CCoinsViewCache &view, const CTxMemPool &pool, const uint32_t flags,
295  PrecomputedTransactionData &txdata, int &nSigChecksOut)
298 
299  // pool.cs should be locked already, but go ahead and re-take the lock here
300  // to enforce that mempool doesn't change between when we check the view and
301  // when we actually call through to CheckInputScripts
302  LOCK(pool.cs);
303 
304  assert(!tx.IsCoinBase());
305  for (const CTxIn &txin : tx.vin) {
306  const Coin &coin = view.AccessCoin(txin.prevout);
307 
308  // AcceptToMemoryPoolWorker has already checked that the coins are
309  // available, so this shouldn't fail. If the inputs are not available
310  // here then return false.
311  if (coin.IsSpent()) {
312  return false;
313  }
314 
315  // Check equivalence for available inputs.
316  const CTransactionRef &txFrom = pool.get(txin.prevout.GetTxId());
317  if (txFrom) {
318  assert(txFrom->GetId() == txin.prevout.GetTxId());
319  assert(txFrom->vout.size() > txin.prevout.GetN());
320  assert(txFrom->vout[txin.prevout.GetN()] == coin.GetTxOut());
321  } else {
322  const Coin &coinFromDisk =
324  assert(!coinFromDisk.IsSpent());
325  assert(coinFromDisk.GetTxOut() == coin.GetTxOut());
326  }
327  }
328 
329  // Call CheckInputScripts() to cache signature and script validity against
330  // current tip consensus rules.
331  return CheckInputScripts(tx, state, view, flags, /* cacheSigStore = */ true,
332  /* cacheFullScriptStore = */ true, txdata,
333  nSigChecksOut);
334 }
335 
336 namespace {
337 
338 class MemPoolAccept {
339 public:
340  MemPoolAccept(CTxMemPool &mempool)
341  : m_pool(mempool), m_view(&m_dummy),
342  m_viewmempool(&::ChainstateActive().CoinsTip(), m_pool),
343  m_limit_ancestors(
344  gArgs.GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT)),
345  m_limit_ancestor_size(
346  gArgs.GetArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT) *
347  1000),
348  m_limit_descendants(
349  gArgs.GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT)),
350  m_limit_descendant_size(gArgs.GetArg("-limitdescendantsize",
352  1000) {}
353 
354  // We put the arguments we're handed into a struct, so we can pass them
355  // around easier.
356  struct ATMPArgs {
357  const Config &m_config;
358  TxValidationState &m_state;
359  const int64_t m_accept_time;
360  const bool m_bypass_limits;
361  /*
362  * Return any outpoints which were not previously present in the coins
363  * cache, but were added as a result of validating the tx for mempool
364  * acceptance. This allows the caller to optionally remove the cache
365  * additions if the associated transaction ends up being rejected by
366  * the mempool.
367  */
368  std::vector<COutPoint> &m_coins_to_uncache;
369  const bool m_test_accept;
370  Amount *m_fee_out;
371  };
372 
373  // Single transaction acceptance
374  bool AcceptSingleTransaction(const CTransactionRef &ptx, ATMPArgs &args)
376 
377 private:
378  // All the intermediate state that gets passed between the various levels
379  // of checking a given transaction.
380  struct Workspace {
381  Workspace(const CTransactionRef &ptx,
382  const uint32_t next_block_script_verify_flags)
383  : m_ptx(ptx),
384  m_next_block_script_verify_flags(next_block_script_verify_flags) {
385  }
386  CTxMemPool::setEntries m_ancestors;
387  std::unique_ptr<CTxMemPoolEntry> m_entry;
388 
389  Amount m_modified_fees;
390 
391  const CTransactionRef &m_ptx;
392 
393  // ABC specific flags that are used in both PreChecks and
394  // ConsensusScriptChecks
395  const uint32_t m_next_block_script_verify_flags;
396  int m_sig_checks_standard;
397  };
398 
399  // Run the policy checks on a given transaction, excluding any script
400  // checks. Looks up inputs, calculates feerate, considers replacement,
401  // evaluates package limits, etc. As this function can be invoked for "free"
402  // by a peer, only tests that are fast should be done here (to avoid CPU
403  // DoS).
404  bool PreChecks(ATMPArgs &args, Workspace &ws)
405  EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
406 
407  // Re-run the script checks, using consensus flags, and try to cache the
408  // result in the scriptcache. This should be done after
409  // PolicyScriptChecks(). This requires that all inputs either be in our
410  // utxo set or in the mempool.
411  bool ConsensusScriptChecks(ATMPArgs &args, Workspace &ws,
414 
415  // Try to add the transaction to the mempool, removing any conflicts first.
416  // Returns true if the transaction is in the mempool after any size
417  // limiting is performed, false otherwise.
418  bool Finalize(ATMPArgs &args, Workspace &ws)
419  EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
420 
421 private:
422  CTxMemPool &m_pool;
423  CCoinsViewCache m_view;
424  CCoinsViewMemPool m_viewmempool;
425  CCoinsView m_dummy;
426 
427  // The package limits in effect at the time of invocation.
428  const size_t m_limit_ancestors;
429  const size_t m_limit_ancestor_size;
430  // These may be modified while evaluating a transaction (eg to account for
431  // in-mempool conflicts; see below).
432  size_t m_limit_descendants;
433  size_t m_limit_descendant_size;
434 };
435 
436 bool MemPoolAccept::PreChecks(ATMPArgs &args, Workspace &ws) {
437  const CTransactionRef &ptx = ws.m_ptx;
438  const CTransaction &tx = *ws.m_ptx;
439  const TxId &txid = ws.m_ptx->GetId();
440 
441  // Copy/alias what we need out of args
442  TxValidationState &state = args.m_state;
443  const int64_t nAcceptTime = args.m_accept_time;
444  const bool bypass_limits = args.m_bypass_limits;
445  std::vector<COutPoint> &coins_to_uncache = args.m_coins_to_uncache;
446 
447  // Alias what we need out of ws
448  CTxMemPool::setEntries &setAncestors = ws.m_ancestors;
449  std::unique_ptr<CTxMemPoolEntry> &entry = ws.m_entry;
450  Amount &nModifiedFees = ws.m_modified_fees;
451 
452  // Coinbase is only valid in a block, not as a loose transaction.
453  if (!CheckRegularTransaction(tx, state)) {
454  // state filled in by CheckRegularTransaction.
455  return false;
456  }
457 
458  // Rather not work on nonstandard transactions (unless -testnet)
459  std::string reason;
460  if (fRequireStandard && !IsStandardTx(tx, reason)) {
461  return state.Invalid(TxValidationResult::TX_NOT_STANDARD, reason);
462  }
463 
464  // Only accept nLockTime-using transactions that can be mined in the next
465  // block; we don't want our mempool filled up with transactions that can't
466  // be mined yet.
467  TxValidationState ctxState;
469  args.m_config.GetChainParams().GetConsensus(), tx, ctxState,
471  // We copy the state from a dummy to ensure we don't increase the
 472  // ban score of a peer for a transaction that could be valid in the future.
474  ctxState.GetRejectReason(),
475  ctxState.GetDebugMessage());
476  }
477 
478  // Is it already in the memory pool?
479  if (m_pool.exists(txid)) {
481  "txn-already-in-mempool");
482  }
483 
484  // Check for conflicts with in-memory transactions
485  for (const CTxIn &txin : tx.vin) {
486  auto itConflicting = m_pool.mapNextTx.find(txin.prevout);
487  if (itConflicting != m_pool.mapNextTx.end()) {
488  // Disable replacement feature for good
490  "txn-mempool-conflict");
491  }
492  }
493 
494  LockPoints lp;
495  m_view.SetBackend(m_viewmempool);
496 
497  CCoinsViewCache &coins_cache = ::ChainstateActive().CoinsTip();
498  // Do all inputs exist?
499  for (const CTxIn &txin : tx.vin) {
500  if (!coins_cache.HaveCoinInCache(txin.prevout)) {
501  coins_to_uncache.push_back(txin.prevout);
502  }
503 
504  // Note: this call may add txin.prevout to the coins cache
505  // (coins_cache.cacheCoins) by way of FetchCoin(). It should be
506  // removed later (via coins_to_uncache) if this tx turns out to be
507  // invalid.
508  if (!m_view.HaveCoin(txin.prevout)) {
509  // Are inputs missing because we already have the tx?
510  for (size_t out = 0; out < tx.vout.size(); out++) {
511  // Optimistically just do efficient check of cache for
512  // outputs.
513  if (coins_cache.HaveCoinInCache(COutPoint(txid, out))) {
515  "txn-already-known");
516  }
517  }
518 
519  // Otherwise assume this might be an orphan tx for which we just
520  // haven't seen parents yet.
522  "bad-txns-inputs-missingorspent");
523  }
524  }
525 
526  // Are the actual inputs available?
527  if (!m_view.HaveInputs(tx)) {
529  "bad-txns-inputs-spent");
530  }
531 
532  // Bring the best block into scope.
533  m_view.GetBestBlock();
534 
535  // we have all inputs cached now, so switch back to dummy (to protect
536  // against bugs where we pull more inputs from disk that miss being
537  // added to coins_to_uncache)
538  m_view.SetBackend(m_dummy);
539 
540  // Only accept BIP68 sequence locked transactions that can be mined in
541  // the next block; we don't want our mempool filled up with transactions
542  // that can't be mined yet. Must keep pool.cs for this unless we change
543  // CheckSequenceLocks to take a CoinsViewCache instead of create its
544  // own.
547  "non-BIP68-final");
548  }
549 
550  Amount nFees = Amount::zero();
551  if (!Consensus::CheckTxInputs(tx, state, m_view, GetSpendHeight(m_view),
552  nFees)) {
553  // state filled in by CheckTxInputs
554  return false;
555  }
556 
557  // If fee_out is passed, return the fee to the caller
558  if (args.m_fee_out) {
559  *args.m_fee_out = nFees;
560  }
561 
562  // Check for non-standard pay-to-script-hash in inputs
563  if (fRequireStandard &&
564  !AreInputsStandard(tx, m_view, ws.m_next_block_script_verify_flags)) {
566  "bad-txns-nonstandard-inputs");
567  }
568 
569  // nModifiedFees includes any fee deltas from PrioritiseTransaction
570  nModifiedFees = nFees;
571  m_pool.ApplyDelta(txid, nModifiedFees);
572 
573  // Keep track of transactions that spend a coinbase, which we re-scan
574  // during reorgs to ensure COINBASE_MATURITY is still met.
575  bool fSpendsCoinbase = false;
576  for (const CTxIn &txin : tx.vin) {
577  const Coin &coin = m_view.AccessCoin(txin.prevout);
578  if (coin.IsCoinBase()) {
579  fSpendsCoinbase = true;
580  break;
581  }
582  }
583 
584  unsigned int nSize = tx.GetTotalSize();
585 
586  // No transactions are allowed below minRelayTxFee except from disconnected
587  // blocks.
588  // Do not change this to use virtualsize without coordinating a network
589  // policy upgrade.
590  if (!bypass_limits && nModifiedFees < minRelayTxFee.GetFee(nSize)) {
591  return state.Invalid(
592  TxValidationResult::TX_MEMPOOL_POLICY, "min relay fee not met",
593  strprintf("%d < %d", nModifiedFees, ::minRelayTxFee.GetFee(nSize)));
594  }
595 
596  // Validate input scripts against standard script flags.
597  const uint32_t scriptVerifyFlags =
598  ws.m_next_block_script_verify_flags | STANDARD_SCRIPT_VERIFY_FLAGS;
599  PrecomputedTransactionData txdata(tx);
600  if (!CheckInputScripts(tx, state, m_view, scriptVerifyFlags, true, false,
601  txdata, ws.m_sig_checks_standard)) {
602  // State filled in by CheckInputScripts
603  return false;
604  }
605 
606  entry.reset(new CTxMemPoolEntry(ptx, nFees, nAcceptTime,
607  ::ChainActive().Height(), fSpendsCoinbase,
608  ws.m_sig_checks_standard, lp));
609 
610  unsigned int nVirtualSize = entry->GetTxVirtualSize();
611 
612  Amount mempoolRejectFee =
613  m_pool
614  .GetMinFee(gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) *
615  1000000)
616  .GetFee(nVirtualSize);
617  if (!bypass_limits && mempoolRejectFee > Amount::zero() &&
618  nModifiedFees < mempoolRejectFee) {
619  return state.Invalid(
620  TxValidationResult::TX_MEMPOOL_POLICY, "mempool min fee not met",
621  strprintf("%d < %d", nModifiedFees, mempoolRejectFee));
622  }
623 
624  // Calculate in-mempool ancestors, up to a limit.
625  std::string errString;
626  if (!m_pool.CalculateMemPoolAncestors(
627  *entry, setAncestors, m_limit_ancestors, m_limit_ancestor_size,
628  m_limit_descendants, m_limit_descendant_size, errString)) {
630  "too-long-mempool-chain", errString);
631  }
632  return true;
633 }
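// Worked example for the two fee gates above (the numbers are illustrative
// assumptions, not the configured defaults): at a relay feerate of
// 1000 satoshis per kB, a 250-byte transaction must pay at least
//
//   minRelayTxFee.GetFee(250) == 250 * SATOSHI
//
// and, independently, at least the dynamic minimum
// m_pool.GetMinFee(<-maxmempool bytes>).GetFee(nVirtualSize), which becomes
// non-zero once the mempool has had to trim transactions to stay under its
// configured size limit.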
634 
635 bool MemPoolAccept::ConsensusScriptChecks(ATMPArgs &args, Workspace &ws,
636  PrecomputedTransactionData &txdata) {
637  const CTransaction &tx = *ws.m_ptx;
638  const TxId &txid = tx.GetId();
639 
640  TxValidationState &state = args.m_state;
641 
642  // Check again against the next block's script verification flags
643  // to cache our script execution flags.
644  //
645  // This is also useful in case of bugs in the standard flags that cause
646  // transactions to pass as valid when they're actually invalid. For
647  // instance the STRICTENC flag was incorrectly allowing certain CHECKSIG
648  // NOT scripts to pass, even though they were invalid.
649  //
650  // There is a similar check in CreateNewBlock() to prevent creating
651  // invalid blocks (using TestBlockValidity), however allowing such
652  // transactions into the mempool can be exploited as a DoS attack.
653  int nSigChecksConsensus;
654  if (!CheckInputsFromMempoolAndCache(tx, state, m_view, m_pool,
655  ws.m_next_block_script_verify_flags,
656  txdata, nSigChecksConsensus)) {
657  // This can occur under some circumstances, if the node receives an
658  // unrequested tx which is invalid due to new consensus rules not
659  // being activated yet (during IBD).
660  return error("%s: BUG! PLEASE REPORT THIS! CheckInputScripts failed "
661  "against next-block but not STANDARD flags %s, %s",
662  __func__, txid.ToString(), state.ToString());
663  }
664 
665  if (ws.m_sig_checks_standard != nSigChecksConsensus) {
666  // We can't accept this transaction as we've used the standard count
667  // for the mempool/mining, but the consensus count will be enforced
668  // in validation (we don't want to produce bad block templates).
669  return error(
670  "%s: BUG! PLEASE REPORT THIS! SigChecks count differed between "
671  "standard and consensus flags in %s",
672  __func__, txid.ToString());
673  }
674  return true;
675 }
676 
677 bool MemPoolAccept::Finalize(ATMPArgs &args, Workspace &ws) {
678  const TxId &txid = ws.m_ptx->GetId();
679  TxValidationState &state = args.m_state;
680  const bool bypass_limits = args.m_bypass_limits;
681 
682  CTxMemPool::setEntries &setAncestors = ws.m_ancestors;
683  std::unique_ptr<CTxMemPoolEntry> &entry = ws.m_entry;
684 
685  // Store transaction in memory.
686  m_pool.addUnchecked(*entry, setAncestors);
687 
688  // Trim mempool and check if tx was trimmed.
689  if (!bypass_limits) {
690  m_pool.LimitSize(
691  gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000,
692  std::chrono::hours{
693  gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY)});
694  if (!m_pool.exists(txid)) {
696  "mempool full");
697  }
698  }
699  return true;
700 }
701 
702 bool MemPoolAccept::AcceptSingleTransaction(const CTransactionRef &ptx,
703  ATMPArgs &args) {
705  // mempool "read lock" (held through
706  // GetMainSignals().TransactionAddedToMempool())
707  LOCK(m_pool.cs);
708 
709  Workspace workspace(ptx, GetNextBlockScriptFlags(
710  args.m_config.GetChainParams().GetConsensus(),
711  ::ChainActive().Tip()));
712 
713  if (!PreChecks(args, workspace)) {
714  return false;
715  }
716 
717  // Only compute the precomputed transaction data if we need to verify
718  // scripts (ie, other policy checks pass). We perform the inexpensive
719  // checks first and avoid hashing and signature verification unless those
720  // checks pass, to mitigate CPU exhaustion denial-of-service attacks.
721  PrecomputedTransactionData txdata(*ptx);
722 
723  if (!ConsensusScriptChecks(args, workspace, txdata)) {
724  return false;
725  }
726 
727  // Tx was accepted, but not added
728  if (args.m_test_accept) {
729  return true;
730  }
731 
732  if (!Finalize(args, workspace)) {
733  return false;
734  }
735 
737  ptx, m_pool.GetAndIncrementSequence());
738 
739  return true;
740 }
741 
742 } // namespace
743 
747 static bool
749  TxValidationState &state, const CTransactionRef &tx,
750  int64_t nAcceptTime, bool bypass_limits,
751  bool test_accept, Amount *fee_out = nullptr)
754  std::vector<COutPoint> coins_to_uncache;
755  MemPoolAccept::ATMPArgs args{
756  config, state, nAcceptTime, bypass_limits,
757  coins_to_uncache, test_accept, fee_out};
758  bool res = MemPoolAccept(pool).AcceptSingleTransaction(tx, args);
759  if (!res) {
760  // Remove coins that were not present in the coins cache before calling
761  // ATMPW; this is to prevent memory DoS in case we receive a large
762  // number of invalid transactions that attempt to overrun the in-memory
763  // coins cache
764  // (`CCoinsViewCache::cacheCoins`).
765 
766  for (const COutPoint &outpoint : coins_to_uncache) {
767  ::ChainstateActive().CoinsTip().Uncache(outpoint);
768  }
769  }
770 
771  // After we've (potentially) uncached entries, ensure our coins cache is
772  // still within its size limits
773  BlockValidationState stateDummy;
774  ::ChainstateActive().FlushStateToDisk(config.GetChainParams(), stateDummy,
776  return res;
777 }
778 
779 bool AcceptToMemoryPool(const Config &config, CTxMemPool &pool,
780  TxValidationState &state, const CTransactionRef &tx,
781  bool bypass_limits, bool test_accept, Amount *fee_out) {
782  return AcceptToMemoryPoolWithTime(config, pool, state, tx, GetTime(),
783  bypass_limits, test_accept, fee_out);
784 }
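// Illustrative caller sketch; a CTxMemPool &mempool reference and a built
// CMutableTransaction mtx are assumed to be in scope. A test_accept call runs
// every check and reports the fee without actually adding the transaction:
//
//   TxValidationState state;
//   Amount fee = Amount::zero();
//   const bool ok =
//       AcceptToMemoryPool(config, mempool, state, MakeTransactionRef(mtx),
//                          /* bypass_limits */ false,
//                          /* test_accept */ true, &fee);
//   if (!ok) {
//       LogPrintf("tx rejected: %s\n", state.ToString());
//   }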
785 
786 CTransactionRef GetTransaction(const CBlockIndex *const block_index,
787  const CTxMemPool *const mempool,
788  const TxId &txid,
789  const Consensus::Params &consensusParams,
790  BlockHash &hashBlock) {
791  LOCK(cs_main);
792 
793  if (block_index) {
794  CBlock block;
795  if (ReadBlockFromDisk(block, block_index, consensusParams)) {
796  for (const auto &tx : block.vtx) {
797  if (tx->GetId() == txid) {
798  hashBlock = block_index->GetBlockHash();
799  return tx;
800  }
801  }
802  }
803  return nullptr;
804  }
805  if (mempool) {
806  CTransactionRef ptx = mempool->get(txid);
807  if (ptx) {
808  return ptx;
809  }
810  }
811  if (g_txindex) {
812  CTransactionRef tx;
813  if (g_txindex->FindTx(txid, hashBlock, tx)) {
814  return tx;
815  }
816  }
817  return nullptr;
818 }
819 
821 //
822 // CBlock and CBlockIndex
823 //
824 
825 static bool WriteBlockToDisk(const CBlock &block, FlatFilePos &pos,
826  const CMessageHeader::MessageMagic &messageStart) {
827  // Open history file to append
829  if (fileout.IsNull()) {
830  return error("WriteBlockToDisk: OpenBlockFile failed");
831  }
832 
833  // Write index header
834  unsigned int nSize = GetSerializeSize(block, fileout.GetVersion());
835  fileout << messageStart << nSize;
836 
837  // Write block
838  long fileOutPos = ftell(fileout.Get());
839  if (fileOutPos < 0) {
840  return error("WriteBlockToDisk: ftell failed");
841  }
842 
843  pos.nPos = (unsigned int)fileOutPos;
844  fileout << block;
845 
846  return true;
847 }
848 
849 Amount GetBlockSubsidy(int nHeight, const Consensus::Params &consensusParams) {
850  int halvings = nHeight / consensusParams.nSubsidyHalvingInterval;
851  // Force block reward to zero when right shift is undefined.
852  if (halvings >= 64) {
853  return Amount::zero();
854  }
855 
856  Amount nSubsidy = 50 * COIN;
857  // Subsidy is cut in half every 210,000 blocks which will occur
858  // approximately every 4 years.
859  return ((nSubsidy / SATOSHI) >> halvings) * SATOSHI;
860 }
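// Worked example (nSubsidyHalvingInterval is 210,000 on mainnet):
//
//   GetBlockSubsidy(0, params)      == 50 * COIN   // halvings = 0
//   GetBlockSubsidy(209999, params) == 50 * COIN   // halvings = 0
//   GetBlockSubsidy(210000, params) == 25 * COIN   // halvings = 1
//   GetBlockSubsidy(630000, params) pays 6.25 coins  // halvings = 3
//
// Any height for which halvings >= 64 yields Amount::zero().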
861 
862 CoinsViews::CoinsViews(std::string ldb_name, size_t cache_size_bytes,
863  bool in_memory, bool should_wipe)
864  : m_dbview(GetDataDir() / ldb_name, cache_size_bytes, in_memory,
865  should_wipe),
866  m_catcherview(&m_dbview) {}
867 
868 void CoinsViews::InitCache() {
869  m_cacheview = std::make_unique<CCoinsViewCache>(&m_catcherview);
870 }
871 
873  BlockHash from_snapshot_blockhash)
874  : m_blockman(blockman), m_mempool(mempool),
875  m_from_snapshot_blockhash(from_snapshot_blockhash) {}
876 
877 void CChainState::InitCoinsDB(size_t cache_size_bytes, bool in_memory,
878  bool should_wipe, std::string leveldb_name) {
880  leveldb_name += "_" + m_from_snapshot_blockhash.ToString();
881  }
882  m_coins_views = std::make_unique<CoinsViews>(leveldb_name, cache_size_bytes,
883  in_memory, should_wipe);
884 }
885 
886 void CChainState::InitCoinsCache(size_t cache_size_bytes) {
887  assert(m_coins_views != nullptr);
888  m_coinstip_cache_size_bytes = cache_size_bytes;
889  m_coins_views->InitCache();
890 }
891 
892 // Note that though this is marked const, we may end up modifying
893 // `m_cached_finished_ibd`, which is a performance-related implementation
894 // detail. This function must be marked `const` so that `CValidationInterface`
895 // clients (which are given a `const CChainState*`) can call it.
896 //
898  // Optimization: pre-test latch before taking the lock.
899  if (m_cached_finished_ibd.load(std::memory_order_relaxed)) {
900  return false;
901  }
902 
903  LOCK(cs_main);
904  if (m_cached_finished_ibd.load(std::memory_order_relaxed)) {
905  return false;
906  }
907  if (fImporting || fReindex) {
908  return true;
909  }
910  if (m_chain.Tip() == nullptr) {
911  return true;
912  }
914  return true;
915  }
916  if (m_chain.Tip()->GetBlockTime() < (GetTime() - nMaxTipAge)) {
917  return true;
918  }
919  LogPrintf("Leaving InitialBlockDownload (latching to false)\n");
920  m_cached_finished_ibd.store(true, std::memory_order_relaxed);
921  return false;
922 }
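// Illustrative sketch: callers typically gate network-facing or
// fee-estimation work on this check, e.g.
//
//   if (::ChainstateActive().IsInitialBlockDownload()) {
//       return; // defer until the node is caught up
//   }
//
// Note the one-way latch above: once the function has returned false it keeps
// returning false for the lifetime of this CChainState.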
923 
924 static CBlockIndex const *pindexBestForkTip = nullptr;
925 static CBlockIndex const *pindexBestForkBase = nullptr;
926 
927 static void AlertNotify(const std::string &strMessage) {
928  uiInterface.NotifyAlertChanged();
929 #if defined(HAVE_SYSTEM)
930  std::string strCmd = gArgs.GetArg("-alertnotify", "");
931  if (strCmd.empty()) {
932  return;
933  }
934 
935  // Alert text should be plain ascii coming from a trusted source, but to be
936  // safe we first strip anything not in safeChars, then add single quotes
937  // around the whole string before passing it to the shell:
938  std::string singleQuote("'");
939  std::string safeStatus = SanitizeString(strMessage);
940  safeStatus = singleQuote + safeStatus + singleQuote;
941  boost::replace_all(strCmd, "%s", safeStatus);
942 
943  std::thread t(runCommand, strCmd);
944  // thread runs free
945  t.detach();
946 #endif
947 }
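// Illustrative configuration sketch (the shell command is only an example):
//
//   bitcoind -alertnotify="echo %s >> /home/user/alerts.txt"
//
// The sanitized, single-quoted warning text replaces %s before the command is
// run on a detached thread, as implemented above.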
948 
951  // Before we get past initial download, we cannot reliably alert about forks
952  // (we assume we don't get stuck on a fork before finishing our initial
953  // sync)
954  if (::ChainstateActive().IsInitialBlockDownload()) {
955  return;
956  }
957 
958  // If our best fork is no longer within 72 blocks (+/- 12 hours if no one
959  // mines it) of our head, drop it
960  if (pindexBestForkTip &&
961  ::ChainActive().Height() - pindexBestForkTip->nHeight >= 72) {
962  pindexBestForkTip = nullptr;
963  }
964 
965  if (pindexBestForkTip ||
966  (pindexBestInvalid &&
967  pindexBestInvalid->nChainWork >
968  ::ChainActive().Tip()->nChainWork +
969  (GetBlockProof(*::ChainActive().Tip()) * 6))) {
971  std::string warning =
972  std::string("'Warning: Large-work fork detected, forking after "
973  "block ") +
974  pindexBestForkBase->phashBlock->ToString() + std::string("'");
975  AlertNotify(warning);
976  }
977 
979  LogPrintf("%s: Warning: Large fork found\n forking the "
980  "chain at height %d (%s)\n lasting to height %d "
981  "(%s).\nChain state database corruption likely.\n",
982  __func__, pindexBestForkBase->nHeight,
987  } else {
988  LogPrintf("%s: Warning: Found invalid chain at least ~6 blocks "
989  "longer than our best chain.\nChain state database "
990  "corruption likely.\n",
991  __func__);
993  }
994  } else {
995  SetfLargeWorkForkFound(false);
997  }
998 }
999 
1000 static void CheckForkWarningConditionsOnNewFork(CBlockIndex *pindexNewForkTip)
1003  // If we are on a fork that is sufficiently large, set a warning flag.
1004  const CBlockIndex *pfork = ::ChainActive().FindFork(pindexNewForkTip);
1005 
 1006  // We warn the user about a fork of at least 7 blocks whose tip is within
 1007  // 72 blocks (+/- 12 hours if no one mines it) of ours. We use 7 blocks
 1008  // rather arbitrarily as it represents just under 10% of sustained network
 1009  // hash rate operating on the fork, or a chain that is entirely longer than
 1010  // ours and invalid (note that this should be detected by both). Defining it
 1011  // this way allows us to store only the highest fork tip (+ base) that meets
 1012  // the 7-block condition, so we always have the most-likely-to-cause-warning
 1013  // fork.
1014  if (pfork &&
1015  (!pindexBestForkTip ||
1016  pindexNewForkTip->nHeight > pindexBestForkTip->nHeight) &&
1017  pindexNewForkTip->nChainWork - pfork->nChainWork >
1018  (GetBlockProof(*pfork) * 7) &&
1019  ::ChainActive().Height() - pindexNewForkTip->nHeight < 72) {
1020  pindexBestForkTip = pindexNewForkTip;
1021  pindexBestForkBase = pfork;
1022  }
1023 
1025 }
1026 
1027 // Called both upon regular invalid block discovery *and* InvalidateBlock
1030  if (!pindexBestInvalid ||
1031  pindexNew->nChainWork > pindexBestInvalid->nChainWork) {
1032  pindexBestInvalid = pindexNew;
1033  }
1034  if (pindexBestHeader != nullptr &&
1035  pindexBestHeader->GetAncestor(pindexNew->nHeight) == pindexNew) {
1037  }
1038 
1039  // If the invalid chain found is supposed to be finalized, we need to move
1040  // back the finalization point.
1041  if (IsBlockFinalized(pindexNew)) {
1042  m_finalizedBlockIndex = pindexNew->pprev;
1043  }
1044 
1045  LogPrintf("%s: invalid block=%s height=%d log2_work=%f date=%s\n",
1046  __func__, pindexNew->GetBlockHash().ToString(),
1047  pindexNew->nHeight,
1048  log(pindexNew->nChainWork.getdouble()) / log(2.0),
1049  FormatISO8601DateTime(pindexNew->GetBlockTime()));
1050  CBlockIndex *tip = ::ChainActive().Tip();
1051  assert(tip);
1052  LogPrintf("%s: current best=%s height=%d log2_work=%f date=%s\n",
1053  __func__, tip->GetBlockHash().ToString(),
1054  ::ChainActive().Height(),
1055  log(tip->nChainWork.getdouble()) / log(2.0),
1057 }
1058 
1059 // Same as InvalidChainFound, above, except not called directly from
1060 // InvalidateBlock, which does its own setBlockIndexCandidates management.
1062  const BlockValidationState &state) {
1064  pindex->nStatus = pindex->nStatus.withFailed();
1065  m_blockman.m_failed_blocks.insert(pindex);
1066  setDirtyBlockIndex.insert(pindex);
1067  InvalidChainFound(pindex);
1068  }
1069 }
1070 
1071 void SpendCoins(CCoinsViewCache &view, const CTransaction &tx, CTxUndo &txundo,
1072  int nHeight) {
1073  // Mark inputs spent.
1074  if (tx.IsCoinBase()) {
1075  return;
1076  }
1077 
1078  txundo.vprevout.reserve(tx.vin.size());
1079  for (const CTxIn &txin : tx.vin) {
1080  txundo.vprevout.emplace_back();
1081  bool is_spent = view.SpendCoin(txin.prevout, &txundo.vprevout.back());
1082  assert(is_spent);
1083  }
1084 }
1085 
1086 void UpdateCoins(CCoinsViewCache &view, const CTransaction &tx, CTxUndo &txundo,
1087  int nHeight) {
1088  SpendCoins(view, tx, txundo, nHeight);
1089  AddCoins(view, tx, nHeight);
1090 }
1091 
1092 void UpdateCoins(CCoinsViewCache &view, const CTransaction &tx, int nHeight) {
1093  // Mark inputs spent.
1094  if (!tx.IsCoinBase()) {
1095  for (const CTxIn &txin : tx.vin) {
1096  bool is_spent = view.SpendCoin(txin.prevout);
1097  assert(is_spent);
1098  }
1099  }
1100 
1101  // Add outputs.
1102  AddCoins(view, tx, nHeight);
1103 }
1104 
1106  const CScript &scriptSig = ptxTo->vin[nIn].scriptSig;
1107  if (!VerifyScript(scriptSig, m_tx_out.scriptPubKey, nFlags,
1110  metrics, &error)) {
1111  return false;
1112  }
1113  if ((pTxLimitSigChecks &&
1117  // we can't assign a meaningful script error (since the script
1118  // succeeded), but remove the ScriptError::OK which could be
1119  // misinterpreted.
1121  return false;
1122  }
1123  return true;
1124 }
1125 
1126 int GetSpendHeight(const CCoinsViewCache &inputs) {
1127  LOCK(cs_main);
1128  CBlockIndex *pindexPrev = LookupBlockIndex(inputs.GetBestBlock());
1129  return pindexPrev->nHeight + 1;
1130 }
1131 
1133  const CCoinsViewCache &inputs, const uint32_t flags,
1134  bool sigCacheStore, bool scriptCacheStore,
1135  const PrecomputedTransactionData &txdata,
1136  int &nSigChecksOut, TxSigCheckLimiter &txLimitSigChecks,
1137  CheckInputsLimiter *pBlockLimitSigChecks,
1138  std::vector<CScriptCheck> *pvChecks) {
1140  assert(!tx.IsCoinBase());
1141 
1142  if (pvChecks) {
1143  pvChecks->reserve(tx.vin.size());
1144  }
1145 
1146  // First check if script executions have been cached with the same flags.
1147  // Note that this assumes that the inputs provided are correct (ie that the
1148  // transaction hash which is in tx's prevouts properly commits to the
1149  // scriptPubKey in the inputs view of that transaction).
1150  ScriptCacheKey hashCacheEntry(tx, flags);
1151  if (IsKeyInScriptCache(hashCacheEntry, !scriptCacheStore, nSigChecksOut)) {
1152  if (!txLimitSigChecks.consume_and_check(nSigChecksOut) ||
1153  (pBlockLimitSigChecks &&
1154  !pBlockLimitSigChecks->consume_and_check(nSigChecksOut))) {
1156  "too-many-sigchecks");
1157  }
1158  return true;
1159  }
1160 
1161  int nSigChecksTotal = 0;
1162 
1163  for (size_t i = 0; i < tx.vin.size(); i++) {
1164  const COutPoint &prevout = tx.vin[i].prevout;
1165  const Coin &coin = inputs.AccessCoin(prevout);
1166  assert(!coin.IsSpent());
1167 
1168  // We very carefully only pass in things to CScriptCheck which are
1169  // clearly committed to by tx's hash. This provides a sanity
1170  // check that our caching is not introducing consensus failures through
1171  // additional data in, eg, the coins being spent being checked as a part
1172  // of CScriptCheck.
1173 
1174  // Verify signature
1175  CScriptCheck check(coin.GetTxOut(), tx, i, flags, sigCacheStore, txdata,
1176  &txLimitSigChecks, pBlockLimitSigChecks);
1177 
1178  // If pvChecks is not null, defer the check execution to the caller.
1179  if (pvChecks) {
1180  pvChecks->push_back(std::move(check));
1181  continue;
1182  }
1183 
1184  if (!check()) {
1185  ScriptError scriptError = check.GetScriptError();
1186  // Compute flags without the optional standardness flags.
1187  // This differs from MANDATORY_SCRIPT_VERIFY_FLAGS as it contains
1188  // additional upgrade flags (see AcceptToMemoryPoolWorker variable
1189  // extraFlags).
1190  uint32_t mandatoryFlags =
1192  if (flags != mandatoryFlags) {
1193  // Check whether the failure was caused by a non-mandatory
1194  // script verification check. If so, ensure we return
1195  // NOT_STANDARD instead of CONSENSUS to avoid downstream users
1196  // splitting the network between upgraded and non-upgraded nodes
1197  // by banning CONSENSUS-failing data providers.
1198  CScriptCheck check2(coin.GetTxOut(), tx, i, mandatoryFlags,
1199  sigCacheStore, txdata);
1200  if (check2()) {
1201  return state.Invalid(
1203  strprintf("non-mandatory-script-verify-flag (%s)",
1204  ScriptErrorString(scriptError)));
1205  }
1206  // update the error message to reflect the mandatory violation.
1207  scriptError = check2.GetScriptError();
1208  }
1209 
1210  // MANDATORY flag failures correspond to
1211  // TxValidationResult::TX_CONSENSUS. Because CONSENSUS failures are
1212  // the most serious case of validation failures, we may need to
1213  // consider using RECENT_CONSENSUS_CHANGE for any script failure
1214  // that could be due to non-upgraded nodes which we may want to
1215  // support, to avoid splitting the network (but this depends on the
1216  // details of how net_processing handles such errors).
1217  return state.Invalid(
1219  strprintf("mandatory-script-verify-flag-failed (%s)",
1220  ScriptErrorString(scriptError)));
1221  }
1222 
1223  nSigChecksTotal += check.GetScriptExecutionMetrics().nSigChecks;
1224  }
1225 
1226  nSigChecksOut = nSigChecksTotal;
1227 
1228  if (scriptCacheStore && !pvChecks) {
1229  // We executed all of the provided scripts, and were told to cache the
1230  // result. Do so now.
1231  AddKeyInScriptCache(hashCacheEntry, nSigChecksTotal);
1232  }
1233 
1234  return true;
1235 }
1236 
1237 static bool UndoWriteToDisk(const CBlockUndo &blockundo, FlatFilePos &pos,
1238  const BlockHash &hashBlock,
1239  const CMessageHeader::MessageMagic &messageStart) {
1240  // Open history file to append
1242  if (fileout.IsNull()) {
1243  return error("%s: OpenUndoFile failed", __func__);
1244  }
1245 
1246  // Write index header
1247  unsigned int nSize = GetSerializeSize(blockundo, fileout.GetVersion());
1248  fileout << messageStart << nSize;
1249 
1250  // Write undo data
1251  long fileOutPos = ftell(fileout.Get());
1252  if (fileOutPos < 0) {
1253  return error("%s: ftell failed", __func__);
1254  }
1255  pos.nPos = (unsigned int)fileOutPos;
1256  fileout << blockundo;
1257 
1258  // calculate & write checksum
1260  hasher << hashBlock;
1261  hasher << blockundo;
1262  fileout << hasher.GetHash();
1263 
1264  return true;
1265 }
1266 
1267 bool UndoReadFromDisk(CBlockUndo &blockundo, const CBlockIndex *pindex) {
1268  FlatFilePos pos = pindex->GetUndoPos();
1269  if (pos.IsNull()) {
1270  return error("%s: no undo data available", __func__);
1271  }
1272 
1273  // Open history file to read
1274  CAutoFile filein(OpenUndoFile(pos, true), SER_DISK, CLIENT_VERSION);
1275  if (filein.IsNull()) {
1276  return error("%s: OpenUndoFile failed", __func__);
1277  }
1278 
1279  // Read block
1280  uint256 hashChecksum;
1281  // We need a CHashVerifier as reserializing may lose data
1282  CHashVerifier<CAutoFile> verifier(&filein);
1283  try {
1284  verifier << pindex->pprev->GetBlockHash();
1285  verifier >> blockundo;
1286  filein >> hashChecksum;
1287  } catch (const std::exception &e) {
1288  return error("%s: Deserialize or I/O error - %s", __func__, e.what());
1289  }
1290 
1291  // Verify checksum
1292  if (hashChecksum != verifier.GetHash()) {
1293  return error("%s: Checksum mismatch", __func__);
1294  }
1295 
1296  return true;
1297 }
1298 
1300 static bool AbortNode(const std::string &strMessage,
1301  bilingual_str user_message = bilingual_str()) {
1302  SetMiscWarning(Untranslated(strMessage));
1303  LogPrintf("*** %s\n", strMessage);
 1304  if (user_message.empty()) {
1305  user_message =
1306  _("A fatal internal error occurred, see debug.log for details");
1307  }
1308  AbortError(user_message);
1309  StartShutdown();
1310  return false;
1311 }
1312 
1313 static bool AbortNode(BlockValidationState &state,
1314  const std::string &strMessage,
1315  const bilingual_str &userMessage = bilingual_str()) {
1316  AbortNode(strMessage, userMessage);
1317  return state.Error(strMessage);
1318 }
1319 
1322  const COutPoint &out) {
1323  bool fClean = true;
1324 
1325  if (view.HaveCoin(out)) {
1326  // Overwriting transaction output.
1327  fClean = false;
1328  }
1329 
1330  if (undo.GetHeight() == 0) {
1331  // Missing undo metadata (height and coinbase). Older versions included
1332  // this information only in undo records for the last spend of a
1333  // transactions' outputs. This implies that it must be present for some
1334  // other output of the same tx.
1335  const Coin &alternate = AccessByTxid(view, out.GetTxId());
1336  if (alternate.IsSpent()) {
1337  // Adding output for transaction without known metadata
1338  return DisconnectResult::FAILED;
1339  }
1340 
1341  // This is somewhat ugly, but hopefully utility is limited. This is only
 1342  // useful when working from legacy on-disk data. In any case, putting
1343  // the correct information in there doesn't hurt.
1344  const_cast<Coin &>(undo) = Coin(undo.GetTxOut(), alternate.GetHeight(),
1345  alternate.IsCoinBase());
1346  }
1347 
1348  // If the coin already exists as an unspent coin in the cache, then the
1349  // possible_overwrite parameter to AddCoin must be set to true. We have
1350  // already checked whether an unspent coin exists above using HaveCoin, so
1351  // we don't need to guess. When fClean is false, an unspent coin already
1352  // existed and it is an overwrite.
1353  view.AddCoin(out, std::move(undo), !fClean);
1354 
 1355  return fClean ? DisconnectResult::OK : DisconnectResult::UNCLEAN;
 1356 }
1357 
1363  const CBlockIndex *pindex,
1364  CCoinsViewCache &view) {
1365  CBlockUndo blockUndo;
1366  if (!UndoReadFromDisk(blockUndo, pindex)) {
1367  error("DisconnectBlock(): failure reading undo data");
1368  return DisconnectResult::FAILED;
1369  }
1370 
1371  return ApplyBlockUndo(blockUndo, block, pindex, view);
1372 }
1373 
1375  const CBlock &block, const CBlockIndex *pindex,
1376  CCoinsViewCache &view) {
1377  bool fClean = true;
1378 
1379  if (blockUndo.vtxundo.size() + 1 != block.vtx.size()) {
1380  error("DisconnectBlock(): block and undo data inconsistent");
1381  return DisconnectResult::FAILED;
1382  }
1383 
1384  // First, restore inputs.
1385  for (size_t i = 1; i < block.vtx.size(); i++) {
1386  const CTransaction &tx = *(block.vtx[i]);
1387  const CTxUndo &txundo = blockUndo.vtxundo[i - 1];
1388  if (txundo.vprevout.size() != tx.vin.size()) {
1389  error("DisconnectBlock(): transaction and undo data inconsistent");
1390  return DisconnectResult::FAILED;
1391  }
1392 
1393  for (size_t j = 0; j < tx.vin.size(); j++) {
1394  const COutPoint &out = tx.vin[j].prevout;
1395  const Coin &undo = txundo.vprevout[j];
1396  DisconnectResult res = UndoCoinSpend(undo, view, out);
1397  if (res == DisconnectResult::FAILED) {
1398  return DisconnectResult::FAILED;
1399  }
1400  fClean = fClean && res != DisconnectResult::UNCLEAN;
1401  }
1402  }
1403 
1404  // Second, revert created outputs.
1405  for (const auto &ptx : block.vtx) {
1406  const CTransaction &tx = *ptx;
1407  const TxId &txid = tx.GetId();
1408  const bool is_coinbase = tx.IsCoinBase();
1409 
1410  // Check that all outputs are available and match the outputs in the
1411  // block itself exactly.
1412  for (size_t o = 0; o < tx.vout.size(); o++) {
1413  if (tx.vout[o].scriptPubKey.IsUnspendable()) {
1414  continue;
1415  }
1416 
1417  COutPoint out(txid, o);
1418  Coin coin;
1419  bool is_spent = view.SpendCoin(out, &coin);
1420  if (!is_spent || tx.vout[o] != coin.GetTxOut() ||
1421  uint32_t(pindex->nHeight) != coin.GetHeight() ||
1422  is_coinbase != coin.IsCoinBase()) {
1423  // transaction output mismatch
1424  fClean = false;
1425  }
1426  }
1427  }
1428 
1429  // Move best block pointer to previous block.
1430  view.SetBestBlock(block.hashPrevBlock);
1431 
 1432  return fClean ? DisconnectResult::OK : DisconnectResult::UNCLEAN;
 1433 }
1434 
1435 static void FlushUndoFile(int block_file, bool finalize = false) {
1436  FlatFilePos undo_pos_old(block_file, vinfoBlockFile[block_file].nUndoSize);
1437  if (!UndoFileSeq().Flush(undo_pos_old, finalize)) {
1438  AbortNode("Flushing undo file to disk failed. This is likely the "
1439  "result of an I/O error.");
1440  }
1441 }
1442 
1443 static void FlushBlockFile(bool fFinalize = false, bool finalize_undo = false) {
1444  LOCK(cs_LastBlockFile);
1445  FlatFilePos block_pos_old(nLastBlockFile,
1446  vinfoBlockFile[nLastBlockFile].nSize);
1447  if (!BlockFileSeq().Flush(block_pos_old, fFinalize)) {
1448  AbortNode("Flushing block file to disk failed. This is likely the "
1449  "result of an I/O error.");
1450  }
1451  // we do not always flush the undo file, as the chain tip may be lagging
1452  // behind the incoming blocks,
 1453  // e.g. during IBD or a sync after a node has been offline
1454  if (!fFinalize || finalize_undo) {
1455  FlushUndoFile(nLastBlockFile, finalize_undo);
1456  }
1457 }
1458 
1459 static bool FindUndoPos(BlockValidationState &state, int nFile,
1460  FlatFilePos &pos, unsigned int nAddSize);
1461 
1462 static bool WriteUndoDataForBlock(const CBlockUndo &blockundo,
1463  BlockValidationState &state,
1464  CBlockIndex *pindex,
1465  const CChainParams &chainparams) {
1466  // Write undo information to disk
1467  if (pindex->GetUndoPos().IsNull()) {
1468  FlatFilePos _pos;
1469  if (!FindUndoPos(state, pindex->nFile, _pos,
1470  ::GetSerializeSize(blockundo, CLIENT_VERSION) + 40)) {
1471  return error("ConnectBlock(): FindUndoPos failed");
1472  }
1473  if (!UndoWriteToDisk(blockundo, _pos, pindex->pprev->GetBlockHash(),
1474  chainparams.DiskMagic())) {
1475  return AbortNode(state, "Failed to write undo data");
1476  }
1477  // rev files are written in block height order, whereas blk files are
 1478  // written as blocks come in (often out of order); we want to flush the
1479  // rev (undo) file once we've written the last block, which is indicated
1480  // by the last height in the block file info as below; note that this
1481  // does not catch the case where the undo writes are keeping up with the
1482  // block writes (usually when a synced up node is getting newly mined
1483  // blocks) -- this case is caught in the FindBlockPos function
1484  if (_pos.nFile < nLastBlockFile &&
1485  static_cast<uint32_t>(pindex->nHeight) ==
1486  vinfoBlockFile[_pos.nFile].nHeightLast) {
1487  FlushUndoFile(_pos.nFile, true);
1488  }
1489 
1490  // update nUndoPos in block index
1491  pindex->nUndoPos = _pos.nPos;
1492  pindex->nStatus = pindex->nStatus.withUndo();
1493  setDirtyBlockIndex.insert(pindex);
1494  }
1495 
1496  return true;
1497 }
1498 
1500 
1501 void ThreadScriptCheck(int worker_num) {
1502  util::ThreadRename(strprintf("scriptch.%i", worker_num));
1503  scriptcheckqueue.Thread();
1504 }
1505 
1506 VersionBitsCache versionbitscache GUARDED_BY(cs_main);
1507 
1508 int32_t ComputeBlockVersion(const CBlockIndex *pindexPrev,
1509  const Consensus::Params &params) {
1510  LOCK(cs_main);
1511  int32_t nVersion = VERSIONBITS_TOP_BITS;
1512 
1513  for (int i = 0; i < (int)Consensus::MAX_VERSION_BITS_DEPLOYMENTS; i++) {
1515  pindexPrev, params, static_cast<Consensus::DeploymentPos>(i),
1516  versionbitscache);
1517  if (state == ThresholdState::LOCKED_IN ||
1518  state == ThresholdState::STARTED) {
1519  nVersion |= VersionBitsMask(
1520  params, static_cast<Consensus::DeploymentPos>(i));
1521  }
1522  }
1523 
1524  // Clear the last 4 bits (miner fund activation).
1525  return nVersion & ~uint32_t(0x0f);
1526 }
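// Worked example of the final mask: VERSIONBITS_TOP_BITS is 0x20000000, so a
// computed nVersion of 0x2000000f (all four low bits set) is returned as
// 0x20000000, while 0x20000010 is left unchanged. Keeping the low nibble
// clear leaves it available for miner fund signalling, per the comment above.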
1527 
1528 // Returns the script flags which should be checked for the block after
1529 // the given block.
1530 static uint32_t GetNextBlockScriptFlags(const Consensus::Params &params,
1531  const CBlockIndex *pindex) {
1532  uint32_t flags = SCRIPT_VERIFY_NONE;
1533 
1534  // Start enforcing P2SH (BIP16)
1535  if ((pindex->nHeight + 1) >= params.BIP16Height) {
1537  }
1538 
1539  // Start enforcing the DERSIG (BIP66) rule.
1540  if ((pindex->nHeight + 1) >= params.BIP66Height) {
1542  }
1543 
1544  // Start enforcing CHECKLOCKTIMEVERIFY (BIP65) rule.
1545  if ((pindex->nHeight + 1) >= params.BIP65Height) {
1547  }
1548 
1549  // Start enforcing CSV (BIP68, BIP112 and BIP113) rule.
1550  if ((pindex->nHeight + 1) >= params.CSVHeight) {
1552  }
1553 
1554  // If the UAHF is enabled, we start accepting replay protected txns
1555  if (IsUAHFenabled(params, pindex)) {
1558  }
1559 
 1560  // If the DAA HF is enabled, we start rejecting transactions that use a high
 1561  // S value in their signature. We also make sure that signatures that are supposed
1562  // to fail (for instance in multisig or other forms of smart contracts) are
1563  // null.
1564  if (IsDAAEnabled(params, pindex)) {
1567  }
1568 
1569  // When the magnetic anomaly fork is enabled, we start accepting
 1570  // transactions using the OP_CHECKDATASIG opcode and its verify
 1571  // alternative. We also start enforcing push-only signatures and
1572  // clean stack.
1573  if (IsMagneticAnomalyEnabled(params, pindex)) {
1577  }
1578 
1579  if (IsGravitonEnabled(params, pindex)) {
1582  }
1583 
1584  if (IsPhononEnabled(params, pindex)) {
1586  }
1587 
1588  // We make sure this node will have replay protection during the next hard
1589  // fork.
1590  if (IsReplayProtectionEnabled(params, pindex)) {
1592  }
1593 
1594  return flags;
1595 }
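// Illustrative sketch of how these flags are consumed on the mempool path
// (this mirrors PreChecks above, which ORs in the standardness flags):
//
//   const uint32_t nextBlockFlags =
//       GetNextBlockScriptFlags(consensusParams, ::ChainActive().Tip());
//   const uint32_t mempoolPolicyFlags =
//       nextBlockFlags | STANDARD_SCRIPT_VERIFY_FLAGS;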
1596 
1597 static int64_t nTimeCheck = 0;
1598 static int64_t nTimeForks = 0;
1599 static int64_t nTimeVerify = 0;
1600 static int64_t nTimeConnect = 0;
1601 static int64_t nTimeIndex = 0;
1602 static int64_t nTimeCallbacks = 0;
1603 static int64_t nTimeTotal = 0;
1604 static int64_t nBlocksTotal = 0;
1605 
1613  CBlockIndex *pindex, CCoinsViewCache &view,
1614  const CChainParams &params,
1615  BlockValidationOptions options,
1616  bool fJustCheck) {
1618  assert(pindex);
1619  assert(*pindex->phashBlock == block.GetHash());
1620  int64_t nTimeStart = GetTimeMicros();
1621 
1622  const Consensus::Params &consensusParams = params.GetConsensus();
1623 
1624  // Check it again in case a previous version let a bad block in
1625  // NOTE: We don't currently (re-)invoke ContextualCheckBlock() or
1626  // ContextualCheckBlockHeader() here. This means that if we add a new
1627  // consensus rule that is enforced in one of those two functions, then we
1628  // may have let in a block that violates the rule prior to updating the
1629  // software, and we would NOT be enforcing the rule here. Fully solving
1630  // upgrade from one software version to the next after a consensus rule
1631  // change is potentially tricky and issue-specific.
1632  // Also, currently the rule against blocks more than 2 hours in the future
1633  // is enforced in ContextualCheckBlockHeader(); we wouldn't want to
1634  // re-enforce that rule here (at least until we make it impossible for
1635  // GetAdjustedTime() to go backward).
1636  if (!CheckBlock(block, state, consensusParams,
1637  options.withCheckPoW(!fJustCheck)
1638  .withCheckMerkleRoot(!fJustCheck))) {
1640  // We don't write down blocks to disk if they may have been
1641  // corrupted, so this should be impossible unless we're having
1642  // hardware problems.
1643  return AbortNode(state, "Corrupt block found indicating potential "
1644  "hardware failure; shutting down");
1645  }
1646  return error("%s: Consensus::CheckBlock: %s", __func__,
1647  state.ToString());
1648  }
1649 
1650  // Verify that the view's current state corresponds to the previous block
1651  BlockHash hashPrevBlock =
1652  pindex->pprev == nullptr ? BlockHash() : pindex->pprev->GetBlockHash();
1653  assert(hashPrevBlock == view.GetBestBlock());
1654 
1655  nBlocksTotal++;
1656 
1657  // Special case for the genesis block, skipping connection of its
1658  // transactions (its coinbase is unspendable)
1659  if (block.GetHash() == consensusParams.hashGenesisBlock) {
1660  if (!fJustCheck) {
1661  view.SetBestBlock(pindex->GetBlockHash());
1662  }
1663 
1664  return true;
1665  }
1666 
1667  bool fScriptChecks = true;
1668  if (!hashAssumeValid.IsNull()) {
1669  // We've been configured with the hash of a block which has been
1670  // externally verified to have a valid history. A suitable default value
1671  // is included with the software and updated from time to time. Because
1672  // validity relative to a piece of software is an objective fact these
1673  // defaults can be easily reviewed. This setting doesn't force the
1674  // selection of any particular chain but makes validating some faster by
1675  // effectively caching the result of part of the verification.
1676  BlockMap::const_iterator it =
1677  m_blockman.m_block_index.find(hashAssumeValid);
1678  if (it != m_blockman.m_block_index.end()) {
1679  if (it->second->GetAncestor(pindex->nHeight) == pindex &&
1680  pindexBestHeader->GetAncestor(pindex->nHeight) == pindex &&
1682  // This block is a member of the assumed verified chain and an
1683  // ancestor of the best header.
1684  // Script verification is skipped when connecting blocks under
1685  // the assumevalid block. Assuming the assumevalid block is
 1686  // valid, this is safe because block merkle hashes are still
 1687  // computed and checked. Of course, if an assumed valid block is
1688  // invalid due to false scriptSigs this optimization would allow
1689  // an invalid chain to be accepted.
1690  // The equivalent time check discourages hash power from
1691  // extorting the network via DOS attack into accepting an
1692  // invalid block through telling users they must manually set
1693  // assumevalid. Requiring a software change or burying the
1694  // invalid block, regardless of the setting, makes it hard to
1695  // hide the implication of the demand. This also avoids having
1696  // release candidates that are hardly doing any signature
1697  // verification at all in testing without having to artificially
1698  // set the default assumed verified block further back. The test
1699  // against nMinimumChainWork prevents the skipping when denied
1700  // access to any chain at least as good as the expected chain.
1701  fScriptChecks =
1703  *pindexBestHeader, *pindex, *pindexBestHeader,
1704  consensusParams) <= 60 * 60 * 24 * 7 * 2);
1705  }
1706  }
1707  }
1708 
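// Illustrative sketch (not part of validation.cpp): the assumevalid decision
// above, restated with plain types. The function name and parameters below
// are hypothetical; the real code compares CBlockIndex::GetAncestor() results
// and a proof-of-work-equivalent time between the block and the best header.
#include <cstdint>

static bool ShouldSkipScriptChecksSketch(bool isAncestorOfAssumeValid,
                                         bool isAncestorOfBestHeader,
                                         int64_t powEquivalentSecondsBehindTip) {
    // Roughly two weeks' worth of work, matching the 60 * 60 * 24 * 7 * 2
    // bound used above.
    constexpr int64_t TWO_WEEKS_OF_WORK = 60 * 60 * 24 * 7 * 2;
    return isAncestorOfAssumeValid && isAncestorOfBestHeader &&
           powEquivalentSecondsBehindTip <= TWO_WEEKS_OF_WORK;
}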
1709  int64_t nTime1 = GetTimeMicros();
1710  nTimeCheck += nTime1 - nTimeStart;
1711  LogPrint(BCLog::BENCH, " - Sanity checks: %.2fms [%.2fs (%.2fms/blk)]\n",
1712  MILLI * (nTime1 - nTimeStart), nTimeCheck * MICRO,
1714 
1715  // Do not allow blocks that contain transactions which 'overwrite' older
1716  // transactions, unless those are already completely spent. If such
1717  // overwrites are allowed, coinbases and transactions depending upon those
1718  // can be duplicated to remove the ability to spend the first instance --
1719  // even after being sent to another address.
1720  // See BIP30, CVE-2012-1909, and http://r6.ca/blog/20120206T005236Z.html
1721  // for more information. This logic is not necessary for memory pool
1722  // transactions, as AcceptToMemoryPool already refuses previously-known
1723  // transaction ids entirely. This rule was originally applied to all blocks
1724  // with a timestamp after March 15, 2012, 0:00 UTC. Now that the whole
1725  // chain is irreversibly beyond that time, it is applied to all blocks
1726  // except the two in the chain that violate it. This prevents exploiting
1727  // the issue against nodes during their initial block download.
1728  bool fEnforceBIP30 = !((pindex->nHeight == 91842 &&
1729  pindex->GetBlockHash() ==
1730  uint256S("0x00000000000a4d0a398161ffc163c503763"
1731  "b1f4360639393e0e4c8e300e0caec")) ||
1732  (pindex->nHeight == 91880 &&
1733  pindex->GetBlockHash() ==
1734  uint256S("0x00000000000743f190a18c5577a3c2d2a1f"
1735  "610ae9601ac046a38084ccb7cd721")));
1736 
1737  // Once BIP34 activated it was not possible to create new duplicate
1738  // coinbases and thus other than starting with the 2 existing duplicate
1739  // coinbase pairs, not possible to create overwriting txs. But by the time
1740  // BIP34 activated, in each of the existing pairs the duplicate coinbase had
1741  // overwritten the first before the first had been spent. Since those
1742  // coinbases are sufficiently buried it's no longer possible to create
1743  // further duplicate transactions descending from the known pairs either. If
1744  // we're on the known chain at height greater than where BIP34 activated, we
1745  // can save the db accesses needed for the BIP30 check.
1746 
1747  // BIP34 requires that a block at height X (block X) has its coinbase
1748  // scriptSig start with a CScriptNum of X (indicated height X). The above
1749  // logic of no longer requiring BIP30 once BIP34 activates is flawed in the
1750  // case that there is a block X before the BIP34 height of 227,931 which has
1751  // an indicated height Y where Y is greater than X. The coinbase for block
1752  // X would also be a valid coinbase for block Y, which could be a BIP30
1753  // violation. An exhaustive search of all mainnet coinbases before the
1754  // BIP34 height which have an indicated height greater than the block height
1755  // reveals many occurrences. The 3 lowest indicated heights found are
1756  // 209,921, 490,897, and 1,983,702 and thus coinbases for blocks at these 3
1757  // heights would be the first opportunity for BIP30 to be violated.
1758 
1759  // The search reveals a great many blocks which have an indicated height
1760  // greater than 1,983,702, so we simply remove the optimization to skip
1761  // BIP30 checking for blocks at height 1,983,702 or higher. Before we reach
1762  // that block in another 25 years or so, we should take advantage of a
1763  // future consensus change to do a new and improved version of BIP34 that
1764  // will actually prevent ever creating any duplicate coinbases in the
1765  // future.
1766  static constexpr int BIP34_IMPLIES_BIP30_LIMIT = 1983702;
1767 
1768  // There is no potential to create a duplicate coinbase at block 209,921
1769  // because this is still before the BIP34 height and so explicit BIP30
1770  // checking is still active.
1771 
1772  // The final case is block 176,684 which has an indicated height of
1773  // 490,897. Unfortunately, this issue was not discovered until about 2 weeks
1774  // before block 490,897 so there was not much opportunity to address this
1775  // case other than to carefully analyze it and determine it would not be a
1776  // problem. Block 490,897 was, in fact, mined with a different coinbase than
1777  // block 176,684, but it is important to note that even if it hadn't been or
1778  // is remined on an alternate fork with a duplicate coinbase, we would still
1779  // not run into a BIP30 violation. This is because the coinbase for 176,684
1780  // is spent in block 185,956 in transaction
1781  // d4f7fbbf92f4a3014a230b2dc70b8058d02eb36ac06b4a0736d9d60eaa9e8781. This
1782  // spending transaction can't be duplicated because it also spends coinbase
1783  // 0328dd85c331237f18e781d692c92de57649529bd5edf1d01036daea32ffde29. This
1784  // coinbase has an indicated height of over 4.2 billion, and wouldn't be
1785  // duplicatable until that height, and it's currently impossible to create a
1786  // chain that long. Nevertheless we may wish to consider a future soft fork
1787  // which retroactively prevents block 490,897 from creating a duplicate
1788  // coinbase. The two historical BIP30 violations often provide a confusing
1789  // edge case when manipulating the UTXO and it would be simpler not to have
1790  // another edge case to deal with.
1791 
1792  // testnet3 has no blocks before the BIP34 height with indicated heights
1793  // post BIP34 before approximately height 486,000,000 and presumably will
1794  // be reset before it reaches block 1,983,702 and starts doing unnecessary
1795  // BIP30 checking again.
1796  assert(pindex->pprev);
1797  CBlockIndex *pindexBIP34height =
1798  pindex->pprev->GetAncestor(consensusParams.BIP34Height);
1799  // Only continue to enforce if we're below BIP34 activation height or the
1800  // block hash at that height doesn't correspond.
1801  fEnforceBIP30 =
1802  fEnforceBIP30 &&
1803  (!pindexBIP34height ||
1804  !(pindexBIP34height->GetBlockHash() == consensusParams.BIP34Hash));
1805 
1806  // TODO: Remove BIP30 checking from block height 1,983,702 on, once we have
1807  // a consensus change that ensures coinbases at those heights cannot
1808  // duplicate earlier coinbases.
1809  if (fEnforceBIP30 || pindex->nHeight >= BIP34_IMPLIES_BIP30_LIMIT) {
1810  for (const auto &tx : block.vtx) {
1811  for (size_t o = 0; o < tx->vout.size(); o++) {
1812  if (view.HaveCoin(COutPoint(tx->GetId(), o))) {
1813  LogPrintf("ERROR: ConnectBlock(): tried to overwrite "
1814  "transaction\n");
1816  "bad-txns-BIP30");
1817  }
1818  }
1819  }
1820  }
1821 
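// Illustrative sketch (not part of validation.cpp): the essence of the BIP30
// check above is refusing any transaction whose outputs would overwrite coins
// that are still unspent in the UTXO view. A minimal standalone stand-in,
// with hypothetical types in place of TxId/COutPoint/CCoinsViewCache:
#include <cstdint>
#include <set>
#include <string>
#include <utility>

using TxIdSketch = std::string;
using OutPointSketch = std::pair<TxIdSketch, uint32_t>;

// True if a transaction with `txid` and `numOutputs` outputs would overwrite
// an existing unspent coin -- the condition rejected as "bad-txns-BIP30".
static bool WouldOverwriteCoinSketch(const std::set<OutPointSketch> &unspent,
                                     const TxIdSketch &txid,
                                     uint32_t numOutputs) {
    for (uint32_t o = 0; o < numOutputs; o++) {
        if (unspent.count({txid, o}) != 0) {
            return true;
        }
    }
    return false;
}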
1822  // Start enforcing BIP68 (sequence locks).
1823  int nLockTimeFlags = 0;
1824  if (pindex->nHeight >= consensusParams.CSVHeight) {
1825  nLockTimeFlags |= LOCKTIME_VERIFY_SEQUENCE;
1826  }
1827 
1828  const uint32_t flags =
1829  GetNextBlockScriptFlags(consensusParams, pindex->pprev);
1830 
1831  int64_t nTime2 = GetTimeMicros();
1832  nTimeForks += nTime2 - nTime1;
1833  LogPrint(BCLog::BENCH, " - Fork checks: %.2fms [%.2fs (%.2fms/blk)]\n",
1834  MILLI * (nTime2 - nTime1), nTimeForks * MICRO,
1836 
1837  std::vector<int> prevheights;
1838  Amount nFees = Amount::zero();
1839  int nInputs = 0;
1840 
1841  // Limit the total executed signature operations in the block, a consensus
1842  // rule. Tracking during the CPU-consuming part (validation of uncached
1843  // inputs) is per-input atomic and validation in each thread stops very
1844  // quickly after the limit is exceeded, so an adversary cannot cause us to
1845  // exceed the limit by much at all.
1846  CheckInputsLimiter nSigChecksBlockLimiter(
1848 
1849  std::vector<TxSigCheckLimiter> nSigChecksTxLimiters;
1850  nSigChecksTxLimiters.resize(block.vtx.size() - 1);
1851 
1852  CBlockUndo blockundo;
1853  blockundo.vtxundo.resize(block.vtx.size() - 1);
1854 
1855  CCheckQueueControl<CScriptCheck> control(fScriptChecks ? &scriptcheckqueue
1856  : nullptr);
1857 
1858  // Add all outputs
1859  try {
1860  for (const auto &ptx : block.vtx) {
1861  AddCoins(view, *ptx, pindex->nHeight);
1862  }
1863  } catch (const std::logic_error &e) {
1864  // This error will be thrown from AddCoin if we try to connect a block
1865  // containing duplicate transactions. Such a thing should normally be
1866  // caught early nowadays (due to ContextualCheckBlock's CTOR
1867  // enforcement); however, some edge cases can escape that:
1868  // - ContextualCheckBlock does not get re-run after saving the block to
1869  // disk, and older versions may have saved a weird block.
1870  // - its checks are not applied to pre-CTOR chains, which we might visit
1871  // with checkpointing off.
1872  LogPrintf("ERROR: ConnectBlock(): tried to overwrite transaction\n");
1874  "tx-duplicate");
1875  }
1876 
1877  size_t txIndex = 0;
1878  for (const auto &ptx : block.vtx) {
1879  const CTransaction &tx = *ptx;
1880  const bool isCoinBase = tx.IsCoinBase();
1881  nInputs += tx.vin.size();
1882 
1883  {
1884  Amount txfee = Amount::zero();
1885  TxValidationState tx_state;
1886  if (!isCoinBase &&
1887  !Consensus::CheckTxInputs(tx, tx_state, view, pindex->nHeight,
1888  txfee)) {
1889  // Any transaction validation failure in ConnectBlock is a block
1890  // consensus failure.
1892  tx_state.GetRejectReason(),
1893  tx_state.GetDebugMessage());
1894 
1895  return error("%s: Consensus::CheckTxInputs: %s, %s", __func__,
1896  tx.GetId().ToString(), state.ToString());
1897  }
1898  nFees += txfee;
1899  }
1900 
1901  if (!MoneyRange(nFees)) {
1902  LogPrintf("ERROR: %s: accumulated fee in the block out of range.\n",
1903  __func__);
1905  "bad-txns-accumulated-fee-outofrange");
1906  }
1907 
1908  // The following checks do not apply to the coinbase.
1909  if (isCoinBase) {
1910  continue;
1911  }
1912 
1913  // Check that the transaction is BIP68 final. BIP68 lock checks (as
1914  // opposed to nLockTime checks) must be in ConnectBlock because they
1915  // require the UTXO set.
1916  prevheights.resize(tx.vin.size());
1917  for (size_t j = 0; j < tx.vin.size(); j++) {
1918  prevheights[j] = view.AccessCoin(tx.vin[j].prevout).GetHeight();
1919  }
1920 
1921  if (!SequenceLocks(tx, nLockTimeFlags, prevheights, *pindex)) {
1922  LogPrintf("ERROR: %s: contains a non-BIP68-final transaction\n",
1923  __func__);
1925  "bad-txns-nonfinal");
1926  }
1927 
1928  // Don't cache results if we're actually connecting blocks (still
1929  // consult the cache, though).
1930  bool fCacheResults = fJustCheck;
1931 
1932  const bool fEnforceSigCheck = flags & SCRIPT_ENFORCE_SIGCHECKS;
1933  if (!fEnforceSigCheck) {
1934  // Historically, there have been transactions with a very high
1935  // sigcheck count, so we need to disable this check for such
1936  // transactions.
1937  nSigChecksTxLimiters[txIndex] = TxSigCheckLimiter::getDisabled();
1938  }
1939 
1940  std::vector<CScriptCheck> vChecks;
1941  // nSigChecksRet may be accurate (found in cache) or 0 (checks were
1942  // deferred into vChecks).
1943  int nSigChecksRet;
1944  TxValidationState tx_state;
1945  if (fScriptChecks &&
1946  !CheckInputScripts(tx, tx_state, view, flags, fCacheResults,
1947  fCacheResults, PrecomputedTransactionData(tx),
1948  nSigChecksRet, nSigChecksTxLimiters[txIndex],
1949  &nSigChecksBlockLimiter, &vChecks)) {
1950  // Any transaction validation failure in ConnectBlock is a block
1951  // consensus failure
1953  tx_state.GetRejectReason(),
1954  tx_state.GetDebugMessage());
1955  return error(
1956  "ConnectBlock(): CheckInputScripts on %s failed with %s",
1957  tx.GetId().ToString(), state.ToString());
1958  }
1959 
1960  control.Add(vChecks);
1961 
1962  // Note: this must execute in the same iteration as CheckTxInputs (not
1963  // in a separate loop) in order to detect double spends. However,
1964  // this does not prevent double-spending by duplicated transaction
1965  // inputs in the same transaction (cf. CVE-2018-17144) -- that check is
1966  // done in CheckBlock (CheckRegularTransaction).
1967  SpendCoins(view, tx, blockundo.vtxundo.at(txIndex), pindex->nHeight);
1968  txIndex++;
1969  }
1970 
1971  int64_t nTime3 = GetTimeMicros();
1972  nTimeConnect += nTime3 - nTime2;
1974  " - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) "
1975  "[%.2fs (%.2fms/blk)]\n",
1976  (unsigned)block.vtx.size(), MILLI * (nTime3 - nTime2),
1977  MILLI * (nTime3 - nTime2) / block.vtx.size(),
1978  nInputs <= 1 ? 0 : MILLI * (nTime3 - nTime2) / (nInputs - 1),
1980 
1981  Amount blockReward =
1982  nFees + GetBlockSubsidy(pindex->nHeight, consensusParams);
1983  if (block.vtx[0]->GetValueOut() > blockReward) {
1984  LogPrintf("ERROR: ConnectBlock(): coinbase pays too much (actual=%d vs "
1985  "limit=%d)\n",
1986  block.vtx[0]->GetValueOut(), blockReward);
1988  "bad-cb-amount");
1989  }
1990 
1991  const std::vector<CTxDestination> whitelist =
1992  GetMinerFundWhitelist(consensusParams, pindex->pprev);
1993  if (!whitelist.empty()) {
1994  const Amount required = GetMinerFundAmount(blockReward);
1995 
1996  for (auto &o : block.vtx[0]->vout) {
1997  if (o.nValue < required) {
1998  // This output doesn't qualify because its amount is too low.
1999  continue;
2000  }
2001 
2002  CTxDestination address;
2003  if (!ExtractDestination(o.scriptPubKey, address)) {
2004  // Cannot decode address.
2005  continue;
2006  }
2007 
2008  if (std::find(whitelist.begin(), whitelist.end(), address) !=
2009  whitelist.end()) {
2010  goto MinerFundSuccess;
2011  }
2012  }
2013 
2014  // We did not find an output that matches the miner fund requirements.
2016  "bad-cb-minerfund");
2017  }
2018 
2019 MinerFundSuccess:
2020 
2021  if (!control.Wait()) {
2023  "blk-bad-inputs", "parallel script check failed");
2024  }
2025 
2026  int64_t nTime4 = GetTimeMicros();
2027  nTimeVerify += nTime4 - nTime2;
2028  LogPrint(
2029  BCLog::BENCH,
2030  " - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs (%.2fms/blk)]\n",
2031  nInputs - 1, MILLI * (nTime4 - nTime2),
2032  nInputs <= 1 ? 0 : MILLI * (nTime4 - nTime2) / (nInputs - 1),
2034 
2035  if (fJustCheck) {
2036  return true;
2037  }
2038 
2039  if (!WriteUndoDataForBlock(blockundo, state, pindex, params)) {
2040  return false;
2041  }
2042 
2043  if (!pindex->IsValid(BlockValidity::SCRIPTS)) {
2045  setDirtyBlockIndex.insert(pindex);
2046  }
2047 
2048  assert(pindex->phashBlock);
2049  // add this block to the view's block chain
2050  view.SetBestBlock(pindex->GetBlockHash());
2051 
2052  int64_t nTime5 = GetTimeMicros();
2053  nTimeIndex += nTime5 - nTime4;
2054  LogPrint(BCLog::BENCH, " - Index writing: %.2fms [%.2fs (%.2fms/blk)]\n",
2055  MILLI * (nTime5 - nTime4), nTimeIndex * MICRO,
2057 
2058  int64_t nTime6 = GetTimeMicros();
2059  nTimeCallbacks += nTime6 - nTime5;
2060  LogPrint(BCLog::BENCH, " - Callbacks: %.2fms [%.2fs (%.2fms/blk)]\n",
2061  MILLI * (nTime6 - nTime5), nTimeCallbacks * MICRO,
2063 
2064  return true;
2065 }
2066 
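// Illustrative sketch (not part of validation.cpp): the two coinbase checks at
// the end of ConnectBlock, restated with plain 64-bit satoshi amounts. The
// struct and function names are hypothetical; the real code uses Amount,
// GetBlockSubsidy(), GetMinerFundAmount() and ExtractDestination().
#include <cstdint>
#include <vector>

struct CoinbaseOutputSketch {
    int64_t valueSats;
    bool paysWhitelistedAddress; // stand-in for the destination whitelist test
};

// The coinbase may not claim more than the accumulated fees plus the subsidy.
static bool CoinbasePaysTooMuch(int64_t coinbaseValueOut, int64_t fees,
                                int64_t subsidy) {
    return coinbaseValueOut > fees + subsidy;
}

// When the miner fund is active, at least one coinbase output must pay the
// required amount to a whitelisted destination.
static bool HasMinerFundOutput(const std::vector<CoinbaseOutputSketch> &outs,
                               int64_t requiredSats) {
    for (const auto &out : outs) {
        if (out.valueSats >= requiredSats && out.paysWhitelistedAddress) {
            return true;
        }
    }
    return false;
}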
2068 CChainState::GetCoinsCacheSizeState(const CTxMemPool *tx_pool) {
2069  return this->GetCoinsCacheSizeState(
2070  tx_pool, m_coinstip_cache_size_bytes,
2071  gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000);
2072 }
2073 
2075 CChainState::GetCoinsCacheSizeState(const CTxMemPool *tx_pool,
2076  size_t max_coins_cache_size_bytes,
2077  size_t max_mempool_size_bytes) {
2078  int64_t nMempoolUsage = tx_pool->DynamicMemoryUsage();
2079  int64_t cacheSize = CoinsTip().DynamicMemoryUsage();
2080  int64_t nTotalSpace =
2081  max_coins_cache_size_bytes +
2082  std::max<int64_t>(max_mempool_size_bytes - nMempoolUsage, 0);
2083 
2085  static constexpr int64_t MAX_BLOCK_COINSDB_USAGE_BYTES =
2086  10 * 1024 * 1024; // 10 MiB
2087  int64_t large_threshold = std::max(
2088  (9 * nTotalSpace) / 10, nTotalSpace - MAX_BLOCK_COINSDB_USAGE_BYTES);
2089 
2090  if (cacheSize > nTotalSpace) {
2091  LogPrintf("Cache size (%s) exceeds total space (%s)\n", cacheSize,
2092  nTotalSpace);
2094  } else if (cacheSize > large_threshold) {
2096  }
2097  return CoinsCacheSizeState::OK;
2098 }
2099 
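// Illustrative sketch (not part of validation.cpp): the thresholds computed by
// GetCoinsCacheSizeState above, with a worked example. The enum and function
// below are hypothetical stand-ins; all sizes are in bytes.
#include <algorithm>
#include <cstdint>

enum class CacheStateSketch { OK, LARGE, CRITICAL };

static CacheStateSketch ClassifyCacheUsage(int64_t cacheBytes,
                                           int64_t coinsCacheLimit,
                                           int64_t mempoolLimit,
                                           int64_t mempoolUsage) {
    // Unused mempool budget is lent to the coins cache.
    const int64_t totalSpace =
        coinsCacheLimit + std::max<int64_t>(mempoolLimit - mempoolUsage, 0);
    // "Large" means within 10% and within 10 MiB of the limit.
    const int64_t largeThreshold =
        std::max((9 * totalSpace) / 10, totalSpace - 10 * 1024 * 1024);
    if (cacheBytes > totalSpace) {
        return CacheStateSketch::CRITICAL;
    }
    return cacheBytes > largeThreshold ? CacheStateSketch::LARGE
                                       : CacheStateSketch::OK;
}
// Example: with a 450 MB coins cache limit, a 300 MB mempool limit and 100 MB
// of mempool usage, totalSpace is 650 MB and largeThreshold is roughly
// 639.5 MB (totalSpace minus 10 MiB, which exceeds 90% of totalSpace).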
2100 bool CChainState::FlushStateToDisk(const CChainParams &chainparams,
2101  BlockValidationState &state,
2102  FlushStateMode mode,
2103  int nManualPruneHeight) {
2104  LOCK(cs_main);
2105  assert(this->CanFlushToDisk());
2106  static std::chrono::microseconds nLastWrite{0};
2107  static std::chrono::microseconds nLastFlush{0};
2108  std::set<int> setFilesToPrune;
2109  bool full_flush_completed = false;
2110 
2111  const size_t coins_count = CoinsTip().GetCacheSize();
2112  const size_t coins_mem_usage = CoinsTip().DynamicMemoryUsage();
2113 
2114  try {
2115  {
2116  bool fFlushForPrune = false;
2117  bool fDoFullFlush = false;
2118  CoinsCacheSizeState cache_state =
2119  GetCoinsCacheSizeState(&m_mempool);
2120  LOCK(cs_LastBlockFile);
2121  if (fPruneMode && (fCheckForPruning || nManualPruneHeight > 0) &&
2122  !fReindex) {
2123  if (nManualPruneHeight > 0) {
2125  "find files to prune (manual)", BCLog::BENCH);
2127  setFilesToPrune, nManualPruneHeight, m_chain.Height());
2128  } else {
2129  LOG_TIME_MILLIS_WITH_CATEGORY("find files to prune",
2130  BCLog::BENCH);
2132  setFilesToPrune, chainparams.PruneAfterHeight(),
2134  fCheckForPruning = false;
2135  }
2136  if (!setFilesToPrune.empty()) {
2137  fFlushForPrune = true;
2138  if (!fHavePruned) {
2139  pblocktree->WriteFlag("prunedblockfiles", true);
2140  fHavePruned = true;
2141  }
2142  }
2143  }
2144  const auto nNow = GetTime<std::chrono::microseconds>();
2145  // Avoid writing/flushing immediately after startup.
2146  if (nLastWrite.count() == 0) {
2147  nLastWrite = nNow;
2148  }
2149  if (nLastFlush.count() == 0) {
2150  nLastFlush = nNow;
2151  }
2152  // The cache is large and we're within 10% and 10 MiB of the limit,
2153  // but we have time now (not in the middle of a block processing).
2154  bool fCacheLarge = mode == FlushStateMode::PERIODIC &&
2155  cache_state >= CoinsCacheSizeState::LARGE;
2156  // The cache is over the limit, we have to write now.
2157  bool fCacheCritical = mode == FlushStateMode::IF_NEEDED &&
2158  cache_state >= CoinsCacheSizeState::CRITICAL;
2159  // It's been a while since we wrote the block index to disk. Do this
2160  // frequently, so we don't need to redownload after a crash.
2161  bool fPeriodicWrite = mode == FlushStateMode::PERIODIC &&
2162  nNow > nLastWrite + DATABASE_WRITE_INTERVAL;
2163  // It's been a long time since we flushed the cache. Do this
2164  // infrequently, to optimize cache usage.
2165  bool fPeriodicFlush = mode == FlushStateMode::PERIODIC &&
2166  nNow > nLastFlush + DATABASE_FLUSH_INTERVAL;
2167  // Combine all conditions that result in a full cache flush.
2168  fDoFullFlush = (mode == FlushStateMode::ALWAYS) || fCacheLarge ||
2169  fCacheCritical || fPeriodicFlush || fFlushForPrune;
2170  // Write blocks and block index to disk.
2171  if (fDoFullFlush || fPeriodicWrite) {
2172  // Depend on nMinDiskSpace to ensure we can write block index
2173  if (!CheckDiskSpace(GetBlocksDir())) {
2174  return AbortNode(state, "Disk space is too low!",
2175  _("Disk space is too low!"));
2176  }
2177 
2178  {
2180  "write block and undo data to disk", BCLog::BENCH);
2181 
2182  // First make sure all block and undo data is flushed to
2183  // disk.
2184  FlushBlockFile();
2185  }
2186  // Then update all block file information (which may refer to
2187  // block and undo files).
2188  {
2189  LOG_TIME_MILLIS_WITH_CATEGORY("write block index to disk",
2190  BCLog::BENCH);
2191 
2192  std::vector<std::pair<int, const CBlockFileInfo *>> vFiles;
2193  vFiles.reserve(setDirtyFileInfo.size());
2194  for (int i : setDirtyFileInfo) {
2195  vFiles.push_back(std::make_pair(i, &vinfoBlockFile[i]));
2196  }
2197 
2198  setDirtyFileInfo.clear();
2199 
2200  std::vector<const CBlockIndex *> vBlocks;
2201  vBlocks.reserve(setDirtyBlockIndex.size());
2202  for (const CBlockIndex *cbi : setDirtyBlockIndex) {
2203  vBlocks.push_back(cbi);
2204  }
2205 
2206  setDirtyBlockIndex.clear();
2207 
2208  if (!pblocktree->WriteBatchSync(vFiles, nLastBlockFile,
2209  vBlocks)) {
2210  return AbortNode(
2211  state, "Failed to write to block index database");
2212  }
2213  }
2214 
2215  // Finally remove any pruned files
2216  if (fFlushForPrune) {
2217  LOG_TIME_MILLIS_WITH_CATEGORY("unlink pruned files",
2218  BCLog::BENCH);
2219 
2220  UnlinkPrunedFiles(setFilesToPrune);
2221  }
2222  nLastWrite = nNow;
2223  }
2224  // Flush best chain related state. This can only be done if the
2225  // blocks / block index write was also done.
2226  if (fDoFullFlush && !CoinsTip().GetBestBlock().IsNull()) {
2228  strprintf("write coins cache to disk (%d coins, %.2fkB)",
2229  coins_count, coins_mem_usage / 1000));
2230 
2231  // Typical Coin structures on disk are around 48 bytes in size.
2232  // Pushing a new one to the database can cause it to be written
2233  // twice (once in the log, and once in the tables). This is
2234  // already an overestimation, as most will delete an existing
2235  // entry or overwrite one. Still, use a conservative safety
2236  // factor of 2.
2237  if (!CheckDiskSpace(GetDataDir(),
2238  48 * 2 * 2 * CoinsTip().GetCacheSize())) {
2239  return AbortNode(state, "Disk space is too low!",
2240  _("Disk space is too low!"));
2241  }
2242 
2243  // Flush the chainstate (which may refer to block index
2244  // entries).
2245  if (!CoinsTip().Flush()) {
2246  return AbortNode(state, "Failed to write to coin database");
2247  }
2248  nLastFlush = nNow;
2249  full_flush_completed = true;
2250  }
2251  }
2252 
2253  if (full_flush_completed) {
2254  // Update best block in wallet (so we can detect restored wallets).
2256  }
2257  } catch (const std::runtime_error &e) {
2258  return AbortNode(state, std::string("System error while flushing: ") +
2259  e.what());
2260  }
2261  return true;
2262 }
2263 
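// Illustrative sketch (not part of validation.cpp): how FlushStateToDisk
// combines its triggers. Names are hypothetical; the 1h/24h intervals mirror
// DATABASE_WRITE_INTERVAL and DATABASE_FLUSH_INTERVAL.
#include <chrono>

struct FlushDecisionSketch {
    bool fullFlush;       // flush block files, block index and coins cache
    bool writeBlockIndex; // at minimum, write block files and block index
};

static FlushDecisionSketch DecideFlushSketch(
    bool modeAlways, bool modeIfNeeded, bool modePeriodic, bool cacheLarge,
    bool cacheCritical, bool pruneRequested,
    std::chrono::microseconds sinceLastWrite,
    std::chrono::microseconds sinceLastFlush) {
    using namespace std::chrono_literals;
    const bool fCacheLarge = modePeriodic && cacheLarge;
    const bool fCacheCritical = modeIfNeeded && cacheCritical;
    const bool fPeriodicWrite = modePeriodic && sinceLastWrite > 1h;
    const bool fPeriodicFlush = modePeriodic && sinceLastFlush > 24h;
    const bool fDoFullFlush = modeAlways || fCacheLarge || fCacheCritical ||
                              fPeriodicFlush || pruneRequested;
    return {fDoFullFlush, fDoFullFlush || fPeriodicWrite};
}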
2265  BlockValidationState state;
2266  const CChainParams &chainparams = Params();
2267  if (!this->FlushStateToDisk(chainparams, state, FlushStateMode::ALWAYS)) {
2268  LogPrintf("%s: failed to flush state (%s)\n", __func__,
2269  state.ToString());
2270  }
2271 }
2272 
2274  BlockValidationState state;
2275  fCheckForPruning = true;
2276  const CChainParams &chainparams = Params();
2277  if (!this->FlushStateToDisk(chainparams, state, FlushStateMode::NONE)) {
2278  LogPrintf("%s: failed to flush state (%s)\n", __func__,
2279  state.ToString());
2280  }
2281 }
2282 
2284 static void UpdateTip(CTxMemPool &mempool, const CChainParams &params,
2285  CBlockIndex *pindexNew)
2287  // New best block
2288  mempool.AddTransactionsUpdated(1);
2289 
2290  {
2292  g_best_block = pindexNew->GetBlockHash();
2293  g_best_block_cv.notify_all();
2294  }
2295 
2296  LogPrintf("%s: new best=%s height=%d version=0x%08x log2_work=%f tx=%ld "
2297  "date='%s' progress=%f cache=%.1fMiB(%utxo)\n",
2298  __func__, pindexNew->GetBlockHash().ToString(),
2299  pindexNew->nHeight, pindexNew->nVersion,
2300  log(pindexNew->nChainWork.getdouble()) / log(2.0),
2301  pindexNew->GetChainTxCount(),
2302  FormatISO8601DateTime(pindexNew->GetBlockTime()),
2303  GuessVerificationProgress(params.TxData(), pindexNew),
2304  ::ChainstateActive().CoinsTip().DynamicMemoryUsage() *
2305  (1.0 / (1 << 20)),
2306  ::ChainstateActive().CoinsTip().GetCacheSize());
2307 }
2308 
2321  BlockValidationState &state,
2322  DisconnectedBlockTransactions *disconnectpool) {
2325  CBlockIndex *pindexDelete = m_chain.Tip();
2326  const Consensus::Params &consensusParams = params.GetConsensus();
2327 
2328  assert(pindexDelete);
2329 
2330  // Read block from disk.
2331  std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
2332  CBlock &block = *pblock;
2333  if (!ReadBlockFromDisk(block, pindexDelete, consensusParams)) {
2334  return error("DisconnectTip(): Failed to read block");
2335  }
2336 
2337  // Apply the block atomically to the chain state.
2338  int64_t nStart = GetTimeMicros();
2339  {
2340  CCoinsViewCache view(&CoinsTip());
2341  assert(view.GetBestBlock() == pindexDelete->GetBlockHash());
2342  if (DisconnectBlock(block, pindexDelete, view) !=
2344  return error("DisconnectTip(): DisconnectBlock %s failed",
2345  pindexDelete->GetBlockHash().ToString());
2346  }
2347 
2348  bool flushed = view.Flush();
2349  assert(flushed);
2350  }
2351 
2352  LogPrint(BCLog::BENCH, "- Disconnect block: %.2fms\n",
2353  (GetTimeMicros() - nStart) * MILLI);
2354 
2355  // Write the chain state to disk, if necessary.
2356  if (!FlushStateToDisk(params, state, FlushStateMode::IF_NEEDED)) {
2357  return false;
2358  }
2359 
2360  // If this block is deactivating a fork, we move all mempool transactions
2361  // in front of disconnectpool for reprocessing in a future
2362  // updateMempoolForReorg call.
2363  if (pindexDelete->pprev != nullptr &&
2364  GetNextBlockScriptFlags(consensusParams, pindexDelete) !=
2365  GetNextBlockScriptFlags(consensusParams, pindexDelete->pprev)) {
2367  "Disconnecting mempool due to rewind of upgrade block\n");
2368  if (disconnectpool) {
2369  disconnectpool->importMempool(m_mempool);
2370  }
2371  m_mempool.clear();
2372  }
2373 
2374  if (disconnectpool) {
2375  disconnectpool->addForBlock(block.vtx, m_mempool);
2376  }
2377 
2378  // If the tip is finalized, then undo it.
2379  if (m_finalizedBlockIndex == pindexDelete) {
2380  m_finalizedBlockIndex = pindexDelete->pprev;
2381  }
2382 
2383  m_chain.SetTip(pindexDelete->pprev);
2384 
2385  // Update ::ChainActive() and related variables.
2386  UpdateTip(m_mempool, params, pindexDelete->pprev);
2387  // Let wallets know transactions went from 1-confirmed to
2388  // 0-confirmed or conflicted:
2389  GetMainSignals().BlockDisconnected(pblock, pindexDelete);
2390  return true;
2391 }
2392 
2393 static int64_t nTimeReadFromDisk = 0;
2394 static int64_t nTimeConnectTotal = 0;
2395 static int64_t nTimeFlush = 0;
2396 static int64_t nTimeChainState = 0;
2397 static int64_t nTimePostConnect = 0;
2398 
2400  CBlockIndex *pindex = nullptr;
2401  std::shared_ptr<const CBlock> pblock;
2403 };
2404 
2413 private:
2414  std::vector<PerBlockConnectTrace> blocksConnected;
2415 
2416 public:
2417  explicit ConnectTrace() : blocksConnected(1) {}
2418 
2420  std::shared_ptr<const CBlock> pblock) {
2421  assert(!blocksConnected.back().pindex);
2422  assert(pindex);
2423  assert(pblock);
2424  blocksConnected.back().pindex = pindex;
2425  blocksConnected.back().pblock = std::move(pblock);
2426  blocksConnected.emplace_back();
2427  }
2428 
2429  std::vector<PerBlockConnectTrace> &GetBlocksConnected() {
2430  // We always keep one extra block at the end of our list because blocks
2431  // are added after all the conflicted transactions have been filled in.
2432  // Thus, the last entry should always be an empty one waiting for the
2433  // transactions from the next block. We pop the last entry here to make
2434  // sure the list we return is sane.
2435  assert(!blocksConnected.back().pindex);
2436  blocksConnected.pop_back();
2437  return blocksConnected;
2438  }
2439 };
2440 
2442  const CBlockIndex *pindex) {
2444  if (pindex->nStatus.isInvalid()) {
2445  // We try to finalize an invalid block.
2446  LogPrintf("ERROR: %s: Trying to finalize invalid block %s\n", __func__,
2447  pindex->GetBlockHash().ToString());
2449  "finalize-invalid-block");
2450  }
2451 
2452  // Check that the request is consistent with current finalization.
2453  if (m_finalizedBlockIndex &&
2454  !AreOnTheSameFork(pindex, m_finalizedBlockIndex)) {
2455  LogPrintf("ERROR: %s: Trying to finalize block %s which conflicts with "
2456  "already finalized block\n",
2457  __func__, pindex->GetBlockHash().ToString());
2459  "bad-fork-prior-finalized");
2460  }
2461 
2462  if (IsBlockFinalized(pindex)) {
2463  // The block is already finalized.
2464  return true;
2465  }
2466 
2467  // We have a new block to finalize.
2468  m_finalizedBlockIndex = pindex;
2469  return true;
2470 }
2471 
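// Illustrative sketch (not part of validation.cpp): the fork-consistency test
// used above. Two block indexes are on the same fork exactly when the one at
// the lower height is an ancestor of the other; the types and the ancestor
// walk below are hypothetical stand-ins for CBlockIndex and GetAncestor().
#include <utility>

struct IndexSketch {
    int height;
    const IndexSketch *parent;
};

static const IndexSketch *AncestorAtSketch(const IndexSketch *block,
                                           int height) {
    while (block && block->height > height) {
        block = block->parent;
    }
    return block;
}

static bool OnSameForkSketch(const IndexSketch *a, const IndexSketch *b) {
    if (a->height < b->height) {
        std::swap(a, b);
    }
    // b is at the lower height: they share a fork iff b is an ancestor of a.
    return AncestorAtSketch(a, b->height) == b;
}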
2475 
2476  const int32_t maxreorgdepth =
2477  gArgs.GetArg("-maxreorgdepth", DEFAULT_MAX_REORG_DEPTH);
2478 
2479  const int64_t finalizationdelay =
2480  gArgs.GetArg("-finalizationdelay", DEFAULT_MIN_FINALIZATION_DELAY);
2481 
2482  // Find our candidate.
2483  // If maxreorgdepth is < 0, pindex will be null and auto-finalization
2484  // is disabled.
2485  const CBlockIndex *pindex =
2486  pindexNew->GetAncestor(pindexNew->nHeight - maxreorgdepth);
2487 
2488  int64_t now = GetTime();
2489 
2490  // If the finalization delay has not expired since the startup time,
2491  // finalization should be avoided. Header receive time is not saved to disk
2492  // and so cannot be earlier than the startup time.
2493  if (now < (GetStartupTime() + finalizationdelay)) {
2494  return nullptr;
2495  }
2496 
2497  // While our candidate is not eligible (finalization delay not expired), try
2498  // the previous one.
2499  while (pindex && (pindex != ::ChainstateActive().GetFinalizedBlock())) {
2500  // Check that the block to finalize has been known for long enough.
2501  // This test will ensure that an attacker could not cause a block to
2502  // finalize by forking the chain with a depth > maxreorgdepth.
2503  // If the block is loaded from disk, header receive time is 0 and the
2504  // block will be finalized. This is safe because the delay since
2505  // node startup has already expired.
2506  auto headerReceivedTime = pindex->GetHeaderReceivedTime();
2507 
2508  // If finalization delay is <= 0, finalization always occurs immediately
2509  if (now >= (headerReceivedTime + finalizationdelay)) {
2510  return pindex;
2511  }
2512 
2513  pindex = pindex->pprev;
2514  }
2515 
2516  return nullptr;
2517 }
2518 
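// Illustrative sketch (not part of validation.cpp): the candidate selection in
// FindBlockToFinalize with example numbers. With -maxreorgdepth=10 and a new
// tip at height 1000, the first candidate is the ancestor at height 990, and
// it is only returned once its header has been known for -finalizationdelay
// seconds (headers loaded from disk have a receive time of 0, so they qualify
// once the startup delay itself has expired).
#include <cstdint>

static int64_t FirstFinalizationCandidateHeight(int64_t tipHeight,
                                                int64_t maxReorgDepth) {
    return tipHeight - maxReorgDepth; // e.g. 1000 - 10 = 990
}

static bool FinalizationDelayExpired(int64_t now, int64_t headerReceivedTime,
                                     int64_t finalizationDelay) {
    return now >= headerReceivedTime + finalizationDelay;
}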
2528  CBlockIndex *pindexNew,
2529  const std::shared_ptr<const CBlock> &pblock,
2530  ConnectTrace &connectTrace,
2531  DisconnectedBlockTransactions &disconnectpool) {
2534 
2535  const CChainParams &params = config.GetChainParams();
2536  const Consensus::Params &consensusParams = params.GetConsensus();
2537 
2538  assert(pindexNew->pprev == m_chain.Tip());
2539  // Read block from disk.
2540  int64_t nTime1 = GetTimeMicros();
2541  std::shared_ptr<const CBlock> pthisBlock;
2542  if (!pblock) {
2543  std::shared_ptr<CBlock> pblockNew = std::make_shared<CBlock>();
2544  if (!ReadBlockFromDisk(*pblockNew, pindexNew, consensusParams)) {
2545  return AbortNode(state, "Failed to read block");
2546  }
2547  pthisBlock = pblockNew;
2548  } else {
2549  pthisBlock = pblock;
2550  }
2551 
2552  const CBlock &blockConnecting = *pthisBlock;
2553 
2554  // Apply the block atomically to the chain state.
2555  int64_t nTime2 = GetTimeMicros();
2556  nTimeReadFromDisk += nTime2 - nTime1;
2557  int64_t nTime3;
2558  LogPrint(BCLog::BENCH, " - Load block from disk: %.2fms [%.2fs]\n",
2559  (nTime2 - nTime1) * MILLI, nTimeReadFromDisk * MICRO);
2560  {
2561  CCoinsViewCache view(&CoinsTip());
2562  bool rv = ConnectBlock(blockConnecting, state, pindexNew, view, params,
2563  BlockValidationOptions(config));
2564  GetMainSignals().BlockChecked(blockConnecting, state);
2565  if (!rv) {
2566  if (state.IsInvalid()) {
2567  InvalidBlockFound(pindexNew, state);
2568  }
2569 
2570  return error("%s: ConnectBlock %s failed, %s", __func__,
2571  pindexNew->GetBlockHash().ToString(),
2572  state.ToString());
2573  }
2574 
2575  // Update the finalized block.
2576  const CBlockIndex *pindexToFinalize = FindBlockToFinalize(pindexNew);
2577  if (pindexToFinalize && !MarkBlockAsFinal(state, pindexToFinalize)) {
2578  return error("ConnectTip(): MarkBlockAsFinal %s failed (%s)",
2579  pindexNew->GetBlockHash().ToString(),
2580  state.ToString());
2581  }
2582 
2583  nTime3 = GetTimeMicros();
2584  nTimeConnectTotal += nTime3 - nTime2;
2585  assert(nBlocksTotal > 0);
2587  " - Connect total: %.2fms [%.2fs (%.2fms/blk)]\n",
2588  (nTime3 - nTime2) * MILLI, nTimeConnectTotal * MICRO,
2590  bool flushed = view.Flush();
2591  assert(flushed);
2592  }
2593 
2594  int64_t nTime4 = GetTimeMicros();
2595  nTimeFlush += nTime4 - nTime3;
2596  LogPrint(BCLog::BENCH, " - Flush: %.2fms [%.2fs (%.2fms/blk)]\n",
2597  (nTime4 - nTime3) * MILLI, nTimeFlush * MICRO,
2599 
2600  // Write the chain state to disk, if necessary.
2601  if (!FlushStateToDisk(config.GetChainParams(), state,
2603  return false;
2604  }
2605 
2606  int64_t nTime5 = GetTimeMicros();
2607  nTimeChainState += nTime5 - nTime4;
2609  " - Writing chainstate: %.2fms [%.2fs (%.2fms/blk)]\n",
2610  (nTime5 - nTime4) * MILLI, nTimeChainState * MICRO,
2612 
2613  // Remove conflicting transactions from the mempool.
2614  m_mempool.removeForBlock(blockConnecting.vtx, pindexNew->nHeight);
2615  disconnectpool.removeForBlock(blockConnecting.vtx);
2616 
2617  // If this block is activating a fork, we move all mempool transactions
2618  // in front of disconnectpool for reprocessing in a future
2619  // updateMempoolForReorg call.
2620  if (pindexNew->pprev != nullptr &&
2621  GetNextBlockScriptFlags(consensusParams, pindexNew) !=
2622  GetNextBlockScriptFlags(consensusParams, pindexNew->pprev)) {
2624  "Disconnecting mempool due to acceptance of upgrade block\n");
2625  disconnectpool.importMempool(m_mempool);
2626  }
2627 
2628  // Update m_chain & related variables.
2629  m_chain.SetTip(pindexNew);
2630  UpdateTip(m_mempool, params, pindexNew);
2631 
2632  int64_t nTime6 = GetTimeMicros();
2633  nTimePostConnect += nTime6 - nTime5;
2634  nTimeTotal += nTime6 - nTime1;
2636  " - Connect postprocess: %.2fms [%.2fs (%.2fms/blk)]\n",
2637  (nTime6 - nTime5) * MILLI, nTimePostConnect * MICRO,
2639  LogPrint(BCLog::BENCH, "- Connect block: %.2fms [%.2fs (%.2fms/blk)]\n",
2640  (nTime6 - nTime1) * MILLI, nTimeTotal * MICRO,
2642 
2643  connectTrace.BlockConnected(pindexNew, std::move(pthisBlock));
2644  return true;
2645 }
2646 
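// Illustrative sketch (not part of validation.cpp): both ConnectTip and
// DisconnectTip compare the script verification flags of the new tip with
// those of its parent and, when they differ, dump the mempool into the
// disconnect pool so it can be re-evaluated under the new rules. The helper
// below is a hypothetical restatement of that trigger.
#include <cstdint>

static bool CrossesUpgradeBoundarySketch(uint32_t flagsForBlock,
                                         uint32_t flagsForParent) {
    // Any difference in script flags marks an upgrade (or downgrade) block.
    return flagsForBlock != flagsForParent;
}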
2653  do {
2654  CBlockIndex *pindexNew = nullptr;
2655 
2656  // Find the best candidate header.
2657  {
2658  std::set<CBlockIndex *, CBlockIndexWorkComparator>::reverse_iterator
2659  it = setBlockIndexCandidates.rbegin();
2660  if (it == setBlockIndexCandidates.rend()) {
2661  return nullptr;
2662  }
2663  pindexNew = *it;
2664  }
2665 
2666  // If this block will cause a finalized block to be reorged, then we
2667  // mark it as invalid.
2668  if (m_finalizedBlockIndex &&
2669  !AreOnTheSameFork(pindexNew, m_finalizedBlockIndex)) {
2670  LogPrintf("Mark block %s invalid because it forks prior to the "
2671  "finalization point %d.\n",
2672  pindexNew->GetBlockHash().ToString(),
2673  m_finalizedBlockIndex->nHeight);
2674  pindexNew->nStatus = pindexNew->nStatus.withFailed();
2675  InvalidChainFound(pindexNew);
2676  }
2677 
2678  const bool fAvalancheEnabled = isAvalancheEnabled(gArgs);
2679  const bool fAutoUnpark =
2680  gArgs.GetBoolArg("-automaticunparking", !fAvalancheEnabled);
2681 
2682  const CBlockIndex *pindexFork = m_chain.FindFork(pindexNew);
2683 
2684  // Check whether all blocks on the path between the currently active
2685  // chain and the candidate are valid. Stopping at the active chain
2686  // is an optimization, as we know all blocks in it are valid already.
2687  CBlockIndex *pindexTest = pindexNew;
2688  bool hasValidAncestor = true;
2689  while (hasValidAncestor && pindexTest && pindexTest != pindexFork) {
2690  assert(pindexTest->HaveTxsDownloaded() || pindexTest->nHeight == 0);
2691 
2692  // If this is a parked chain, but it has enough PoW, clear the park
2693  // state.
2694  bool fParkedChain = pindexTest->nStatus.isOnParkedChain();
2695  if (fAutoUnpark && fParkedChain) {
2696  const CBlockIndex *pindexTip = m_chain.Tip();
2697 
2698  // During initialization, pindexTip and/or pindexFork may be
2699  // null. In this case, we just ignore the fact that the chain is
2700  // parked.
2701  if (!pindexTip || !pindexFork) {
2702  UnparkBlock(pindexTest);
2703  continue;
2704  }
2705 
2706  // A parked chain can be unparked if it has twice as much PoW
2707  // accumulated as the main chain has since the fork block.
2708  CBlockIndex const *pindexExtraPow = pindexTip;
2709  arith_uint256 requiredWork = pindexTip->nChainWork;
2710  switch (pindexTip->nHeight - pindexFork->nHeight) {
2711  // Limit the penalty for depth 1, 2 and 3 to half a block
2712  // worth of work to ensure we don't fork accidentally.
2713  case 3:
2714  case 2:
2715  pindexExtraPow = pindexExtraPow->pprev;
2716  // FALLTHROUGH
2717  case 1: {
2718  const arith_uint256 deltaWork =
2719  pindexExtraPow->nChainWork - pindexFork->nChainWork;
2720  requiredWork += (deltaWork >> 1);
2721  break;
2722  }
2723  default:
2724  requiredWork +=
2725  pindexExtraPow->nChainWork - pindexFork->nChainWork;
2726  break;
2727  }
2728 
2729  if (pindexNew->nChainWork > requiredWork) {
2730  // We have enough, clear the parked state.
2731  LogPrintf("Unpark chain up to block %s as it has "
2732  "accumulated enough PoW.\n",
2733  pindexNew->GetBlockHash().ToString());
2734  fParkedChain = false;
2735  UnparkBlock(pindexTest);
2736  }
2737  }
2738 
2739  // Pruned nodes may have entries in setBlockIndexCandidates for
2740  // which block files have been deleted. Remove those as candidates
2741  // for the most work chain if we come across them; we can't switch
2742  // to a chain unless we have all the non-active-chain parent blocks.
2743  bool fInvalidChain = pindexTest->nStatus.isInvalid();
2744  bool fMissingData = !pindexTest->nStatus.hasData();
2745  if (!(fInvalidChain || fParkedChain || fMissingData)) {
2746  // The current block is acceptable, move to the parent, up to
2747  // the fork point.
2748  pindexTest = pindexTest->pprev;
2749  continue;
2750  }
2751 
2752  // Candidate chain is not usable (either invalid or parked or
2753  // missing data)
2754  hasValidAncestor = false;
2755  setBlockIndexCandidates.erase(pindexTest);
2756 
2757  if (fInvalidChain &&
2758  (pindexBestInvalid == nullptr ||
2759  pindexNew->nChainWork > pindexBestInvalid->nChainWork)) {
2760  pindexBestInvalid = pindexNew;
2761  }
2762 
2763  if (fParkedChain &&
2764  (pindexBestParked == nullptr ||
2765  pindexNew->nChainWork > pindexBestParked->nChainWork)) {
2766  pindexBestParked = pindexNew;
2767  }
2768 
2769  LogPrintf("Considered switching to better tip %s but that chain "
2770  "contains a%s%s%s block.\n",
2771  pindexNew->GetBlockHash().ToString(),
2772  fInvalidChain ? "n invalid" : "",
2773  fParkedChain ? " parked" : "",
2774  fMissingData ? " missing-data" : "");
2775 
2776  CBlockIndex *pindexFailed = pindexNew;
2777  // Remove the entire chain from the set.
2778  while (pindexTest != pindexFailed) {
2779  if (fInvalidChain || fParkedChain) {
2780  pindexFailed->nStatus =
2781  pindexFailed->nStatus.withFailedParent(fInvalidChain)
2782  .withParkedParent(fParkedChain);
2783  } else if (fMissingData) {
2784  // If we're missing data, then add back to
2785  // m_blocks_unlinked, so that if the block arrives in the
2786  // future we can try adding to setBlockIndexCandidates
2787  // again.
2789  std::make_pair(pindexFailed->pprev, pindexFailed));
2790  }
2791  setBlockIndexCandidates.erase(pindexFailed);
2792  pindexFailed = pindexFailed->pprev;
2793  }
2794 
2795  if (fInvalidChain || fParkedChain) {
2796  // We discovered a new chain tip that is either parked or
2797  // invalid; we may want to warn.
2799  }
2800  }
2801 
2802  if (fAvalancheEnabled && g_avalanche) {
2803  g_avalanche->addBlockToReconcile(pindexNew);
2804  }
2805 
2806  // We found a candidate that has valid ancestors. This is our guy.
2807  if (hasValidAncestor) {
2808  return pindexNew;
2809  }
2810  } while (true);
2811 }
2812 
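// Illustrative sketch (not part of validation.cpp): the work a parked chain
// must accumulate before FindMostWorkChain unparks it automatically, restated
// with plain integers standing in for arith_uint256 chain work.
#include <cstdint>

static uint64_t RequiredWorkToUnparkSketch(uint64_t tipWork,
                                           uint64_t tipParentWork,
                                           uint64_t forkWork,
                                           int depthSinceFork) {
    uint64_t required = tipWork;
    if (depthSinceFork >= 1 && depthSinceFork <= 3) {
        // Shallow forks only add half of the work gained since the fork
        // (measured one block back for depths 2 and 3), limiting the penalty
        // so a short race does not keep the network split.
        const uint64_t base = (depthSinceFork == 1) ? tipWork : tipParentWork;
        required += (base - forkWork) >> 1;
    } else {
        // Deeper forks must bring twice the work the active chain has added
        // since the fork block.
        required += tipWork - forkWork;
    }
    return required;
}
// A parked candidate is unparked once its own chain work exceeds `required`.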
2818  // Note that we can't delete the current block itself, as we may need to
2819  // return to it later in case a reorganization to a better block fails.
2820  auto it = setBlockIndexCandidates.begin();
2821  while (it != setBlockIndexCandidates.end() &&
2822  setBlockIndexCandidates.value_comp()(*it, m_chain.Tip())) {
2823  setBlockIndexCandidates.erase(it++);
2824  }
2825 
2826  // Either the current tip or a successor of it we're working towards is left
2827  // in setBlockIndexCandidates.
2828  assert(!setBlockIndexCandidates.empty());
2829 }
2830 
2839  const Config &config, BlockValidationState &state,
2840  CBlockIndex *pindexMostWork, const std::shared_ptr<const CBlock> &pblock,
2841  bool &fInvalidFound, ConnectTrace &connectTrace) {
2844 
2845  const CBlockIndex *pindexOldTip = m_chain.Tip();
2846  const CBlockIndex *pindexFork = m_chain.FindFork(pindexMostWork);
2847 
2848  // Disconnect active blocks which are no longer in the best chain.
2849  bool fBlocksDisconnected = false;
2850  DisconnectedBlockTransactions disconnectpool;
2851  while (m_chain.Tip() && m_chain.Tip() != pindexFork) {
2852  if (!DisconnectTip(config.GetChainParams(), state, &disconnectpool)) {
2853  // This is likely a fatal error, but keep the mempool consistent,
2854  // just in case. Only remove from the mempool in this case.
2855  disconnectpool.updateMempoolForReorg(config, false, m_mempool);
2856 
2857  // If we're unable to disconnect a block during normal operation,
2858  // then that is a failure of our local system -- we should abort
2859  // rather than stay on a less work chain.
2860  AbortNode(state,
2861  "Failed to disconnect block; see debug.log for details");
2862  return false;
2863  }
2864 
2865  fBlocksDisconnected = true;
2866  }
2867 
2868  // Build list of new blocks to connect.
2869  std::vector<CBlockIndex *> vpindexToConnect;
2870  bool fContinue = true;
2871  int nHeight = pindexFork ? pindexFork->nHeight : -1;
2872  while (fContinue && nHeight != pindexMostWork->nHeight) {
2873  // Don't iterate the entire list of potential improvements toward the
2874  // best tip, as we likely only need a few blocks along the way.
2875  int nTargetHeight = std::min(nHeight + 32, pindexMostWork->nHeight);
2876  vpindexToConnect.clear();
2877  vpindexToConnect.reserve(nTargetHeight - nHeight);
2878  CBlockIndex *pindexIter = pindexMostWork->GetAncestor(nTargetHeight);
2879  while (pindexIter && pindexIter->nHeight != nHeight) {
2880  vpindexToConnect.push_back(pindexIter);
2881  pindexIter = pindexIter->pprev;
2882  }
2883 
2884  nHeight = nTargetHeight;
2885 
2886  // Connect new blocks.
2887  for (CBlockIndex *pindexConnect : reverse_iterate(vpindexToConnect)) {
2888  if (!ConnectTip(config, state, pindexConnect,
2889  pindexConnect == pindexMostWork
2890  ? pblock
2891  : std::shared_ptr<const CBlock>(),
2892  connectTrace, disconnectpool)) {
2893  if (state.IsInvalid()) {
2894  // The block violates a consensus rule.
2895  if (state.GetResult() !=
2897  InvalidChainFound(vpindexToConnect.back());
2898  }
2899  state = BlockValidationState();
2900  fInvalidFound = true;
2901  fContinue = false;
2902  break;
2903  }
2904 
2905  // A system error occurred (disk space, database error, ...).
2906  // Make the mempool consistent with the current tip, just in
2907  // case any observers try to use it before shutdown.
2908  disconnectpool.updateMempoolForReorg(config, false, m_mempool);
2909  return false;
2910  } else {
2912  if (!pindexOldTip ||
2913  m_chain.Tip()->nChainWork > pindexOldTip->nChainWork) {
2914  // We're in a better position than we were. Return
2915  // temporarily to release the lock.
2916  fContinue = false;
2917  break;
2918  }
2919  }
2920  }
2921  }
2922 
2923  if (fBlocksDisconnected || !disconnectpool.isEmpty()) {
2924  // If any blocks were disconnected, we need to update the mempool even
2925  // if disconnectpool is empty. The disconnectpool may also be non-empty
2926  // if the mempool was imported due to new validation rules being in
2927  // effect.
2928  LogPrint(BCLog::MEMPOOL, "Updating mempool due to reorganization or "
2929  "rules upgrade/downgrade\n");
2930  disconnectpool.updateMempoolForReorg(config, true, m_mempool);
2931  }
2932 
2933  m_mempool.check(&CoinsTip());
2934 
2935  // Callbacks/notifications for a new best chain.
2936  if (fInvalidFound) {
2937  CheckForkWarningConditionsOnNewFork(pindexMostWork);
2938  } else {
2940  }
2941 
2942  return true;
2943 }
2944 
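// Illustrative sketch (not part of validation.cpp): ActivateBestChainStep
// walks toward the most-work tip in batches of at most 32 blocks, so the
// outer loop in ActivateBestChain can release cs_main between batches. A
// simplified version of the target-height computation:
#include <algorithm>
#include <vector>

static std::vector<int> NextBatchOfHeightsSketch(int currentHeight,
                                                 int mostWorkHeight) {
    const int targetHeight = std::min(currentHeight + 32, mostWorkHeight);
    std::vector<int> heights;
    heights.reserve(targetHeight - currentHeight);
    for (int h = currentHeight + 1; h <= targetHeight; ++h) {
        heights.push_back(h);
    }
    return heights; // blocks at these heights are connected oldest-first
}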
2946  if (!init) {
2948  }
2949  if (::fReindex) {
2951  }
2953 }
2954 
2956  bool fNotify = false;
2957  bool fInitialBlockDownload = false;
2958  static CBlockIndex *pindexHeaderOld = nullptr;
2959  CBlockIndex *pindexHeader = nullptr;
2960  {
2961  LOCK(cs_main);
2962  pindexHeader = pindexBestHeader;
2963 
2964  if (pindexHeader != pindexHeaderOld) {
2965  fNotify = true;
2966  fInitialBlockDownload =
2968  pindexHeaderOld = pindexHeader;
2969  }
2970  }
2971 
2972  // Send block tip changed notifications without cs_main
2973  if (fNotify) {
2974  uiInterface.NotifyHeaderTip(
2975  GetSynchronizationState(fInitialBlockDownload), pindexHeader);
2976  }
2977  return fNotify;
2978 }
2979 
2982 
2983  if (GetMainSignals().CallbacksPending() > 10) {
2985  }
2986 }
2987 
2989  BlockValidationState &state,
2990  std::shared_ptr<const CBlock> pblock) {
2991  // Note that while we're often called here from ProcessNewBlock, this is
2992  // far from a guarantee. Things in the P2P/RPC will often end up calling
2993  // us in the middle of ProcessNewBlock - do not assume pblock is set
2994  // sanely for performance or correctness!
2996 
2997  const CChainParams &params = config.GetChainParams();
2998 
2999  // ABC maintains a fair degree of expensive-to-calculate internal state
3000  // because this function periodically releases cs_main so that it does not
3001  // lock up other threads for too long during large connects - and to allow
3002  // for e.g. the callback queue to drain. We use m_cs_chainstate to enforce
3003  // mutual exclusion so that only one caller may execute this function at a
3004  // time.
3006 
3007  CBlockIndex *pindexMostWork = nullptr;
3008  CBlockIndex *pindexNewTip = nullptr;
3009  int nStopAtHeight = gArgs.GetArg("-stopatheight", DEFAULT_STOPATHEIGHT);
3010  do {
3011  // Block until the validation queue drains. This should largely
3012  // never happen in normal operation; however, it may happen during
3013  // reindex, causing memory blowup if we run too far ahead.
3014  // Note that if a validationinterface callback ends up calling
3015  // ActivateBestChain this may lead to a deadlock! We should
3016  // probably have a DEBUG_LOCKORDER test for this in the future.
3018 
3019  {
3020  LOCK(cs_main);
3021  // Lock transaction pool for at least as long as it takes for
3022  // connectTrace to be consumed
3023  LOCK(m_mempool.cs);
3024  CBlockIndex *starting_tip = m_chain.Tip();
3025  bool blocks_connected = false;
3026  do {
3027  // We absolutely may not unlock cs_main until we've made forward
3028  // progress (with the exception of shutdown due to hardware
3029  // issues, low disk space, etc).
3030 
3031  // Destructed before cs_main is unlocked
3032  ConnectTrace connectTrace;
3033 
3034  if (pindexMostWork == nullptr) {
3035  pindexMostWork = FindMostWorkChain();
3036  }
3037 
3038  // Whether we have anything to do at all.
3039  if (pindexMostWork == nullptr ||
3040  pindexMostWork == m_chain.Tip()) {
3041  break;
3042  }
3043 
3044  bool fInvalidFound = false;
3045  std::shared_ptr<const CBlock> nullBlockPtr;
3046  if (!ActivateBestChainStep(
3047  config, state, pindexMostWork,
3048  pblock && pblock->GetHash() ==
3049  pindexMostWork->GetBlockHash()
3050  ? pblock
3051  : nullBlockPtr,
3052  fInvalidFound, connectTrace)) {
3053  // A system error occurred
3054  return false;
3055  }
3056  blocks_connected = true;
3057 
3058  if (fInvalidFound) {
3059  // Wipe cache, we may need another branch now.
3060  pindexMostWork = nullptr;
3061  }
3062 
3063  pindexNewTip = m_chain.Tip();
3064  for (const PerBlockConnectTrace &trace :
3065  connectTrace.GetBlocksConnected()) {
3066  assert(trace.pblock && trace.pindex);
3067  GetMainSignals().BlockConnected(trace.pblock, trace.pindex);
3068  }
3069  } while (!m_chain.Tip() ||
3070  (starting_tip && CBlockIndexWorkComparator()(
3071  m_chain.Tip(), starting_tip)));
3072 
3073  // Check the index once we're done with the above loop, since
3074  // we're going to release cs_main soon. If the index is in a bad
3075  // state now, then it's better to know immediately rather than
3076  // randomly have it cause a problem in a race.
3077  CheckBlockIndex(params.GetConsensus());
3078 
3079  if (!blocks_connected) {
3080  return true;
3081  }
3082 
3083  const CBlockIndex *pindexFork = m_chain.FindFork(starting_tip);
3084  bool fInitialDownload = IsInitialBlockDownload();
3085 
3086  // Notify external listeners about the new tip.
3087  // Enqueue while holding cs_main to ensure that UpdatedBlockTip is
3088  // called in the order in which blocks are connected
3089  if (pindexFork != pindexNewTip) {
3090  // Notify ValidationInterface subscribers
3091  GetMainSignals().UpdatedBlockTip(pindexNewTip, pindexFork,
3092  fInitialDownload);
3093 
3094  // Always notify the UI if a new block tip was connected
3095  uiInterface.NotifyBlockTip(
3096  GetSynchronizationState(fInitialDownload), pindexNewTip);
3097  }
3098  }
3099  // When we reach this point, we switched to a new tip (stored in
3100  // pindexNewTip).
3101 
3102  if (nStopAtHeight && pindexNewTip &&
3103  pindexNewTip->nHeight >= nStopAtHeight) {
3104  StartShutdown();
3105  }
3106 
3107  // We check shutdown only after giving ActivateBestChainStep a chance to
3108  // run once so that we never shut down before connecting the genesis
3109  // block during LoadChainTip(). Previously this caused an assert()
3110  // failure during shutdown in such cases as the UTXO DB flushing checks
3111  // that the best block hash is non-null.
3112  if (ShutdownRequested()) {
3113  break;
3114  }
3115  } while (pindexNewTip != pindexMostWork);
3116 
3117  // Write changes periodically to disk, after relay.
3118  if (!FlushStateToDisk(params, state, FlushStateMode::PERIODIC)) {
3119  return false;
3120  }
3121 
3122  return true;
3123 }
3124 
3125 bool ActivateBestChain(const Config &config, BlockValidationState &state,
3126  std::shared_ptr<const CBlock> pblock) {
3128  std::move(pblock));
3129 }
3130 
3132  BlockValidationState &state,
3133  CBlockIndex *pindex) {
3134  {
3135  LOCK(cs_main);
3136  if (pindex->nChainWork < m_chain.Tip()->nChainWork) {
3137  // Nothing to do, this block is not at the tip.
3138  return true;
3139  }
3140 
3142  // The chain has been extended since the last call, reset the
3143  // counter.
3145  }
3146 
3148  setBlockIndexCandidates.erase(pindex);
3150  if (nBlockReverseSequenceId > std::numeric_limits<int32_t>::min()) {
3151  // We can't keep reducing the counter if somebody really wants to
3152  // call preciousblock 2**31-1 times on the same set of tips...
3154  }
3155 
3156  // In case this was parked, unpark it.
3157  UnparkBlock(pindex);
3158 
3159  // Make sure it is added to the candidate list if appropriate.
3160  if (pindex->IsValid(BlockValidity::TRANSACTIONS) &&
3161  pindex->HaveTxsDownloaded()) {
3162  setBlockIndexCandidates.insert(pindex);
3164  }
3165  }
3166 
3167  return ActivateBestChain(config, state);
3168 }
3169 
3170 bool PreciousBlock(const Config &config, BlockValidationState &state,
3171  CBlockIndex *pindex) {
3172  return ::ChainstateActive().PreciousBlock(config, state, pindex);
3173 }
3174 
3176  CBlockIndex *pindex, bool invalidate) {
3177  CBlockIndex *to_mark_failed_or_parked = pindex;
3178  bool pindex_was_in_chain = false;
3179  int disconnected = 0;
3180  const CChainParams &chainparams = config.GetChainParams();
3181 
3182  // We do not allow ActivateBestChain() to run while UnwindBlock() is
3183  // running, as that could cause the tip to change while we disconnect
3184  // blocks. (Note for backport of Core PR16849: we acquire
3185  // LOCK(m_cs_chainstate) in the Park, Invalidate and FinalizeBlock functions
3186  // due to differences in our code)
3188 
3189  // We'll be acquiring and releasing cs_main below, to allow the validation
3190  // callbacks to run. However, we should keep the block index in a
3191  // consistent state as we disconnect blocks -- in particular we need to
3192  // add equal-work blocks to setBlockIndexCandidates as we disconnect.
3193  // To avoid walking the block index repeatedly in search of candidates,
3194  // build a map once so that we can look up candidate blocks by chain
3195  // work as we go.
3196  std::multimap<const arith_uint256, CBlockIndex *> candidate_blocks_by_work;
3197 
3198  {
3199  LOCK(cs_main);
3200  for (const auto &entry : m_blockman.m_block_index) {
3201  CBlockIndex *candidate = entry.second;
3202  // We don't need to put anything in our active chain into the
3203  // multimap, because those candidates will be found and considered
3204  // as we disconnect.
3205  // Instead, consider only non-active-chain blocks that have at
3206  // least as much work as where we expect the new tip to end up.
3207  if (!m_chain.Contains(candidate) &&
3208  !CBlockIndexWorkComparator()(candidate, pindex->pprev) &&
3209  candidate->IsValid(BlockValidity::TRANSACTIONS) &&
3210  candidate->HaveTxsDownloaded()) {
3211  candidate_blocks_by_work.insert(
3212  std::make_pair(candidate->nChainWork, candidate));
3213  }
3214  }
3215  }
3216 
3217  // Disconnect (descendants of) pindex, and mark them invalid.
3218  while (true) {
3219  if (ShutdownRequested()) {
3220  break;
3221  }
3222 
3223  // Make sure the queue of validation callbacks doesn't grow unboundedly.
3225 
3226  LOCK(cs_main);
3227  // Lock for as long as disconnectpool is in scope to make sure
3228  // UpdateMempoolForReorg is called after DisconnectTip without unlocking
3229  // in between
3230  LOCK(m_mempool.cs);
3231 
3232  if (!m_chain.Contains(pindex)) {
3233  break;
3234  }
3235 
3236  pindex_was_in_chain = true;
3237  CBlockIndex *invalid_walk_tip = m_chain.Tip();
3238 
3239  // ActivateBestChain considers blocks already in m_chain
3240  // unconditionally valid, so force disconnect away from it.
3241 
3242  DisconnectedBlockTransactions disconnectpool;
3243 
3244  bool ret = DisconnectTip(chainparams, state, &disconnectpool);
3245 
3246  // DisconnectTip will add transactions to disconnectpool.
3247  // Adjust the mempool to be consistent with the new tip, adding
3248  // transactions back to the mempool if disconnecting was successful,
3249  // and we're not doing a very deep invalidation (in which case
3250  // keeping the mempool up to date is probably futile anyway).
3251  disconnectpool.updateMempoolForReorg(
3252  config, /* fAddToMempool = */ (++disconnected <= 10) && ret,
3253  m_mempool);
3254 
3255  if (!ret) {
3256  return false;
3257  }
3258 
3259  assert(invalid_walk_tip->pprev == m_chain.Tip());
3260 
3261  // We immediately mark the disconnected blocks as invalid.
3262  // This prevents a case where pruned nodes may fail to invalidateblock
3263  // and be left unable to start as they have no tip candidates (as there
3264  // are no blocks that meet the "have data and are not invalid per
3265  // nStatus" criteria for inclusion in setBlockIndexCandidates).
3266 
3267  invalid_walk_tip->nStatus =
3268  invalidate ? invalid_walk_tip->nStatus.withFailed()
3269  : invalid_walk_tip->nStatus.withParked();
3270 
3271  setDirtyBlockIndex.insert(invalid_walk_tip);
3272  setBlockIndexCandidates.insert(invalid_walk_tip->pprev);
3273 
3274  if (invalid_walk_tip == to_mark_failed_or_parked->pprev &&
3275  (invalidate ? to_mark_failed_or_parked->nStatus.hasFailed()
3276  : to_mark_failed_or_parked->nStatus.isParked())) {
3277  // We only want to mark the last disconnected block as
3278  // Failed (or Parked); its children need to be FailedParent (or
3279  // ParkedParent) instead.
3280  to_mark_failed_or_parked->nStatus =
3281  (invalidate
3282  ? to_mark_failed_or_parked->nStatus.withFailed(false)
3283  .withFailedParent()
3284  : to_mark_failed_or_parked->nStatus.withParked(false)
3285  .withParkedParent());
3286 
3287  setDirtyBlockIndex.insert(to_mark_failed_or_parked);
3288  }
3289 
3290  // Add any equal or more work headers to setBlockIndexCandidates
3291  auto candidate_it = candidate_blocks_by_work.lower_bound(
3292  invalid_walk_tip->pprev->nChainWork);
3293  while (candidate_it != candidate_blocks_by_work.end()) {
3294  if (!CBlockIndexWorkComparator()(candidate_it->second,
3295  invalid_walk_tip->pprev)) {
3296  setBlockIndexCandidates.insert(candidate_it->second);
3297  candidate_it = candidate_blocks_by_work.erase(candidate_it);
3298  } else {
3299  ++candidate_it;
3300  }
3301  }
3302 
3303  // Track the last disconnected block, so we can correct its
3304  // FailedParent (or ParkedParent) status in future iterations, or, if
3305  // it's the last one, call InvalidChainFound on it.
3306  to_mark_failed_or_parked = invalid_walk_tip;
3307  }
3308 
3309  CheckBlockIndex(chainparams.GetConsensus());
3310 
3311  {
3312  LOCK(cs_main);
3313  if (m_chain.Contains(to_mark_failed_or_parked)) {
3314  // If the to-be-marked invalid block is in the active chain,
3315  // something is interfering and we can't proceed.
3316  return false;
3317  }
3318 
3319  // Mark pindex (or the last disconnected block) as invalid (or parked),
3320  // even when it never was in the main chain.
3321  to_mark_failed_or_parked->nStatus =
3322  invalidate ? to_mark_failed_or_parked->nStatus.withFailed()
3323  : to_mark_failed_or_parked->nStatus.withParked();
3324  setDirtyBlockIndex.insert(to_mark_failed_or_parked);
3325  if (invalidate) {
3326  m_blockman.m_failed_blocks.insert(to_mark_failed_or_parked);
3327  }
3328 
3329  // If any new blocks somehow arrived while we were disconnecting
3330  // (above), then the pre-calculation of what should go into
3331  // setBlockIndexCandidates may have missed entries. This would
3332  // technically be an inconsistency in the block index, but if we clean
3333  // it up here, this should be an essentially unobservable error.
3334  // Loop back over all block index entries and add any missing entries
3335  // to setBlockIndexCandidates.
3336  for (const std::pair<const BlockHash, CBlockIndex *> &it :
3337  m_blockman.m_block_index) {
3338  CBlockIndex *i = it.second;
3340  i->HaveTxsDownloaded() &&
3341  !setBlockIndexCandidates.value_comp()(i, m_chain.Tip())) {
3342  setBlockIndexCandidates.insert(i);
3343  }
3344  }
3345 
3346  if (invalidate) {
3347  InvalidChainFound(to_mark_failed_or_parked);
3348  }
3349  }
3350 
3351  // Only notify about a new block tip if the active chain was modified.
3352  if (pindex_was_in_chain) {
3353  uiInterface.NotifyBlockTip(
3355  to_mark_failed_or_parked->pprev);
3356  }
3357  return true;
3358 }
3359 
3361  BlockValidationState &state,
3362  CBlockIndex *pindex) {
3364  // See 'Note for backport of Core PR16849' in CChainState::UnwindBlock
3366 
3367  return UnwindBlock(config, state, pindex, true);
3368 }
3369 
3371  CBlockIndex *pindex) {
3373  // See 'Note for backport of Core PR16849' in CChainState::UnwindBlock
3375 
3376  return UnwindBlock(config, state, pindex, false);
3377 }
3378 
3380  BlockValidationState &state,
3381  CBlockIndex *pindex) {
3383  // See 'Note for backport of Core PR16849' in CChainState::UnwindBlock
3385 
3387  CBlockIndex *pindexToInvalidate = nullptr;
3388  {
3389  LOCK(cs_main);
3390  if (!MarkBlockAsFinal(state, pindex)) {
3391  // state is set by MarkBlockAsFinal.
3392  return false;
3393  }
3394 
3395  // We have a valid candidate, make sure it is not parked.
3396  if (pindex->nStatus.isOnParkedChain()) {
3397  UnparkBlock(pindex);
3398  }
3399 
3400  // If the finalized block is on the active chain, there is no need to
3401  // rewind.
3402  if (::ChainActive().Contains(pindex)) {
3403  return true;
3404  }
3405 
3406  // If the finalized block is not on the active chain, that chain is
3407  // invalid
3408  // ...
3409  const CBlockIndex *pindexFork = ::ChainActive().FindFork(pindex);
3410  pindexToInvalidate = ::ChainActive().Next(pindexFork);
3411  if (!pindexToInvalidate) {
3412  return false;
3413  }
3414  } // end of locked cs_main scope
3415 
3416  // ... therefore, we invalidate the block on the active chain that comes
3417  // immediately after it
3418  return UnwindBlock(config, state, pindexToInvalidate,
3419  true /* invalidating */);
3420 }
3421 
3422 template <typename F>
3424  CBlockIndex *pindex, F f) {
3425  BlockStatus newStatus = f(pindex->nStatus);
3426  if (pindex->nStatus != newStatus &&
3427  (!pindexBase ||
3428  pindex->GetAncestor(pindexBase->nHeight) == pindexBase)) {
3429  pindex->nStatus = newStatus;
3430  setDirtyBlockIndex.insert(pindex);
3431  if (newStatus.isValid()) {
3432  m_blockman.m_failed_blocks.erase(pindex);
3433  }
3434 
3435  if (pindex->IsValid(BlockValidity::TRANSACTIONS) &&
3436  pindex->HaveTxsDownloaded() &&
3437  setBlockIndexCandidates.value_comp()(::ChainActive().Tip(),
3438  pindex)) {
3439  setBlockIndexCandidates.insert(pindex);
3440  }
3441  return true;
3442  }
3443  return false;
3444 }
3445 
3446 template <typename F, typename C, typename AC>
3448  F f, C fChild, AC fAncestorWasChanged) {
3450 
3451  // Update the current block and ancestors; while we're doing this, identify
3452  // which was the deepest ancestor we changed.
3453  CBlockIndex *pindexDeepestChanged = pindex;
3454  for (auto pindexAncestor = pindex; pindexAncestor != nullptr;
3455  pindexAncestor = pindexAncestor->pprev) {
3456  if (UpdateFlagsForBlock(nullptr, pindexAncestor, f)) {
3457  pindexDeepestChanged = pindexAncestor;
3458  }
3459  }
3460 
3461  if (pindexReset &&
3462  pindexReset->GetAncestor(pindexDeepestChanged->nHeight) ==
3463  pindexDeepestChanged) {
3464  // reset pindexReset if it had a modified ancestor.
3465  pindexReset = nullptr;
3466  }
3467 
3468  // Update all blocks under modified blocks.
3469  BlockMap::iterator it = m_blockman.m_block_index.begin();
3470  while (it != m_blockman.m_block_index.end()) {
3471  UpdateFlagsForBlock(pindex, it->second, fChild);
3472  UpdateFlagsForBlock(pindexDeepestChanged, it->second,
3473  fAncestorWasChanged);
3474  it++;
3475  }
3476 }
3477 
3480 
3481  // In case we are reconsidering something before the finalization point,
3482  // move the finalization point to the last common ancestor.
3483  if (m_finalizedBlockIndex) {
3484  m_finalizedBlockIndex =
3485  LastCommonAncestor(pindex, m_finalizedBlockIndex);
3486  }
3487 
3488  UpdateFlags(
3489  pindex, pindexBestInvalid,
3490  [](const BlockStatus status) {
3491  return status.withClearedFailureFlags();
3492  },
3493  [](const BlockStatus status) {
3494  return status.withClearedFailureFlags();
3495  },
3496  [](const BlockStatus status) {
3497  return status.withFailedParent(false);
3498  });
3499 }
3500 
3503 }
3504 
3505 void CChainState::UnparkBlockImpl(CBlockIndex *pindex, bool fClearChildren) {
3507 
3508  UpdateFlags(
3509  pindex, pindexBestParked,
3510  [](const BlockStatus status) {
3511  return status.withClearedParkedFlags();
3512  },
3513  [fClearChildren](const BlockStatus status) {
3514  return fClearChildren ? status.withClearedParkedFlags()
3515  : status.withParkedParent(false);
3516  },
3517  [](const BlockStatus status) {
3518  return status.withParkedParent(false);
3519  });
3520 }
3521 
3524 }
3525 
3526 void UnparkBlock(CBlockIndex *pindex) {
3527  return ::ChainstateActive().UnparkBlockImpl(pindex, false);
3528 }
3529 
3530 bool CChainState::IsBlockFinalized(const CBlockIndex *pindex) const {
3532  return m_finalizedBlockIndex &&
3533  m_finalizedBlockIndex->GetAncestor(pindex->nHeight) == pindex;
3534 }
3535 
3539  return m_finalizedBlockIndex;
3540 }
3541 
3544 
3545  // Check for duplicate
3546  BlockHash hash = block.GetHash();
3547  BlockMap::iterator it = m_block_index.find(hash);
3548  if (it != m_block_index.end()) {
3549  return it->second;
3550  }
3551 
3552  // Construct new block index object
3553  CBlockIndex *pindexNew = new CBlockIndex(block);
3554  // We assign the sequence id to blocks only when the full data is available,
3555  // to avoid miners withholding blocks while broadcasting headers in
3556  // order to gain a competitive advantage.
3557  pindexNew->nSequenceId = 0;
3558  BlockMap::iterator mi =
3559  m_block_index.insert(std::make_pair(hash, pindexNew)).first;
3560  pindexNew->phashBlock = &((*mi).first);
3561  BlockMap::iterator miPrev = m_block_index.find(block.hashPrevBlock);
3562  if (miPrev != m_block_index.end()) {
3563  pindexNew->pprev = (*miPrev).second;
3564  pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
3565  pindexNew->BuildSkip();
3566  }
3567  pindexNew->nTimeReceived = GetTime();
3568  pindexNew->nTimeMax =
3569  (pindexNew->pprev
3570  ? std::max(pindexNew->pprev->nTimeMax, pindexNew->nTime)
3571  : pindexNew->nTime);
3572  pindexNew->nChainWork =
3573  (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) +
3574  GetBlockProof(*pindexNew);
3575  pindexNew->RaiseValidity(BlockValidity::TREE);
3576  if (pindexBestHeader == nullptr ||
3577  pindexBestHeader->nChainWork < pindexNew->nChainWork) {
3578  pindexBestHeader = pindexNew;
3579  }
3580 
3581  setDirtyBlockIndex.insert(pindexNew);
3582  return pindexNew;
3583 }
3584 
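// Illustrative sketch (not part of validation.cpp; the names below are
// hypothetical simplifications). The nChainWork assignment above accumulates
// cumulative proof-of-work: the parent's total plus the work proven by this
// header alone, which is roughly 2^256 / (target + 1) expected hash attempts.
// Here floating point stands in for the 256-bit integers the real code uses.

#include <cmath>

double BlockProof(double target) {
    // expected number of hash attempts needed to find a hash <= target
    return std::ldexp(1.0, 256) / (target + 1.0);
}

double ChainWork(double parent_chain_work, double target) {
    return parent_chain_work + BlockProof(target);
}
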
3590  CBlockIndex *pindexNew,
3591  const FlatFilePos &pos) {
3592  pindexNew->nTx = block.vtx.size();
3593  pindexNew->nSize = ::GetSerializeSize(block, PROTOCOL_VERSION);
3594  pindexNew->nFile = pos.nFile;
3595  pindexNew->nDataPos = pos.nPos;
3596  pindexNew->nUndoPos = 0;
3597  pindexNew->nStatus = pindexNew->nStatus.withData();
3599  setDirtyBlockIndex.insert(pindexNew);
3600 
3601  if (pindexNew->UpdateChainStats()) {
3602  // If pindexNew is the genesis block or all parents are
3603  // BLOCK_VALID_TRANSACTIONS.
3604  std::deque<CBlockIndex *> queue;
3605  queue.push_back(pindexNew);
3606 
3607  // Recursively process any descendant blocks that now may be eligible to
3608  // be connected.
3609  while (!queue.empty()) {
3610  CBlockIndex *pindex = queue.front();
3611  queue.pop_front();
3612  pindex->UpdateChainStats();
3613  if (pindex->nSequenceId == 0) {
3614  // We assign a sequence id when transactions are received, to
3615  // prevent a miner from being able to broadcast a block but not
3616  // its content. However, a sequence id may have been set
3617  // manually, for instance via PreciousBlock, in which case we
3618  // don't need to assign one.
3619  pindex->nSequenceId = nBlockSequenceId++;
3620  }
3621 
3622  if (m_chain.Tip() == nullptr ||
3623  !setBlockIndexCandidates.value_comp()(pindex, m_chain.Tip())) {
3624  setBlockIndexCandidates.insert(pindex);
3625  }
3626 
3627  std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
3628  std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
3629  range = m_blockman.m_blocks_unlinked.equal_range(pindex);
3630  while (range.first != range.second) {
3631  std::multimap<CBlockIndex *, CBlockIndex *>::iterator it =
3632  range.first;
3633  queue.push_back(it->second);
3634  range.first++;
3635  m_blockman.m_blocks_unlinked.erase(it);
3636  }
3637  }
3638  } else if (pindexNew->pprev &&
3639  pindexNew->pprev->IsValid(BlockValidity::TREE)) {
3641  std::make_pair(pindexNew->pprev, pindexNew));
3642  }
3643 }
3644 
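// Illustrative sketch (not part of validation.cpp; the names below are
// hypothetical). The loop above drains a queue of newly connectable blocks
// and pulls their children out of m_blocks_unlinked, a parent -> child
// multimap. A standalone model of that traversal, with ints as block ids:

#include <deque>
#include <iostream>
#include <map>

int main() {
    // children still waiting for their parent's block data to arrive
    std::multimap<int, int> blocks_unlinked{{1, 2}, {1, 3}, {3, 4}};
    std::deque<int> queue{1}; // block 1 just received its transactions
    while (!queue.empty()) {
        int parent = queue.front();
        queue.pop_front();
        std::cout << "now connectable: " << parent << '\n';
        auto range = blocks_unlinked.equal_range(parent);
        while (range.first != range.second) {
            queue.push_back(range.first->second);
            range.first = blocks_unlinked.erase(range.first);
        }
    }
}
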
3645 static bool FindBlockPos(FlatFilePos &pos, unsigned int nAddSize,
3646  unsigned int nHeight, uint64_t nTime,
3647  bool fKnown = false) {
3648  LOCK(cs_LastBlockFile);
3649 
3650  unsigned int nFile = fKnown ? pos.nFile : nLastBlockFile;
3651  if (vinfoBlockFile.size() <= nFile) {
3652  vinfoBlockFile.resize(nFile + 1);
3653  }
3654 
3655  bool finalize_undo = false;
3656  if (!fKnown) {
3657  while (vinfoBlockFile[nFile].nSize + nAddSize >= MAX_BLOCKFILE_SIZE) {
3658  // when the undo file is keeping up with the block file, we want to
3659  // flush it explicitly; when it is lagging behind (more blocks arrive
3660  // than are being connected), we let the undo block write case
3661  // handle it
3662  finalize_undo = (vinfoBlockFile[nFile].nHeightLast ==
3663  (unsigned int)ChainActive().Tip()->nHeight);
3664  nFile++;
3665  if (vinfoBlockFile.size() <= nFile) {
3666  vinfoBlockFile.resize(nFile + 1);
3667  }
3668  }
3669  pos.nFile = nFile;
3670  pos.nPos = vinfoBlockFile[nFile].nSize;
3671  }
3672 
3673  if ((int)nFile != nLastBlockFile) {
3674  if (!fKnown) {
3675  LogPrintf("Leaving block file %i: %s\n", nLastBlockFile,
3676  vinfoBlockFile[nLastBlockFile].ToString());
3677  }
3678  FlushBlockFile(!fKnown, finalize_undo);
3679  nLastBlockFile = nFile;
3680  }
3681 
3682  vinfoBlockFile[nFile].AddBlock(nHeight, nTime);
3683  if (fKnown) {
3684  vinfoBlockFile[nFile].nSize =
3685  std::max(pos.nPos + nAddSize, vinfoBlockFile[nFile].nSize);
3686  } else {
3687  vinfoBlockFile[nFile].nSize += nAddSize;
3688  }
3689 
3690  if (!fKnown) {
3691  bool out_of_space;
3692  size_t bytes_allocated =
3693  BlockFileSeq().Allocate(pos, nAddSize, out_of_space);
3694  if (out_of_space) {
3695  return AbortNode("Disk space is too low!",
3696  _("Disk space is too low!"));
3697  }
3698  if (bytes_allocated != 0 && fPruneMode) {
3699  fCheckForPruning = true;
3700  }
3701  }
3702 
3703  setDirtyFileInfo.insert(nFile);
3704  return true;
3705 }
3706 
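// Illustrative sketch (not part of validation.cpp; names and the 128 MiB cap
// below are assumptions standing in for MAX_BLOCKFILE_SIZE). FindBlockPos
// above keeps appending blocks to the current blkNNNNN.dat file and rolls
// over to a new file once the write would cross the per-file size cap.
// Reduced to its arithmetic:

#include <cstdint>
#include <vector>

struct FileInfo { uint64_t nSize = 0; };

// Returns the index of the file a block of `add_size` bytes should go to,
// growing the bookkeeping vector as needed.
static size_t PickBlockFile(std::vector<FileInfo> &files, size_t current,
                            uint64_t add_size,
                            uint64_t max_file_size = 128 * 1024 * 1024) {
    if (files.size() <= current) {
        files.resize(current + 1);
    }
    while (files[current].nSize + add_size >= max_file_size) {
        ++current; // current file would overflow: roll over to the next one
        if (files.size() <= current) {
            files.resize(current + 1);
        }
    }
    files[current].nSize += add_size; // reserve space in the chosen file
    return current;
}
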
3707 static bool FindUndoPos(BlockValidationState &state, int nFile,
3708  FlatFilePos &pos, unsigned int nAddSize) {
3709  pos.nFile = nFile;
3710 
3711  LOCK(cs_LastBlockFile);
3712 
3713  pos.nPos = vinfoBlockFile[nFile].nUndoSize;
3714  vinfoBlockFile[nFile].nUndoSize += nAddSize;
3715  setDirtyFileInfo.insert(nFile);
3716 
3717  bool out_of_space;
3718  size_t bytes_allocated =
3719  UndoFileSeq().Allocate(pos, nAddSize, out_of_space);
3720  if (out_of_space) {
3721  return AbortNode(state, "Disk space is too low!",
3722  _("Disk space is too low!"));
3723  }
3724  if (bytes_allocated != 0 && fPruneMode) {
3725  fCheckForPruning = true;
3726  }
3727 
3728  return true;
3729 }
3730 
3739 static bool CheckBlockHeader(const CBlockHeader &block,
3740  BlockValidationState &state,
3741  const Consensus::Params &params,
3742  BlockValidationOptions validationOptions) {
3743  // Check proof of work matches claimed amount
3744  if (validationOptions.shouldValidatePoW() &&
3745  !CheckProofOfWork(block.GetHash(), block.nBits, params)) {
3747  "high-hash", "proof of work failed");
3748  }
3749 
3750  return true;
3751 }
3752 
3753 bool CheckBlock(const CBlock &block, BlockValidationState &state,
3754  const Consensus::Params &params,
3755  BlockValidationOptions validationOptions) {
3756  // These are checks that are independent of context.
3757  if (block.fChecked) {
3758  return true;
3759  }
3760 
3761  // Check that the header is valid (particularly PoW). This is mostly
3762  // redundant with the call in AcceptBlockHeader.
3763  if (!CheckBlockHeader(block, state, params, validationOptions)) {
3764  return false;
3765  }
3766 
3767  // Check the merkle root.
3768  if (validationOptions.shouldValidateMerkleRoot()) {
3769  bool mutated;
3770  uint256 hashMerkleRoot2 = BlockMerkleRoot(block, &mutated);
3771  if (block.hashMerkleRoot != hashMerkleRoot2) {
3773  "bad-txnmrklroot", "hashMerkleRoot mismatch");
3774  }
3775 
3776  // Check for merkle tree malleability (CVE-2012-2459): repeating
3777  // sequences of transactions in a block without affecting the merkle
3778  // root of a block, while still invalidating it.
3779  if (mutated) {
3781  "bad-txns-duplicate", "duplicate transaction");
3782  }
3783  }
3784 
3785  // All potential-corruption validation must be done before we do any
3786  // transaction validation, as otherwise we may mark the header as invalid
3787  // because we receive the wrong transactions for it.
3788 
3789  // First transaction must be coinbase.
3790  if (block.vtx.empty() || !block.vtx[0]->IsCoinBase()) {
3792  "bad-cb-missing", "first tx is not coinbase");
3793  }
3794 
3795  // Size limits.
3796  auto nMaxBlockSize = validationOptions.getExcessiveBlockSize();
3797 
3798  // Bail early if there is no way this block is of reasonable size.
3799  if ((block.vtx.size() * MIN_TRANSACTION_SIZE) > nMaxBlockSize) {
3801  "bad-blk-length", "size limits failed");
3802  }
3803 
3804  auto currentBlockSize = ::GetSerializeSize(block, PROTOCOL_VERSION);
3805  if (currentBlockSize > nMaxBlockSize) {
3807  "bad-blk-length", "size limits failed");
3808  }
3809 
3810  // And a valid coinbase.
3811  TxValidationState tx_state;
3812  if (!CheckCoinbase(*block.vtx[0], tx_state)) {
3814  tx_state.GetRejectReason(),
3815  strprintf("Coinbase check failed (txid %s) %s",
3816  block.vtx[0]->GetId().ToString(),
3817  tx_state.GetDebugMessage()));
3818  }
3819 
3820  // Check transactions for regularity, skipping the first. Note that this
3821  // is the first time we check that all after the first are !IsCoinBase.
3822  for (size_t i = 1; i < block.vtx.size(); i++) {
3823  auto *tx = block.vtx[i].get();
3824  if (!CheckRegularTransaction(*tx, tx_state)) {
3825  return state.Invalid(
3827  tx_state.GetRejectReason(),
3828  strprintf("Transaction check failed (txid %s) %s",
3829  tx->GetId().ToString(), tx_state.GetDebugMessage()));
3830  }
3831  }
3832 
3833  if (validationOptions.shouldValidatePoW() &&
3834  validationOptions.shouldValidateMerkleRoot()) {
3835  block.fChecked = true;
3836  }
3837 
3838  return true;
3839 }
3840 
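// Illustrative sketch (not part of validation.cpp; the names and the 100-byte
// lower bound are assumptions standing in for MIN_TRANSACTION_SIZE). The size
// checks in CheckBlock above are ordered from cheap to expensive: first a
// lower bound (if vtx.size() times the minimum transaction size already
// exceeds the excessive block size, the block cannot fit, so we bail without
// serializing it), and only then the exact serialized size.

#include <cstdint>

bool PassesSizeLimits(uint64_t num_txs, uint64_t serialized_size,
                      uint64_t max_block_size) {
    constexpr uint64_t MIN_TX_SIZE = 100; // assumed lower bound per tx
    if (num_txs * MIN_TX_SIZE > max_block_size) {
        return false; // cheap bail-out: the block cannot possibly fit
    }
    return serialized_size <= max_block_size; // exact check
}
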
3851 static bool ContextualCheckBlockHeader(const CChainParams &params,
3852  const CBlockHeader &block,
3853  BlockValidationState &state,
3854  const CBlockIndex *pindexPrev,
3855  int64_t nAdjustedTime)
3857  assert(pindexPrev != nullptr);
3858  const int nHeight = pindexPrev->nHeight + 1;
3859 
3860  // Check proof of work
3861  if (block.nBits != GetNextWorkRequired(pindexPrev, &block, params)) {
3862  LogPrintf("bad bits after height: %d\n", pindexPrev->nHeight);
3864  "bad-diffbits", "incorrect proof of work");
3865  }
3866 
3867  // Check against checkpoints
3868  if (fCheckpointsEnabled) {
3869  const CCheckpointData &checkpoints = params.Checkpoints();
3870 
3871  // Check that the block chain matches the known block chain up to a
3872  // checkpoint.
3873  if (!Checkpoints::CheckBlock(checkpoints, nHeight, block.GetHash())) {
3874  LogPrintf("ERROR: %s: rejected by checkpoint lock-in at %d\n",
3875  __func__, nHeight);
3877  "checkpoint mismatch");
3878  }
3879 
3880  // Don't accept any forks from the main chain prior to last checkpoint.
3881  // GetLastCheckpoint finds the last checkpoint in MapCheckpoints that's
3882  // in our BlockIndex().
3883  CBlockIndex *pcheckpoint = Checkpoints::GetLastCheckpoint(checkpoints);
3884  if (pcheckpoint && nHeight < pcheckpoint->nHeight) {
3885  LogPrintf("ERROR: %s: forked chain older than last checkpoint "
3886  "(height %d)\n",
3887  __func__, nHeight);
3889  "bad-fork-prior-to-checkpoint");
3890  }
3891  }
3892 
3893  // Check timestamp against prev
3894  if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast()) {
3896  "time-too-old", "block's timestamp is too early");
3897  }
3898 
3899  // Check timestamp
3900  if (block.GetBlockTime() > nAdjustedTime + MAX_FUTURE_BLOCK_TIME) {
3902  "time-too-new",
3903  "block timestamp too far in the future");
3904  }
3905 
3906  // Reject outdated version blocks when 95% (75% on testnet) of the network
3907  // has upgraded:
3908  // check for version 2, 3 and 4 upgrades
3909  const Consensus::Params &consensusParams = params.GetConsensus();
3910  if ((block.nVersion < 2 && nHeight >= consensusParams.BIP34Height) ||
3911  (block.nVersion < 3 && nHeight >= consensusParams.BIP66Height) ||
3912  (block.nVersion < 4 && nHeight >= consensusParams.BIP65Height)) {
3913  return state.Invalid(
3915  strprintf("bad-version(0x%08x)", block.nVersion),
3916  strprintf("rejected nVersion=0x%08x block", block.nVersion));
3917  }
3918 
3919  return true;
3920 }
3921 
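// Illustrative sketch (not part of validation.cpp; names and the 2-hour
// constant are assumptions standing in for MAX_FUTURE_BLOCK_TIME). The two
// timestamp rules above compare the header's time against (a) the median
// timestamp of (up to) the last 11 blocks, i.e. GetMedianTimePast, and
// (b) network-adjusted "now" plus a future allowance:

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

bool TimestampLooksValid(int64_t block_time,
                         std::vector<int64_t> prev_block_times, // last ~11
                         int64_t adjusted_now) {
    constexpr int64_t MAX_FUTURE = 2 * 60 * 60; // assumed 2-hour allowance
    assert(!prev_block_times.empty());
    std::sort(prev_block_times.begin(), prev_block_times.end());
    const int64_t median_time_past =
        prev_block_times[prev_block_times.size() / 2];
    if (block_time <= median_time_past) {
        return false; // "time-too-old"
    }
    if (block_time > adjusted_now + MAX_FUTURE) {
        return false; // "time-too-new"
    }
    return true;
}
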
3923  const CTransaction &tx,
3924  TxValidationState &state,
3925  int flags) {
3927 
3928  // By convention a negative value for flags indicates that the current
3929  // network-enforced consensus rules should be used. In a future soft-fork
3930  // scenario that would mean checking which rules would be enforced for the
3931  // next block and setting the appropriate flags. At the present time no
3932  // soft-forks are scheduled, so no flags are set.
3933  flags = std::max(flags, 0);
3934 
3935  // ContextualCheckTransactionForCurrentBlock() uses
3936  // ::ChainActive().Height()+1 to evaluate nLockTime because when IsFinalTx()
3937  // is called within CBlock::AcceptBlock(), the height of the block *being*
3938  // evaluated is what is used. Thus if we want to know if a transaction can
3939  // be part of the *next* block, we need to call ContextualCheckTransaction()
3940  // with one more than ::ChainActive().Height().
3941  const int nBlockHeight = ::ChainActive().Height() + 1;
3942 
3943  // BIP113 will require that time-locked transactions have nLockTime set to
3944  // less than the median time of the previous block they're contained in.
3945  // When the next block is created its previous block will be the current
3946  // chain tip, so we use that to calculate the median time passed to
3947  // ContextualCheckTransaction() if LOCKTIME_MEDIAN_TIME_PAST is set.
3948  const int64_t nMedianTimePast =
3949  ::ChainActive().Tip() == nullptr
3950  ? 0
3952  const int64_t nLockTimeCutoff = (flags & LOCKTIME_MEDIAN_TIME_PAST)
3953  ? nMedianTimePast
3954  : GetAdjustedTime();
3955 
3956  return ContextualCheckTransaction(params, tx, state, nBlockHeight,
3957  nLockTimeCutoff, nMedianTimePast);
3958 }
3959 
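// Illustrative sketch (not part of validation.cpp; names are hypothetical).
// The comment above explains why a transaction's nLockTime is measured
// against the *next* block's height and the current tip's median-time-past.
// A simplified finality rule along those lines, ignoring per-input
// sequence-number overrides:

#include <cstdint>

// nLockTime below 500,000,000 is read as a block height, otherwise as a unix
// timestamp (the usual consensus convention).
constexpr int64_t LOCKTIME_THRESHOLD = 500000000;

bool IsFinalForNextBlock(int64_t tx_locktime, int64_t next_block_height,
                         int64_t tip_median_time_past) {
    if (tx_locktime == 0) {
        return true; // no time lock at all
    }
    const int64_t cutoff = tx_locktime < LOCKTIME_THRESHOLD
                               ? next_block_height
                               : tip_median_time_past;
    return tx_locktime < cutoff;
}
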
3967 static bool ContextualCheckBlock(const CBlock &block,
3968  BlockValidationState &state,
3969  const Consensus::Params &params,
3970  const CBlockIndex *pindexPrev) {
3971  const int nHeight = pindexPrev == nullptr ? 0 : pindexPrev->nHeight + 1;
3972 
3973  // Start enforcing BIP113 (Median Time Past).
3974  int nLockTimeFlags = 0;
3975  if (nHeight >= params.CSVHeight) {
3976  assert(pindexPrev != nullptr);
3977  nLockTimeFlags |= LOCKTIME_MEDIAN_TIME_PAST;
3978  }
3979 
3980  const int64_t nMedianTimePast =
3981  pindexPrev == nullptr ? 0 : pindexPrev->GetMedianTimePast();
3982 
3983  const int64_t nLockTimeCutoff = (nLockTimeFlags & LOCKTIME_MEDIAN_TIME_PAST)
3984  ? nMedianTimePast
3985  : block.GetBlockTime();
3986 
3987  const bool fIsMagneticAnomalyEnabled =
3988  IsMagneticAnomalyEnabled(params, pindexPrev);
3989 
3990  // Check transactions:
3991  // - canonical ordering
3992  // - ensure they are finalized
3993  // - perform a preliminary block-sigops count (they will be recounted more
3994  // strictly during ConnectBlock).
3995  // - perform a transaction-sigops check (again, a more strict check will
3996  // happen in ConnectBlock).
3997  const CTransaction *prevTx = nullptr;
3998  for (const auto &ptx : block.vtx) {
3999  const CTransaction &tx = *ptx;
4000  if (fIsMagneticAnomalyEnabled) {
4001  if (prevTx && (tx.GetId() <= prevTx->GetId())) {
4002  if (tx.GetId() == prevTx->GetId()) {
4004  "tx-duplicate",
4005  strprintf("Duplicated transaction %s",
4006  tx.GetId().ToString()));
4007  }
4008 
4009  return state.Invalid(
4011  strprintf("Transaction order is invalid (%s < %s)",
4012  tx.GetId().ToString(),
4013  prevTx->GetId().ToString()));
4014  }
4015 
4016  if (prevTx || !tx.IsCoinBase()) {
4017  prevTx = &tx;
4018  }
4019  }
4020 
4021  TxValidationState tx_state;
4022  if (!ContextualCheckTransaction(params, tx, tx_state, nHeight,
4023  nLockTimeCutoff, nMedianTimePast)) {
4025  tx_state.GetRejectReason(),
4026  tx_state.GetDebugMessage());
4027  }
4028  }
4029 
4030  // Enforce rule that the coinbase starts with serialized block height
4031  if (nHeight >= params.BIP34Height) {
4032  CScript expect = CScript() << nHeight;
4033  if (block.vtx[0]->vin[0].scriptSig.size() < expect.size() ||
4034  !std::equal(expect.begin(), expect.end(),
4035  block.vtx[0]->vin[0].scriptSig.begin())) {
4037  "bad-cb-height",
4038  "block height mismatch in coinbase");
4039  }
4040  }
4041 
4042  return true;
4043 }
4044 
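// Illustrative sketch (not part of validation.cpp; names are hypothetical).
// The BIP34 rule enforced at the end of ContextualCheckBlock requires the
// coinbase scriptSig to *begin* with the block height serialized the way
// `CScript() << nHeight` serializes it: a small data push of the height as a
// minimal little-endian signed number. Sketch of that encoding for heights
// above 16 (small heights use dedicated one-byte opcodes, glossed over here):

#include <cstdint>
#include <vector>

std::vector<uint8_t> Bip34CoinbasePrefix(int64_t height) {
    // minimal little-endian encoding of the (positive) height
    std::vector<uint8_t> num;
    while (height > 0) {
        num.push_back(static_cast<uint8_t>(height & 0xff));
        height >>= 8;
    }
    if (!num.empty() && (num.back() & 0x80)) {
        num.push_back(0x00); // keep the top bit from being read as a sign
    }
    // the script starts with a length byte introducing the push
    std::vector<uint8_t> script{static_cast<uint8_t>(num.size())};
    script.insert(script.end(), num.begin(), num.end());
    return script;
}

// A coinbase scriptSig is accepted only if it starts with this prefix, e.g.
// a height of 1000 (0x3e8) encodes as the push 0x02 0xe8 0x03.
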
4051  const CBlockHeader &block,
4052  BlockValidationState &state,
4053  CBlockIndex **ppindex) {
4055  const CChainParams &chainparams = config.GetChainParams();
4056 
4057  // Check for duplicate
4058  BlockHash hash = block.GetHash();
4059  BlockMap::iterator miSelf = m_block_index.find(hash);
4060  CBlockIndex *pindex = nullptr;
4061  if (hash != chainparams.GetConsensus().hashGenesisBlock) {
4062  if (miSelf != m_block_index.end()) {
4063  // Block header is already known.
4064  pindex = miSelf->second;
4065  if (ppindex) {
4066  *ppindex = pindex;
4067  }
4068 
4069  if (pindex->nStatus.isInvalid()) {
4070  LogPrintf("ERROR: %s: block %s is marked invalid\n", __func__,
4071  hash.ToString());
4072  return state.Invalid(
4074  }
4075 
4076  return true;
4077  }
4078 
4079  if (!CheckBlockHeader(block, state, chainparams.GetConsensus(),
4080  BlockValidationOptions(config))) {
4082  "%s: Consensus::CheckBlockHeader: %s, %s\n", __func__,
4083  hash.ToString(), state.ToString());
4084  return false;
4085  }
4086 
4087  // Get prev block index
4088  BlockMap::iterator mi = m_block_index.find(block.hashPrevBlock);
4089  if (mi == m_block_index.end()) {
4090  LogPrintf("ERROR: %s: prev block not found\n", __func__);
4092  "prev-blk-not-found");
4093  }
4094 
4095  CBlockIndex *pindexPrev = (*mi).second;
4096  assert(pindexPrev);
4097  if (pindexPrev->nStatus.isInvalid()) {
4098  LogPrintf("ERROR: %s: prev block invalid\n", __func__);
4100  "bad-prevblk");
4101  }
4102 
4103  if (!ContextualCheckBlockHeader(chainparams, block, state, pindexPrev,
4104  GetAdjustedTime())) {
4105  return error("%s: Consensus::ContextualCheckBlockHeader: %s, %s",
4106  __func__, hash.ToString(), state.ToString());
4107  }
4108 
4109  /* Determine if this block descends from any block which has been found
4110  * invalid (m_failed_blocks), then mark pindexPrev and any blocks
4111  * between them as failed. For example:
4112  *
4113  *                      D3
4114  *                    /
4115  *            B2 - C2
4116  *          /         \
4117  *        A             D2 - E2 - F2
4118  *          \
4119  *            B1 - C1 - D1 - E1
4120  *
4121  * In the case that we attempted to reorg from E1 to F2, only to find
4122  * C2 to be invalid, we would mark D2, E2, and F2 as BLOCK_FAILED_CHILD
4123  * but NOT D3 (it was not in any of our candidate sets at the time).
4124  *
4125  * In any case D3 will also be marked as BLOCK_FAILED_CHILD at restart
4126  * in LoadBlockIndex.
4127  */
4128  if (!pindexPrev->IsValid(BlockValidity::SCRIPTS)) {
4129  // The above does not mean "invalid": it checks if the previous
4130  // block hasn't been validated up to BlockValidity::SCRIPTS. This is
4131  // a performance optimization: in the common case of adding a new
4132  // block to the tip, we don't need to iterate over the failed blocks
4133  // list.
4134  for (const CBlockIndex *failedit : m_failed_blocks) {
4135  if (pindexPrev->GetAncestor(failedit->nHeight) == failedit) {
4136  assert(failedit->nStatus.hasFailed());
4137  CBlockIndex *invalid_walk = pindexPrev;
4138  while (invalid_walk != failedit) {
4139  invalid_walk->nStatus =
4140  invalid_walk->nStatus.withFailedParent();
4141  setDirtyBlockIndex.insert(invalid_walk);
4142  invalid_walk = invalid_walk->pprev;
4143  }
4144  LogPrintf("ERROR: %s: prev block invalid\n", __func__);
4145  return state.Invalid(
4147  "bad-prevblk");
4148  }
4149  }
4150  }
4151  }
4152 
4153  if (pindex == nullptr) {
4154  pindex = AddToBlockIndex(block);
4155  }
4156 
4157  if (ppindex) {
4158  *ppindex = pindex;
4159  }
4160 
4161  return true;
4162 }
4163 
4164 // Exposed wrapper for AcceptBlockHeader
4166  const Config &config, const std::vector<CBlockHeader> &headers,
4167  BlockValidationState &state, const CBlockIndex **ppindex) {
4169  {
4170  LOCK(cs_main);
4171  for (const CBlockHeader &header : headers) {
4172  // Use a temp pindex instead of ppindex to avoid a const_cast
4173  CBlockIndex *pindex = nullptr;
4174  bool accepted =
4175  m_blockman.AcceptBlockHeader(config, header, state, &pindex);
4177  config.GetChainParams().GetConsensus());
4178 
4179  if (!accepted) {
4180  return false;
4181  }
4182 
4183  if (ppindex) {
4184  *ppindex = pindex;
4185  }
4186  }
4187  }
4188 
4189  if (NotifyHeaderTip()) {
4190  if (::ChainstateActive().IsInitialBlockDownload() && ppindex &&
4191  *ppindex) {
4192  LogPrintf("Synchronizing blockheaders, height: %d (~%.2f%%)\n",
4193  (*ppindex)->nHeight,
4194  100.0 /
4195  ((*ppindex)->nHeight +
4196  (GetAdjustedTime() - (*ppindex)->GetBlockTime()) /
4197  Params().GetConsensus().nPowTargetSpacing) *
4198  (*ppindex)->nHeight);
4199  }
4200  }
4201  return true;
4202 }
4203 
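// Illustrative sketch (not part of validation.cpp; names are hypothetical,
// and the 600-second spacing is the mainnet target). The "Synchronizing
// blockheaders" log above estimates progress by guessing the total number of
// headers: the headers we already have, plus however many more would fit
// between the newest header's timestamp and "now" at the target block
// spacing.

#include <cstdint>

double EstimateHeaderSyncProgress(int64_t headers_height,
                                  int64_t newest_header_time, int64_t now,
                                  int64_t target_spacing = 600) {
    const double expected_remaining =
        double(now - newest_header_time) / target_spacing;
    const double expected_total = headers_height + expected_remaining;
    return expected_total > 0 ? 100.0 * headers_height / expected_total : 0.0;
}
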
4208 static FlatFilePos SaveBlockToDisk(const CBlock &block, int nHeight,
4209  const CChainParams &chainparams,
4210  const FlatFilePos *dbp) {
4211  unsigned int nBlockSize = ::GetSerializeSize(block, CLIENT_VERSION);
4212  FlatFilePos blockPos;
4213  if (dbp != nullptr) {
4214  blockPos = *dbp;
4215  }
4216  if (!FindBlockPos(blockPos, nBlockSize + 8, nHeight, block.GetBlockTime(),
4217  dbp != nullptr)) {
4218  error("%s: FindBlockPos failed", __func__);
4219  return FlatFilePos();
4220  }
4221  if (dbp == nullptr) {
4222  if (!WriteBlockToDisk(block, blockPos, chainparams.DiskMagic())) {
4223  AbortNode("Failed to write block");
4224  return FlatFilePos();
4225  }
4226  }
4227  return blockPos;
4228 }
4229 
4242  const std::shared_ptr<const CBlock> &pblock,
4243  BlockValidationState &state, bool fRequested,
4244  const FlatFilePos *dbp, bool *fNewBlock) {
4246 
4247  const CBlock &block = *pblock;
4248  if (fNewBlock) {
4249  *fNewBlock = false;
4250  }
4251 
4252  CBlockIndex *pindex = nullptr;
4253 
4254  bool accepted_header =
4255  m_blockman.AcceptBlockHeader(config, block, state, &pindex);
4257 
4258  if (!accepted_header) {
4259  return false;
4260  }
4261 
4262  // Try to process all requested blocks that we don't have, but only
4263  // process an unrequested block if it's new and has enough work to
4264  // advance our tip, and isn't too many blocks ahead.
4265  bool fAlreadyHave = pindex->nStatus.hasData();
4266 
4267  // TODO: deal better with return value and error conditions for duplicate
4268  // and unrequested blocks.
4269  if (fAlreadyHave) {
4270  return true;
4271  }
4272 
4273  // Compare block header timestamps and received times of the block and the
4274  // chaintip. If they have the same chain height, use these diffs as a
4275  // tie-breaker, attempting to pick the more honestly-mined block.
4276  int64_t newBlockTimeDiff = std::llabs(pindex->GetReceivedTimeDiff());
4277  int64_t chainTipTimeDiff =
4278  m_chain.Tip() ? std::llabs(m_chain.Tip()->GetReceivedTimeDiff()) : 0;
4279 
4280  bool isSameHeight =
4281  m_chain.Tip() && (pindex->nChainWork == m_chain.Tip()->nChainWork);
4282  if (isSameHeight) {
4283  LogPrintf("Chain tip timestamp-to-received-time difference: hash=%s, "
4284  "diff=%d\n",
4285  m_chain.Tip()->GetBlockHash().ToString(), chainTipTimeDiff);
4286  LogPrintf("New block timestamp-to-received-time difference: hash=%s, "
4287  "diff=%d\n",
4288  pindex->GetBlockHash().ToString(), newBlockTimeDiff);
4289  }
4290 
4291  bool fHasMoreOrSameWork =
4292  (m_chain.Tip() ? pindex->nChainWork >= m_chain.Tip()->nChainWork
4293  : true);
4294 
4295  // Blocks that are too out-of-order needlessly limit the effectiveness of
4296  // pruning, because pruning will not delete block files that contain any
4297  // blocks which are too close in height to the tip. Apply this test
4298  // regardless of whether pruning is enabled; it should generally be safe to
4299  // not process unrequested blocks.
4300  bool fTooFarAhead =
4301  (pindex->nHeight > int(m_chain.Height() + MIN_BLOCKS_TO_KEEP));
4302 
4303  // TODO: Decouple this function from the block download logic by removing
4304  // fRequested
4305  // This requires some new chain data structure to efficiently look up if a
4306  // block is in a chain leading to a candidate for best tip, despite not
4307  // being such a candidate itself.
4308 
4309  // If we didn't ask for it:
4310  if (!fRequested) {
4311  // This is a previously-processed block that was pruned.
4312  if (pindex->nTx != 0) {
4313  return true;
4314  }
4315 
4316  // Don't process less-work chains.
4317  if (!fHasMoreOrSameWork) {
4318  return true;
4319  }
4320 
4321  // Block height is too high.
4322  if (fTooFarAhead) {
4323  return true;
4324  }
4325 
4326  // Protect against DoS attacks from low-work chains.
4327  // If our tip is behind, a peer could try to send us
4328  // low-work blocks on a fake chain that we would never
4329  // request; don't process these.
4330  if (pindex->nChainWork < nMinimumChainWork) {
4331  return true;
4332  }
4333  }
4334 
4335  const CChainParams &chainparams = config.GetChainParams();
4336  const Consensus::Params &consensusParams = chainparams.GetConsensus();
4337 
4338  if (!CheckBlock(block, state, consensusParams,
4339  BlockValidationOptions(config)) ||
4340  !ContextualCheckBlock(block, state, consensusParams, pindex->pprev)) {
4341  if (state.IsInvalid() &&
4343  pindex->nStatus = pindex->nStatus.withFailed();
4344  setDirtyBlockIndex.insert(pindex);
4345  }
4346 
4347  return error("%s: %s (block %s)", __func__, state.ToString(),
4348  block.GetHash().ToString());
4349  }
4350 
4351  // If connecting the new block would require rewinding more than one block
4352  // from the active chain (i.e., a "deep reorg"), then mark the new block as
4353  // parked. If it has enough work then it will be automatically unparked
4354  // later, during FindMostWorkChain. We mark the block as parked at the very
4355  // last minute so we can make sure everything is ready to be reorged if
4356  // needed.
4357  if (gArgs.GetBoolArg("-parkdeepreorg", true)) {
4358  const CBlockIndex *pindexFork = m_chain.FindFork(pindex);
4359  if (pindexFork && pindexFork->nHeight + 1 < m_chain.Height()) {
4360  LogPrintf("Park block %s as it would cause a deep reorg.\n",
4361  pindex->GetBlockHash().ToString());
4362  pindex->nStatus = pindex->nStatus.withParked();
4363  setDirtyBlockIndex.insert(pindex);
4364  }
4365  }
4366 
4367  // Header is valid/has work and the merkle tree is good.
4368  // Relay now, but if it does not build on our best tip, let the
4369  // SendMessages loop relay it.
4370  if (!IsInitialBlockDownload() && m_chain.Tip() == pindex->pprev) {
4371  GetMainSignals().NewPoWValidBlock(pindex, pblock);
4372  }
4373 
4374  // Write block to history file
4375  if (fNewBlock) {
4376  *fNewBlock = true;
4377  }
4378  try {
4379  FlatFilePos blockPos =
4380  SaveBlockToDisk(block, pindex->nHeight, chainparams, dbp);
4381  if (blockPos.IsNull()) {
4382  state.Error(strprintf(
4383  "%s: Failed to find position to write new block to disk",
4384  __func__));
4385  return false;
4386  }
4387  ReceivedBlockTransactions(block, pindex, blockPos);
4388  } catch (const std::runtime_error &e) {
4389  return AbortNode(state, std::string("System error: ") + e.what());
4390  }
4391 
4392  FlushStateToDisk(chainparams, state, FlushStateMode::NONE);
4393 
4394  CheckBlockIndex(consensusParams);
4395 
4396  return true;
4397 }
4398 
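// Illustrative sketch (not part of validation.cpp; the function name is
// hypothetical). The -parkdeepreorg policy above parks an incoming block
// whenever attaching it would require unwinding more than one block of the
// active chain, i.e. when the fork point sits more than one block below the
// current tip. Reduced to heights:

bool WouldCauseDeepReorg(int fork_point_height, int active_tip_height) {
    // Reorging to the new branch would disconnect every active block above
    // the fork point; allow at most one such block.
    return fork_point_height + 1 < active_tip_height;
}
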
4400  const Config &config, const std::shared_ptr<const CBlock> pblock,
4401  bool fForceProcessing, bool *fNewBlock) {
4403 
4404  {
4405  if (fNewBlock) {
4406  *fNewBlock = false;
4407  }
4408 
4409  BlockValidationState state;
4410 
4411  // CheckBlock() does not support multi-threaded block validation
4412  // because CBlock::fChecked can cause a data race.
4413  // Therefore, the following critical section must include the
4414  // CheckBlock() call as well.
4415  LOCK(cs_main);
4416 
4417  // Ensure that CheckBlock() passes before calling AcceptBlock, as
4418  // belt-and-suspenders.
4419  bool ret =
4420  CheckBlock(*pblock, state, config.GetChainParams().GetConsensus(),
4421  BlockValidationOptions(config));
4422  if (ret) {
4423  // Store to disk
4425  config, pblock, state, fForceProcessing, nullptr, fNewBlock);
4426  }
4427 
4428  if (!ret) {
4429  GetMainSignals().BlockChecked(*pblock, state);
4430  return error("%s: AcceptBlock FAILED (%s)", __func__,
4431  state.ToString());
4432  }
4433  }
4434 
4435  NotifyHeaderTip();
4436 
4437  // Only used to report errors, not invalidity - ignore it
4438  BlockValidationState state;
4439  if (!::ChainstateActive().ActivateBestChain(config, state, pblock)) {
4440  return error("%s: ActivateBestChain failed (%s)", __func__,
4441  state.ToString());
4442  }
4443 
4444  return true;
4445 }
4446 
4448  const CBlock &block, CBlockIndex *pindexPrev,
4449  BlockValidationOptions validationOptions) {
4451  assert(pindexPrev && pindexPrev == ::ChainActive().Tip());
4452  CCoinsViewCache viewNew(&::ChainstateActive().CoinsTip());
4453  BlockHash block_hash(block.GetHash());
4454  CBlockIndex indexDummy(block);
4455  indexDummy.pprev = pindexPrev;
4456  indexDummy.nHeight = pindexPrev->nHeight + 1;
4457  indexDummy.phashBlock = &block_hash;
4458 
4459  // NOTE: CheckBlockHeader is called by CheckBlock
4460  if (!ContextualCheckBlockHeader(params, block, state, pindexPrev,
4461  GetAdjustedTime())) {
4462  return error("%s: Consensus::ContextualCheckBlockHeader: %s", __func__,
4463  state.ToString());
4464  }
4465 
4466  if (!CheckBlock(block, state, params.GetConsensus(), validationOptions)) {
4467  return error("%s: Consensus::CheckBlock: %s", __func__,
4468  state.ToString());
4469  }
4470 
4471  if (!ContextualCheckBlock(block, state, params.GetConsensus(),
4472  pindexPrev)) {
4473  return error("%s: Consensus::ContextualCheckBlock: %s", __func__,
4474  state.ToString());
4475  }
4476 
4477  if (!::ChainstateActive().ConnectBlock(block, state, &indexDummy, viewNew,
4478  params, validationOptions, true)) {
4479  return false;
4480  }
4481 
4482  assert(state.IsValid());
4483  return true;
4484 }
4485 
4494  LOCK(cs_LastBlockFile);
4495 
4496  uint64_t retval = 0;
4497  for (const CBlockFileInfo &file : vinfoBlockFile) {
4498  retval += file.nSize + file.nUndoSize;
4499  }
4500 
4501  return retval;
4502 }
4503 
4504 void BlockManager::PruneOneBlockFile(const int fileNumber) {
4506  LOCK(cs_LastBlockFile);
4507 
4508  for (const auto &entry : m_block_index) {
4509  CBlockIndex *pindex = entry.second;
4510  if (pindex->nFile == fileNumber) {
4511  pindex->nStatus = pindex->nStatus.withData(false).withUndo(false);
4512  pindex->nFile = 0;
4513  pindex->nDataPos = 0;
4514  pindex->nUndoPos = 0;
4515  setDirtyBlockIndex.insert(pindex);
4516 
4517  // Prune from m_blocks_unlinked -- any block we prune would have
4518  // to be downloaded again in order to consider its chain, at which
4519  // point it would be considered as a candidate for
4520  // m_blocks_unlinked or setBlockIndexCandidates.
4521  auto range = m_blocks_unlinked.equal_range(pindex->pprev);
4522  while (range.first != range.second) {
4523  std::multimap<CBlockIndex *, CBlockIndex *>::iterator _it =
4524  range.first;
4525  range.first++;
4526  if (_it->second == pindex) {
4527  m_blocks_unlinked.erase(_it);
4528  }
4529  }
4530  }
4531  }
4532 
4533  vinfoBlockFile[fileNumber].SetNull();
4534  setDirtyFileInfo.insert(fileNumber);
4535 }
4536 
4537 void UnlinkPrunedFiles(const std::set<int> &setFilesToPrune) {
4538  for (const int i : setFilesToPrune) {
4539  FlatFilePos pos(i, 0);
4540  fs::remove(BlockFileSeq().FileName(pos));
4541  fs::remove(UndoFileSeq().FileName(pos));
4542  LogPrintf("Prune: %s deleted blk/rev (%05u)\n", __func__, i);
4543  }
4544 }
4545 
4546 void BlockManager::FindFilesToPruneManual(std::set<int> &setFilesToPrune,
4547  int nManualPruneHeight,
4548  int chain_tip_height) {
4549  assert(fPruneMode && nManualPruneHeight > 0);
4550 
4551  LOCK2(cs_main, cs_LastBlockFile);
4552  if (chain_tip_height < 0) {
4553  return;
4554  }
4555 
4556  // last block to prune is the lesser of (user-specified height,
4557  // MIN_BLOCKS_TO_KEEP from the tip)
4558  unsigned int nLastBlockWeCanPrune = std::min(
4559  (unsigned)nManualPruneHeight, chain_tip_height - MIN_BLOCKS_TO_KEEP);
4560  int count = 0;
4561  for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
4562  if (vinfoBlockFile[fileNumber].nSize == 0 ||
4563  vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune) {
4564  continue;
4565  }
4566  PruneOneBlockFile(fileNumber);
4567  setFilesToPrune.insert(fileNumber);
4568  count++;
4569  }
4570  LogPrintf("Prune (Manual): prune_height=%d removed %d blk/rev pairs\n",
4571  nLastBlockWeCanPrune, count);
4572 }
4573 
4574 /* This function is called from the RPC code for pruneblockchain */
4575 void PruneBlockFilesManual(int nManualPruneHeight) {
4576  BlockValidationState state;
4577  const CChainParams &chainparams = Params();
4578  if (!::ChainstateActive().FlushStateToDisk(
4579  chainparams, state, FlushStateMode::NONE, nManualPruneHeight)) {
4580  LogPrintf("%s: failed to flush state (%s)\n", __func__,
4581  state.ToString());
4582  }
4583 }
4584 
4585 void BlockManager::FindFilesToPrune(std::set<int> &setFilesToPrune,
4586  uint64_t nPruneAfterHeight,
4587  int chain_tip_height, bool is_ibd) {
4588  LOCK2(cs_main, cs_LastBlockFile);
4589  if (chain_tip_height < 0 || nPruneTarget == 0) {
4590  return;
4591  }
4592  if (uint64_t(chain_tip_height) <= nPruneAfterHeight) {
4593  return;
4594  }
4595 
4596  unsigned int nLastBlockWeCanPrune = chain_tip_height - MIN_BLOCKS_TO_KEEP;
4597  uint64_t nCurrentUsage = CalculateCurrentUsage();
4598  // We don't check to prune until after we've allocated new space for files,
4599  // so we should leave a buffer under our target to account for another
4600  // allocation before the next pruning.
4601  uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE;
4602  uint64_t nBytesToPrune;
4603  int count = 0;
4604 
4605  if (nCurrentUsage + nBuffer >= nPruneTarget) {
4606  // On a prune event, the chainstate DB is flushed.
4607  // To avoid excessive prune events negating the benefit of high dbcache
4608  // values, we should not prune too rapidly.
4609  // So when pruning in IBD, increase the buffer a bit to avoid a re-prune
4610  // too soon.
4611  if (is_ibd) {
4612  // Since this is only relevant during IBD, we use a fixed 10%
4613  nBuffer += nPruneTarget / 10;
4614  }
4615 
4616  for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
4617  nBytesToPrune = vinfoBlockFile[fileNumber].nSize +
4618  vinfoBlockFile[fileNumber].nUndoSize;
4619 
4620  if (vinfoBlockFile[fileNumber].nSize == 0) {
4621  continue;
4622  }
4623 
4624  // are we below our target?
4625  if (nCurrentUsage + nBuffer < nPruneTarget) {
4626  break;
4627  }
4628 
4629  // don't prune files that could have a block within
4630  // MIN_BLOCKS_TO_KEEP of the main chain's tip, but keep scanning
4631  if (vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune) {
4632  continue;
4633  }
4634 
4635  PruneOneBlockFile(fileNumber);
4636  // Queue up the files for removal
4637  setFilesToPrune.insert(fileNumber);
4638  nCurrentUsage -= nBytesToPrune;
4639  count++;
4640  }
4641  }
4642 
4644  "Prune: target=%dMiB actual=%dMiB diff=%dMiB "
4645  "max_prune_height=%d removed %d blk/rev pairs\n",
4646  nPruneTarget / 1024 / 1024, nCurrentUsage / 1024 / 1024,
4647  ((int64_t)nPruneTarget - (int64_t)nCurrentUsage) / 1024 / 1024,
4648  nLastBlockWeCanPrune, count);
4649 }
4650 
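// Illustrative sketch (not part of validation.cpp; the struct and function
// names are hypothetical). FindFilesToPrune above keeps deleting the oldest
// blk/rev file pairs until total usage plus a safety buffer (one block-file
// chunk plus one undo-file chunk, enlarged by 10% of the target during IBD)
// drops back under the -prune byte target, while never touching files that
// may still hold blocks within MIN_BLOCKS_TO_KEEP of the tip. The core loop,
// with simplified bookkeeping:

#include <cstdint>
#include <vector>

struct PruneFile {
    uint64_t bytes;
    bool near_tip;
};

// Returns indices of files to delete, oldest first.
std::vector<size_t> ChooseFilesToPrune(const std::vector<PruneFile> &files,
                                       uint64_t current_usage,
                                       uint64_t prune_target,
                                       uint64_t buffer) {
    std::vector<size_t> to_prune;
    for (size_t i = 0; i < files.size(); ++i) {
        if (current_usage + buffer < prune_target) {
            break; // back under target, stop deleting
        }
        if (files[i].bytes == 0 || files[i].near_tip) {
            continue; // empty, or may contain blocks close to the tip
        }
        to_prune.push_back(i);
        current_usage -= files[i].bytes;
    }
    return to_prune;
}
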
4653 
4654  if (hash.IsNull()) {
4655  return nullptr;
4656  }
4657 
4658  // Return existing
4659  BlockMap::iterator mi = m_block_index.find(hash);
4660  if (mi != m_block_index.end()) {
4661  return (*mi).second;
4662  }
4663 
4664  // Create new
4665  CBlockIndex *pindexNew = new CBlockIndex();
4666  mi = m_block_index.insert(std::make_pair(hash, pindexNew)).first;
4667  pindexNew->phashBlock = &((*mi).first);
4668 
4669  return pindexNew;
4670 }
4671 
4673  const Consensus::Params &params, CBlockTreeDB &blocktree,
4674  std::set<CBlockIndex *, CBlockIndexWorkComparator>
4675  &block_index_candidates) {
4677  if (!blocktree.LoadBlockIndexGuts(
4678  params, [this](const BlockHash &hash) EXCLUSIVE_LOCKS_REQUIRED(
4679  cs_main) { return this->InsertBlockIndex(hash); })) {
4680  return false;
4681  }
4682 
4683  // Calculate nChainWork
4684  std::vector<std::pair<int, CBlockIndex *>> vSortedByHeight;
4685  vSortedByHeight.reserve(m_block_index.size());
4686  for (const std::pair<const BlockHash, CBlockIndex *> &item :
4687  m_block_index) {
4688  CBlockIndex *pindex = item.second;
4689  vSortedByHeight.push_back(std::make_pair(pindex->nHeight, pindex));
4690  }
4691 
4692  sort(vSortedByHeight.begin(), vSortedByHeight.end());
4693  for (const std::pair<int, CBlockIndex *> &item : vSortedByHeight) {
4694  if (ShutdownRequested()) {
4695  return false;
4696  }
4697  CBlockIndex *pindex = item.second;
4698  pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) +
4699  GetBlockProof(*pindex);
4700  pindex->nTimeMax =
4701  (pindex->pprev ? std::max(pindex->pprev->nTimeMax, pindex->nTime)
4702  : pindex->nTime);
4703  // We can link the chain of blocks for which we've received transactions
4704  // at some point. Pruned nodes may have deleted the block.
4705  if (pindex->nTx > 0) {
4706  if (!pindex->UpdateChainStats() && pindex->pprev) {
4707  m_blocks_unlinked.insert(std::make_pair(pindex->pprev, pindex));
4708  }
4709  }
4710 
4711  if (!pindex->nStatus.hasFailed() && pindex->pprev &&
4712  pindex->pprev->nStatus.hasFailed()) {
4713  pindex->nStatus = pindex->nStatus.withFailedParent();
4714  setDirtyBlockIndex.insert(pindex);
4715  }
4716  if (pindex->IsValid(BlockValidity::TRANSACTIONS) &&
4717  (pindex->HaveTxsDownloaded() || pindex->pprev == nullptr)) {
4718  block_index_candidates.insert(pindex);
4719  }
4720 
4721  if (pindex->nStatus.isInvalid() &&
4722  (!pindexBestInvalid ||
4723  pindex->nChainWork > pindexBestInvalid->nChainWork)) {
4724  pindexBestInvalid = pindex;
4725  }
4726 
4727  if (pindex->nStatus.isOnParkedChain() &&
4728  (!pindexBestParked ||
4729  pindex->nChainWork > pindexBestParked->nChainWork)) {
4730  pindexBestParked = pindex;
4731  }
4732 
4733  if (pindex->pprev) {
4734  pindex->BuildSkip();
4735  }
4736 
4737  if (pindex->IsValid(BlockValidity::TREE) &&
4738  (pindexBestHeader == nullptr ||
4740  pindexBestHeader = pindex;
4741  }
4742  }
4743 
4744  return true;
4745 }
4746 
4748  m_failed_blocks.clear();
4749  m_blocks_unlinked.clear();
4750 
4751  for (const BlockMap::value_type &entry : m_block_index) {
4752  delete entry.second;
4753  }
4754 
4755  m_block_index.clear();
4756 }
4757 
4758 static bool LoadBlockIndexDB(ChainstateManager &chainman,
4759  const Consensus::Params &params)
4761  if (!chainman.m_blockman.LoadBlockIndex(
4762  params, *pblocktree,
4763  ::ChainstateActive().setBlockIndexCandidates)) {
4764  return false;
4765  }
4766 
4767  // Load block file info
4768  pblocktree->ReadLastBlockFile(nLastBlockFile);
4769  vinfoBlockFile.resize(nLastBlockFile + 1);
4770  LogPrintf("%s: last block file = %i\n", __func__, nLastBlockFile);
4771  for (int nFile = 0; nFile <= nLastBlockFile; nFile++) {
4772  pblocktree->ReadBlockFileInfo(nFile, vinfoBlockFile[nFile]);
4773  }
4774  LogPrintf("%s: last block file info: %s\n", __func__,
4775  vinfoBlockFile[nLastBlockFile].ToString());
4776  for (int nFile = nLastBlockFile + 1; true; nFile++) {
4777  CBlockFileInfo info;
4778  if (pblocktree->ReadBlockFileInfo(nFile, info)) {
4779  vinfoBlockFile.push_back(info);
4780  } else {
4781  break;
4782  }
4783  }
4784 
4785  // Check presence of blk files
4786  LogPrintf("Checking all blk files are present...\n");
4787  std::set<int> setBlkDataFiles;
4788  for (const std::pair<const BlockHash, CBlockIndex *> &item :
4789  chainman.BlockIndex()) {
4790  CBlockIndex *pindex = item.second;
4791  if (pindex->nStatus.hasData()) {
4792  setBlkDataFiles.insert(pindex->nFile);
4793  }
4794  }
4795 
4796  for (const int i : setBlkDataFiles) {
4797  FlatFilePos pos(i, 0);
4799  .IsNull()) {
4800  return false;
4801  }
4802  }
4803 
4804  // Check whether we have ever pruned block & undo files
4805  pblocktree->ReadFlag("prunedblockfiles", fHavePruned);
4806  if (fHavePruned) {
4807  LogPrintf(
4808  "LoadBlockIndexDB(): Block files have previously been pruned\n");
4809  }
4810 
4811  // Check whether we need to continue reindexing
4812  if (pblocktree->IsReindexing()) {
4813  fReindex = true;
4814  }
4815 
4816  return true;
4817 }
4818 
4819 void CChainState::LoadMempool(const Config &config, const ArgsManager &args) {
4820  if (args.GetArg("-persistmempool", DEFAULT_PERSIST_MEMPOOL)) {
4821  ::LoadMempool(config, m_mempool);
4822  }
4824 }
4825 
4826 bool CChainState::LoadChainTip(const CChainParams &chainparams) {
4828  const CCoinsViewCache &coins_cache = CoinsTip();
4829  // Never called when the coins view is empty
4830  assert(!coins_cache.GetBestBlock().IsNull());
4831  const CBlockIndex *tip = m_chain.Tip();
4832 
4833  if (tip && tip->GetBlockHash() == coins_cache.GetBestBlock()) {
4834  return true;
4835  }
4836 
4837  // Load pointer to end of best chain
4838  CBlockIndex *pindex = LookupBlockIndex(coins_cache.GetBestBlock());
4839  if (!pindex) {
4840  return false;
4841  }
4842  m_chain.SetTip(pindex);
4844 
4845  tip = m_chain.Tip();
4846  LogPrintf(
4847  "Loaded best chain: hashBestChain=%s height=%d date=%s progress=%f\n",
4848  tip->GetBlockHash().ToString(), m_chain.Height(),
4850  GuessVerificationProgress(chainparams.TxData(), tip));
4851  return true;
4852 }
4853 
4855  uiInterface.ShowProgress(_("Verifying blocks...").translated, 0, false);
4856 }
4857 
4859  uiInterface.ShowProgress("", 100, false);
4860 }
4861 
4862 bool CVerifyDB::VerifyDB(const Config &config, CCoinsView *coinsview,
4863  int nCheckLevel, int nCheckDepth) {
4864  LOCK(cs_main);
4865 
4866  const CChainParams &params = config.GetChainParams();
4867  const Consensus::Params &consensusParams = params.GetConsensus();
4868 
4869  if (::ChainActive().Tip() == nullptr ||
4870  ::ChainActive().Tip()->pprev == nullptr) {
4871  return true;
4872  }
4873 
4874  // Verify blocks in the best chain
4876  nCheckDepth = ::ChainActive().Height();
4877  }
4878 
4879  nCheckLevel = std::max(0, std::min(4, nCheckLevel));
4880  LogPrintf("Verifying last %i blocks at level %i\n", nCheckDepth,
4881  nCheckLevel);
4882 
4883  CCoinsViewCache coins(coinsview);
4884  CBlockIndex *pindex;
4885  CBlockIndex *pindexFailure = nullptr;
4886  int nGoodTransactions = 0;
4887  BlockValidationState state;
4888  int reportDone = 0;
4889  LogPrintfToBeContinued("[0%%]...");
4890  for (pindex = ::ChainActive().Tip(); pindex && pindex->pprev;
4891  pindex = pindex->pprev) {
4892  const int percentageDone =
4893  std::max(1, std::min(99, (int)(((double)(::ChainActive().Height() -
4894  pindex->nHeight)) /
4895  (double)nCheckDepth *
4896  (nCheckLevel >= 4 ? 50 : 100))));
4897  if (reportDone < percentageDone / 10) {
4898  // report every 10% step
4899  LogPrintfToBeContinued("[%d%%]...", percentageDone);
4900  reportDone = percentageDone / 10;
4901  }
4902 
4903  uiInterface.ShowProgress(_("Verifying blocks...").translated,
4904  percentageDone, false);
4905  if (pindex->nHeight <= ::ChainActive().Height() - nCheckDepth) {
4906  break;
4907  }
4908 
4909  if (fPruneMode && !pindex->nStatus.hasData()) {
4910  // If pruning, only go back as far as we have data.
4911  LogPrintf("VerifyDB(): block verification stopping at height %d "
4912  "(pruning, no data)\n",
4913  pindex->nHeight);
4914  break;
4915  }
4916 
4917  CBlock block;
4918 
4919  // check level 0: read from disk
4920  if (!ReadBlockFromDisk(block, pindex, consensusParams)) {
4921  return error(
4922  "VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s",
4923  pindex->nHeight, pindex->GetBlockHash().ToString());
4924  }
4925 
4926  // check level 1: verify block validity
4927  if (nCheckLevel >= 1 && !CheckBlock(block, state, consensusParams,
4928  BlockValidationOptions(config))) {
4929  return error("%s: *** found bad block at %d, hash=%s (%s)\n",
4930  __func__, pindex->nHeight,
4931  pindex->GetBlockHash().ToString(), state.ToString());
4932  }
4933 
4934  // check level 2: verify undo validity
4935  if (nCheckLevel >= 2 && pindex) {
4936  CBlockUndo undo;
4937  if (!pindex->GetUndoPos().IsNull()) {
4938  if (!UndoReadFromDisk(undo, pindex)) {
4939  return error(
4940  "VerifyDB(): *** found bad undo data at %d, hash=%s\n",
4941  pindex->nHeight, pindex->GetBlockHash().ToString());
4942  }
4943  }
4944  }
4945  // check level 3: check for inconsistencies during memory-only
4946  // disconnect of tip blocks
4947  if (nCheckLevel >= 3 &&
4948  (coins.DynamicMemoryUsage() +
4949  ::ChainstateActive().CoinsTip().DynamicMemoryUsage()) <=
4950  ::ChainstateActive().m_coinstip_cache_size_bytes) {
4951  assert(coins.GetBestBlock() == pindex->GetBlockHash());
4952  DisconnectResult res =
4953  ::ChainstateActive().DisconnectBlock(block, pindex, coins);
4954  if (res == DisconnectResult::FAILED) {
4955  return error("VerifyDB(): *** irrecoverable inconsistency in "
4956  "block data at %d, hash=%s",
4957  pindex->nHeight,
4958  pindex->GetBlockHash().ToString());
4959  }
4960 
4961  if (res == DisconnectResult::UNCLEAN) {
4962  nGoodTransactions = 0;
4963  pindexFailure = pindex;
4964  } else {
4965  nGoodTransactions += block.vtx.size();
4966  }
4967  }
4968 
4969  if (ShutdownRequested()) {
4970  return true;
4971  }
4972  }
4973 
4974  if (pindexFailure) {
4975  return error("VerifyDB(): *** coin database inconsistencies found "
4976  "(last %i blocks, %i good transactions before that)\n",
4977  ::ChainActive().Height() - pindexFailure->nHeight + 1,
4978  nGoodTransactions);
4979  }
4980 
4981  // store block count as we move pindex at check level >= 4
4982  int block_count = ::ChainActive().Height() - pindex->nHeight;
4983 
4984  // check level 4: try reconnecting blocks
4985  if (nCheckLevel >= 4) {
4986  while (pindex != ::ChainActive().Tip()) {
4987  const int percentageDone = std::max(
4988  1, std::min(99, 100 - int(double(::ChainActive().Height() -
4989  pindex->nHeight) /
4990  double(nCheckDepth) * 50)));
4991  if (reportDone < percentageDone / 10) {
4992  // report every 10% step
4993  LogPrintfToBeContinued("[%d%%]...", percentageDone);
4994  reportDone = percentageDone / 10;
4995  }
4996  uiInterface.ShowProgress(_("Verifying blocks...").translated,
4997  percentageDone, false);
4998  pindex = ::ChainActive().Next(pindex);
4999  CBlock block;
5000  if (!ReadBlockFromDisk(block, pindex, consensusParams)) {
5001  return error(
5002  "VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s",
5003  pindex->nHeight, pindex->GetBlockHash().ToString());
5004  }
5005  if (!::ChainstateActive().ConnectBlock(
5006  block, state, pindex, coins, params,
5007  BlockValidationOptions(config))) {
5008  return error("VerifyDB(): *** found unconnectable block at %d, "
5009  "hash=%s (%s)",
5010  pindex->nHeight, pindex->GetBlockHash().ToString(),
5011  state.ToString());
5012  }
5013  if (ShutdownRequested()) {
5014  return true;
5015  }
5016  }
5017  }
5018 
5019  LogPrintf("[DONE].\n");
5020  LogPrintf("No coin database inconsistencies in last %i blocks (%i "
5021  "transactions)\n",
5022  block_count, nGoodTransactions);
5023 
5024  return true;
5025 }
5026 
5032  CCoinsViewCache &view,
5033  const Consensus::Params &params) {
5034  // TODO: merge with ConnectBlock
5035  CBlock block;
5036  if (!ReadBlockFromDisk(block, pindex, params)) {
5037  return error("ReplayBlock(): ReadBlockFromDisk failed at %d, hash=%s",
5038  pindex->nHeight, pindex->GetBlockHash().ToString());
5039  }
5040 
5041  for (const CTransactionRef &tx : block.vtx) {
5042  // Pass check = true as every addition may be an overwrite.
5043  AddCoins(view, *tx, pindex->nHeight, true);
5044  }
5045 
5046  for (const CTransactionRef &tx : block.vtx) {
5047  if (tx->IsCoinBase()) {
5048  continue;
5049  }
5050 
5051  for (const CTxIn &txin : tx->vin) {
5052  view.SpendCoin(txin.prevout);
5053  }
5054  }
5055 
5056  return true;
5057 }
5058 
5060  LOCK(cs_main);
5061 
5062  CCoinsView &db = this->CoinsDB();
5063  CCoinsViewCache cache(&db);
5064 
5065  std::vector<BlockHash> hashHeads = db.GetHeadBlocks();
5066  if (hashHeads.empty()) {
5067  // We're already in a consistent state.
5068  return true;
5069  }
5070  if (hashHeads.size() != 2) {
5071  return error("ReplayBlocks(): unknown inconsistent state");
5072  }
5073 
5074  uiInterface.ShowProgress(_("Replaying blocks...").translated, 0, false);
5075  LogPrintf("Replaying blocks\n");
5076 
5077  // Old tip during the interrupted flush.
5078  const CBlockIndex *pindexOld = nullptr;
5079  // New tip during the interrupted flush.
5080  const CBlockIndex *pindexNew;
5081  // Latest block common to both the old and the new tip.
5082  const CBlockIndex *pindexFork = nullptr;
5083 
5084  if (m_blockman.m_block_index.count(hashHeads[0]) == 0) {
5085  return error(
5086  "ReplayBlocks(): reorganization to unknown block requested");
5087  }
5088 
5089  pindexNew = m_blockman.m_block_index[hashHeads[0]];
5090 
5091  if (!hashHeads[1].IsNull()) {
5092  // The old tip is allowed to be 0, indicating it's the first flush.
5093  if (m_blockman.m_block_index.count(hashHeads[1]) == 0) {
5094  return error(
5095  "ReplayBlocks(): reorganization from unknown block requested");
5096  }
5097 
5098  pindexOld = m_blockman.m_block_index[hashHeads[1]];
5099  pindexFork = LastCommonAncestor(pindexOld, pindexNew);
5100  assert(pindexFork != nullptr);
5101  }
5102 
5103  // Rollback along the old branch.
5104  while (pindexOld != pindexFork) {
5105  if (pindexOld->nHeight > 0) {
5106  // Never disconnect the genesis block.
5107  CBlock block;
5108  if (!ReadBlockFromDisk(block, pindexOld, params)) {
5109  return error("RollbackBlock(): ReadBlockFromDisk() failed at "
5110  "%d, hash=%s",
5111  pindexOld->nHeight,
5112  pindexOld->GetBlockHash().ToString());
5113  }
5114 
5115  LogPrintf("Rolling back %s (%i)\n",
5116  pindexOld->GetBlockHash().ToString(), pindexOld->nHeight);
5117  DisconnectResult res = DisconnectBlock(block, pindexOld, cache);
5118  if (res == DisconnectResult::FAILED) {
5119  return error(
5120  "RollbackBlock(): DisconnectBlock failed at %d, hash=%s",
5121  pindexOld->nHeight, pindexOld->GetBlockHash().ToString());
5122  }
5123 
5124  // If DisconnectResult::UNCLEAN is returned, it means a non-existent
5125  // UTXO was deleted, or an existing UTXO was overwritten. That
5126  // happens when the block being disconnected never had all of its
5127  // operations applied to the UTXO set. However, as both writing a
5128  // UTXO and deleting a UTXO are idempotent operations, the result is
5129  // still a version of the UTXO set with the effects of that block
5130  // undone.
5131  }
5132  pindexOld = pindexOld->pprev;
5133  }
5134 
5135  // Roll forward from the forking point to the new tip.
5136  int nForkHeight = pindexFork ? pindexFork->nHeight : 0;
5137  for (int nHeight = nForkHeight + 1; nHeight <= pindexNew->nHeight;
5138  ++nHeight) {
5139  const CBlockIndex *pindex = pindexNew->GetAncestor(nHeight);
5140  LogPrintf("Rolling forward %s (%i)\n",
5141  pindex->GetBlockHash().ToString(), nHeight);
5142  uiInterface.ShowProgress(_("Replaying blocks...").translated,
5143  (int)((nHeight - nForkHeight) * 100.0 /
5144  (pindexNew->nHeight - nForkHeight)),
5145  false);
5146  if (!RollforwardBlock(pindex, cache, params)) {
5147  return false;
5148  }
5149  }
5150 
5151  cache.SetBestBlock(pindexNew->GetBlockHash());
5152  cache.Flush();
5153  uiInterface.ShowProgress("", 100, false);
5154  return true;
5155 }
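// --- Editorial sketch (not part of validation.cpp): the shape of the replay
// performed by ReplayBlocks() above. ToyIndex models only the pprev/nHeight
// fields of CBlockIndex, and ToyLastCommonAncestor mirrors what the real
// LastCommonAncestor() call provides: the fork point between the old and new
// tips recorded by the interrupted flush. Names are illustrative only.
namespace editorial_sketch {
struct ToyIndex {
    int nHeight = 0;
    const ToyIndex *pprev = nullptr;
};

static const ToyIndex *ToyLastCommonAncestor(const ToyIndex *a,
                                             const ToyIndex *b) {
    // Climb the deeper branch until both cursors sit at the same height,
    // then climb both in lock-step until they meet at the fork point.
    while (a != nullptr && b != nullptr && a != b) {
        if (a->nHeight > b->nHeight) {
            a = a->pprev;
        } else if (b->nHeight > a->nHeight) {
            b = b->pprev;
        } else {
            a = a->pprev;
            b = b->pprev;
        }
    }
    return a;
}
// ReplayBlocks() then disconnects blocks from the old tip down to (but not
// including) the fork point, newest first, and reconnects blocks from the
// fork point up to the new tip, oldest first -- exactly the two loops above.
} // namespace editorial_sketch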
5156 
5157 // May NOT be used after any connections are up as much of the peer-processing
5158 // logic assumes a consistent block index state
5159 void CChainState::UnloadBlockIndex() {
5160  nBlockSequenceId = 1;
5161  setBlockIndexCandidates.clear();
5162 
5163  // Do not point to CBlockIndex that will be free'd
5164  m_finalizedBlockIndex = nullptr;
5165 }
5166 
5167 // May NOT be used after any connections are up as much
5168 // of the peer-processing logic assumes a consistent
5169 // block index state
5170 void UnloadBlockIndex(CTxMemPool *mempool, ChainstateManager &chainman) {
5171  LOCK(cs_main);
5172  chainman.Unload();
5173  pindexBestInvalid = nullptr;
5174  pindexBestParked = nullptr;
5175  pindexBestHeader = nullptr;
5176  pindexBestForkTip = nullptr;
5177  pindexBestForkBase = nullptr;
5178  ResetASERTAnchorBlockCache();
5179  if (mempool) {
5180  mempool->clear();
5181  }
5182  vinfoBlockFile.clear();
5183  nLastBlockFile = 0;
5184  setDirtyBlockIndex.clear();
5185  setDirtyFileInfo.clear();
5186  fHavePruned = false;
5187 }
5188 
5191  // Load block index from databases
5192  bool needs_init = fReindex;
5193  if (!fReindex) {
5194  bool ret = LoadBlockIndexDB(*this, params);
5195  if (!ret) {
5196  return false;
5197  }
5198 
5199  needs_init = m_blockman.m_block_index.empty();
5200  }
5201 
5202  if (needs_init) {
5203  // Everything here is for *new* reindex/DBs. Thus, though
5204  // LoadBlockIndexDB may have set fReindex if we shut down
5205  // mid-reindex previously, we don't check fReindex and
5206  // instead only check it prior to LoadBlockIndexDB to set
5207  // needs_init.
5208 
5209  LogPrintf("Initializing databases...\n");
5210  }
5211  return true;
5212 }
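// --- Editorial sketch (not part of validation.cpp): the initialization
// decision made above, reduced to its essentials. The reindex flag is
// sampled *before* the block index database is loaded because, as the
// comment above notes, resuming an interrupted reindex can turn that flag
// back on even though the databases are not new. Illustrative names only.
static bool SketchShouldInitDatabases(bool reindexRequestedAtStartup,
                                      bool blockIndexLoadedEmpty) {
    // A reindex requested at startup always starts from scratch; otherwise
    // only a freshly created (empty) block index needs initialization.
    return reindexRequestedAtStartup || blockIndexLoadedEmpty;
}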
5213 
5214 bool CChainState::LoadGenesisBlock(const CChainParams &chainparams) {
5215  LOCK(cs_main);
5216 
5217  // Check whether we're already initialized by checking for genesis in
5218  // m_blockman.m_block_index. Note that we can't use m_chain here, since it
5219  // is set based on the coins db, not the block index db, which is the only
5220  // thing loaded at this point.
5221  if (m_blockman.m_block_index.count(chainparams.GenesisBlock().GetHash())) {
5222  return true;
5223  }
5224 
5225  try {
5226  const CBlock &block = chainparams.GenesisBlock();
5227  FlatFilePos blockPos = SaveBlockToDisk(block, 0, chainparams, nullptr);
5228  if (blockPos.IsNull()) {
5229  return error("%s: writing genesis block to disk failed", __func__);
5230  }
5231  CBlockIndex *pindex = m_blockman.AddToBlockIndex(block);
5232  ReceivedBlockTransactions(block, pindex, blockPos);
5233  } catch (const std::runtime_error &e) {
5234  return error("%s: failed to write genesis block: %s", __func__,
5235  e.what());
5236  }
5237 
5238  return true;
5239 }
5240 
5241 bool LoadGenesisBlock(const CChainParams &chainparams) {
5242  return ::ChainstateActive().LoadGenesisBlock(chainparams);
5243 }
5244 
5245 void LoadExternalBlockFile(const Config &config, FILE *fileIn,
5246  FlatFilePos *dbp) {
5247  // Map of disk positions for blocks with unknown parent (only used for
5248  // reindex)
5249  static std::multimap<uint256, FlatFilePos> mapBlocksUnknownParent;
5250  int64_t nStart = GetTimeMillis();
5251 
5252  const CChainParams &chainparams = config.GetChainParams();
5253 
5254  int nLoaded = 0;
5255  try {
5256  // This takes over fileIn and calls fclose() on it in the CBufferedFile
5257  // destructor. Make sure we have at least 2*MAX_TX_SIZE space in there
5258  // so any transaction can fit in the buffer.
5259  CBufferedFile blkdat(fileIn, 2 * MAX_TX_SIZE, MAX_TX_SIZE + 8, SER_DISK,
5260  CLIENT_VERSION);
5261  uint64_t nRewind = blkdat.GetPos();
5262  while (!blkdat.eof()) {
5263  if (ShutdownRequested()) {
5264  return;
5265  }
5266 
5267  blkdat.SetPos(nRewind);
5268  // Start one byte further next time, in case of failure.
5269  nRewind++;
5270  // Remove former limit.
5271  blkdat.SetLimit();
5272  unsigned int nSize = 0;
5273  try {
5274  // Locate a header.
5275  uint8_t buf[CMessageHeader::MESSAGE_START_SIZE];
5276  blkdat.FindByte(chainparams.DiskMagic()[0]);
5277  nRewind = blkdat.GetPos() + 1;
5278  blkdat >> buf;
5279  if (memcmp(buf, chainparams.DiskMagic().data(),
5280  CMessageHeader::MESSAGE_START_SIZE)) {
5281  continue;
5282  }
5283 
5284  // Read size.
5285  blkdat >> nSize;
5286  if (nSize < 80) {
5287  continue;
5288  }
5289  } catch (const std::exception &) {
5290  // No valid block header found; don't complain.
5291  break;
5292  }
5293 
5294  try {
5295  // read block
5296  uint64_t nBlockPos = blkdat.GetPos();
5297  if (dbp) {
5298  dbp->nPos = nBlockPos;
5299  }
5300  blkdat.SetLimit(nBlockPos + nSize);
5301  std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
5302  CBlock &block = *pblock;
5303  blkdat >> block;
5304  nRewind = blkdat.GetPos();
5305 
5306  const BlockHash hash = block.GetHash();
5307  {
5308  LOCK(cs_main);
5309  // detect out of order blocks, and store them for later
5310  if (hash != chainparams.GetConsensus().hashGenesisBlock &&
5311  !LookupBlockIndex(block.hashPrevBlock)) {
5312  LogPrint(
5314  "%s: Out of order block %s, parent %s not known\n",
5315  __func__, hash.ToString(),
5316  block.hashPrevBlock.ToString());
5317  if (dbp) {
5318  mapBlocksUnknownParent.insert(
5319  std::make_pair(block.hashPrevBlock, *dbp));
5320  }
5321  continue;
5322  }
5323 
5324  // process in case the block isn't known yet
5325  CBlockIndex *pindex = LookupBlockIndex(hash);
5326  if (!pindex || !pindex->nStatus.hasData()) {
5327  BlockValidationState state;
5328  if (::ChainstateActive().AcceptBlock(
5329  config, pblock, state, true, dbp, nullptr)) {
5330  nLoaded++;
5331  }
5332  if (state.IsError()) {
5333  break;
5334  }
5335  } else if (hash != chainparams.GetConsensus()
5336  .hashGenesisBlock &&
5337  pindex->nHeight % 1000 == 0) {
5338  LogPrint(
5340  "Block Import: already had block %s at height %d\n",
5341  hash.ToString(), pindex->nHeight);
5342  }
5343  }
5344 
5345  // Activate the genesis block so normal node progress can
5346  // continue
5347  if (hash == chainparams.GetConsensus().hashGenesisBlock) {
5348  BlockValidationState state;
5349  if (!ActivateBestChain(config, state, nullptr)) {
5350  break;
5351  }
5352  }
5353 
5354  NotifyHeaderTip();
5355 
5356  // Recursively process earlier encountered successors of this
5357  // block
5358  std::deque<uint256> queue;
5359  queue.push_back(hash);
5360  while (!queue.empty()) {
5361  uint256 head = queue.front();
5362  queue.pop_front();
5363  std::pair<std::multimap<uint256, FlatFilePos>::iterator,
5364  std::multimap<uint256, FlatFilePos>::iterator>
5365  range = mapBlocksUnknownParent.equal_range(head);
5366  while (range.first != range.second) {
5367  std::multimap<uint256, FlatFilePos>::iterator it =
5368  range.first;
5369  std::shared_ptr<CBlock> pblockrecursive =
5370  std::make_shared<CBlock>();
5371  if (ReadBlockFromDisk(*pblockrecursive, it->second,
5372  chainparams.GetConsensus())) {
5373  LogPrint(
5375  "%s: Processing out of order child %s of %s\n",
5376  __func__, pblockrecursive->GetHash().ToString(),
5377  head.ToString());
5378  LOCK(cs_main);
5379  BlockValidationState dummy;
5380  if (::ChainstateActive().AcceptBlock(
5381  config, pblockrecursive, dummy, true,
5382  &it->second, nullptr)) {
5383  nLoaded++;
5384  queue.push_back(pblockrecursive->GetHash());
5385  }
5386  }
5387  range.first++;
5388  mapBlocksUnknownParent.erase(it);
5389  NotifyHeaderTip();
5390  }
5391  }
5392  } catch (const std::exception &e) {
5393  LogPrintf("%s: Deserialize or I/O error - %s\n", __func__,
5394  e.what());
5395  }
5396  }
5397  } catch (const std::runtime_error &e) {
5398  AbortNode(std::string("System error: ") + e.what());
5399  }
5400 
5401  LogPrintf("Loaded %i blocks from external file in %dms\n", nLoaded,
5402  GetTimeMillis() - nStart);
5403 }
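// --- Editorial sketch (not part of validation.cpp): the orphan-draining
// pattern used by LoadExternalBlockFile() above, with block hashes reduced
// to ints and block acceptance reduced to a callback. Whenever a block is
// accepted, every earlier-seen descendant that was parked with an unknown
// parent is processed breadth-first, so grandchildren queued behind them are
// unlocked in turn. Assumes <deque>, <functional> and <map>; names are
// illustrative only.
namespace editorial_sketch {
static void DrainDeferredChildren(
    int acceptedBlock, std::multimap<int, int> &blocksWithUnknownParent,
    const std::function<bool(int)> &acceptBlock) {
    std::deque<int> queue{acceptedBlock};
    while (!queue.empty()) {
        const int head = queue.front();
        queue.pop_front();
        // All deferred blocks whose parent is `head` can be tried now.
        auto range = blocksWithUnknownParent.equal_range(head);
        while (range.first != range.second) {
            auto it = range.first++;
            if (acceptBlock(it->second)) {
                // Its own children may now be unlockable too.
                queue.push_back(it->second);
            }
            blocksWithUnknownParent.erase(it);
        }
    }
}
} // namespace editorial_sketch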
5404 
5405 void CChainState::CheckBlockIndex(const Consensus::Params &consensusParams) {
5406  if (!fCheckBlockIndex) {
5407  return;
5408  }
5409 
5410  LOCK(cs_main);
5411 
5412  // During a reindex, we read the genesis block and call CheckBlockIndex
5413  // before ActivateBestChain, so we have the genesis block in
5414  // m_blockman.m_block_index but no active chain. (A few of the tests when
5415  // iterating the block tree require that m_chain has been initialized.)
5416  if (m_chain.Height() < 0) {
5417  assert(m_blockman.m_block_index.size() <= 1);
5418  return;
5419  }
5420 
5421  // Build forward-pointing map of the entire block tree.
5422  std::multimap<CBlockIndex *, CBlockIndex *> forward;
5423  for (const auto &entry : m_blockman.m_block_index) {
5424  forward.emplace(entry.second->pprev, entry.second);
5425  }
5426 
5427  assert(forward.size() == m_blockman.m_block_index.size());
5428 
5429  std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
5430  std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
5431  rangeGenesis = forward.equal_range(nullptr);
5432  CBlockIndex *pindex = rangeGenesis.first->second;
5433  rangeGenesis.first++;
5434  // There is only one index entry with parent nullptr.
5435  assert(rangeGenesis.first == rangeGenesis.second);
5436 
5437  // Iterate over the entire block tree, using depth-first search.
5438  // Along the way, remember the earliest block on the path from the genesis
5439  // block to the block being explored that has each of the tracked properties.
5440  size_t nNodes = 0;
5441  int nHeight = 0;
5442  // Oldest ancestor of pindex which is invalid.
5443  CBlockIndex *pindexFirstInvalid = nullptr;
5444  // Oldest ancestor of pindex which is parked.
5445  CBlockIndex *pindexFirstParked = nullptr;
5446  // Oldest ancestor of pindex which does not have data available.
5447  CBlockIndex *pindexFirstMissing = nullptr;
5448  // Oldest ancestor of pindex for which nTx == 0.
5449  CBlockIndex *pindexFirstNeverProcessed = nullptr;
5450  // Oldest ancestor of pindex which does not have BLOCK_VALID_TREE
5451  // (regardless of being valid or not).
5452  CBlockIndex *pindexFirstNotTreeValid = nullptr;
5453  // Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS
5454  // (regardless of being valid or not).
5455  CBlockIndex *pindexFirstNotTransactionsValid = nullptr;
5456  // Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN
5457  // (regardless of being valid or not).
5458  CBlockIndex *pindexFirstNotChainValid = nullptr;
5459  // Oldest ancestor of pindex which does not have BLOCK_VALID_SCRIPTS
5460  // (regardless of being valid or not).
5461  CBlockIndex *pindexFirstNotScriptsValid = nullptr;
5462  while (pindex != nullptr) {
5463  nNodes++;
5464  if (pindexFirstInvalid == nullptr && pindex->nStatus.hasFailed()) {
5465  pindexFirstInvalid = pindex;
5466  }
5467  if (pindexFirstParked == nullptr && pindex->nStatus.isParked()) {
5468  pindexFirstParked = pindex;
5469  }
5470  if (pindexFirstMissing == nullptr && !pindex->nStatus.hasData()) {
5471  pindexFirstMissing = pindex;
5472  }
5473  if (pindexFirstNeverProcessed == nullptr && pindex->nTx == 0) {
5474  pindexFirstNeverProcessed = pindex;
5475  }
5476  if (pindex->pprev != nullptr && pindexFirstNotTreeValid == nullptr &&
5477  pindex->nStatus.getValidity() < BlockValidity::TREE) {
5478  pindexFirstNotTreeValid = pindex;
5479  }
5480  if (pindex->pprev != nullptr &&
5481  pindexFirstNotTransactionsValid == nullptr &&
5482  pindex->nStatus.getValidity() < BlockValidity::TRANSACTIONS) {
5483  pindexFirstNotTransactionsValid = pindex;
5484  }
5485  if (pindex->pprev != nullptr && pindexFirstNotChainValid == nullptr &&
5486  pindex->nStatus.getValidity() < BlockValidity::CHAIN) {
5487  pindexFirstNotChainValid = pindex;
5488  }
5489  if (pindex->pprev != nullptr && pindexFirstNotScriptsValid == nullptr &&
5490  pindex->nStatus.getValidity() < BlockValidity::SCRIPTS) {
5491  pindexFirstNotScriptsValid = pindex;
5492  }
5493 
5494  // Begin: actual consistency checks.
5495  if (pindex->pprev == nullptr) {
5496  // Genesis block checks.
5497  // Genesis block's hash must match.
5498  assert(pindex->GetBlockHash() == consensusParams.hashGenesisBlock);
5499  // The current active chain's genesis block must be this block.
5500  assert(pindex == m_chain.Genesis());
5501  }
5502  if (!pindex->HaveTxsDownloaded()) {
5503  // nSequenceId can't be set positive for blocks that aren't linked
5504  // (negative is used for preciousblock)
5505  assert(pindex->nSequenceId <= 0);
5506  }
5507  // VALID_TRANSACTIONS is equivalent to nTx > 0 for all nodes (whether or
5508  // not pruning has occurred). HAVE_DATA is only equivalent to nTx > 0
5509  // (or VALID_TRANSACTIONS) if no pruning has occurred.
5510  if (!fHavePruned) {
5511  // If we've never pruned, then HAVE_DATA should be equivalent to nTx
5512  // > 0
5513  assert(pindex->nStatus.hasData() == (pindex->nTx > 0));
5514  assert(pindexFirstMissing == pindexFirstNeverProcessed);
5515  } else if (pindex->nStatus.hasData()) {
5516  // If we have pruned, then we can only say that HAVE_DATA implies
5517  // nTx > 0
5518  assert(pindex->nTx > 0);
5519  }
5520  if (pindex->nStatus.hasUndo()) {
5521  assert(pindex->nStatus.hasData());
5522  }
5523  // This is pruning-independent.
5524  assert((pindex->nStatus.getValidity() >= BlockValidity::TRANSACTIONS) ==
5525  (pindex->nTx > 0));
5526  // All parents having had data (at some point) is equivalent to all
5527  // parents being VALID_TRANSACTIONS, which is equivalent to
5528  // HaveTxsDownloaded().
5529  assert((pindexFirstNeverProcessed == nullptr) ==
5530  (pindex->HaveTxsDownloaded()));
5531  // All parents being VALID_TRANSACTIONS (regardless of being valid or
5532  // not) is equivalent to HaveTxsDownloaded().
5533  assert((pindexFirstNotTransactionsValid == nullptr) ==
5534  (pindex->HaveTxsDownloaded()));
5535  // nHeight must be consistent.
5536  assert(pindex->nHeight == nHeight);
5537  // For every block except the genesis block, the chainwork must be
5538  // larger than the parent's.
5539  assert(pindex->pprev == nullptr ||
5540  pindex->nChainWork >= pindex->pprev->nChainWork);
5541  // The pskip pointer must point back for all but the first 2 blocks.
5542  assert(nHeight < 2 ||
5543  (pindex->pskip && (pindex->pskip->nHeight < nHeight)));
5544  // All m_blockman.m_block_index entries must at least be TREE valid
5545  assert(pindexFirstNotTreeValid == nullptr);
5546  if (pindex->nStatus.getValidity() >= BlockValidity::TREE) {
5547  // TREE valid implies all parents are TREE valid
5548  assert(pindexFirstNotTreeValid == nullptr);
5549  }
5550  if (pindex->