Bitcoin ABC  0.24.7
P2P Digital Currency
net_processing.cpp
Go to the documentation of this file.
1 // Copyright (c) 2009-2010 Satoshi Nakamoto
2 // Copyright (c) 2009-2016 The Bitcoin Core developers
3 // Distributed under the MIT software license, see the accompanying
4 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
5 
6 #include <net_processing.h>
7 
8 #include <addrman.h>
9 #include <avalanche/avalanche.h>
10 #include <avalanche/peermanager.h>
11 #include <avalanche/processor.h>
12 #include <avalanche/proof.h>
13 #include <avalanche/validation.h>
14 #include <banman.h>
15 #include <blockdb.h>
16 #include <blockencodings.h>
17 #include <blockfilter.h>
18 #include <blockvalidity.h>
19 #include <chain.h>
20 #include <chainparams.h>
21 #include <config.h>
22 #include <consensus/validation.h>
23 #include <hash.h>
24 #include <index/blockfilterindex.h>
25 #include <merkleblock.h>
26 #include <netbase.h>
27 #include <netmessagemaker.h>
28 #include <policy/fees.h>
29 #include <policy/policy.h>
30 #include <primitives/block.h>
31 #include <primitives/transaction.h>
32 #include <random.h>
33 #include <reverse_iterator.h>
34 #include <scheduler.h>
35 #include <streams.h>
36 #include <tinyformat.h>
37 #include <txmempool.h>
38 #include <util/check.h> // For NDEBUG compile time check
39 #include <util/strencodings.h>
40 #include <util/system.h>
41 #include <validation.h>
42 
43 #include <memory>
44 #include <typeinfo>
45 
// Orphan transactions expire this many seconds after arrival (20 minutes).
47 static constexpr int64_t ORPHAN_TX_EXPIRE_TIME = 20 * 60;
// Minimum interval, in seconds, between sweeps of expired orphans (5 minutes).
49 static constexpr int64_t ORPHAN_TX_EXPIRE_INTERVAL = 5 * 60;
// How long a relayed tx stays available for getdata (presumably feeds
// mapRelay/vRelayExpiration below — usage not visible in this chunk).
51 static constexpr std::chrono::seconds RELAY_TX_CACHE_TIME =
52  std::chrono::minutes{15};
55 static constexpr std::chrono::seconds UNCONDITIONAL_RELAY_DELAY =
56  std::chrono::minutes{2};
// Headers sync timeout: base is in microseconds (15 min), plus 1ms per header.
61 // 15 minutes
62 static constexpr int64_t HEADERS_DOWNLOAD_TIMEOUT_BASE = 15 * 60 * 1000000;
63 // 1ms/header
64 static constexpr int64_t HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1000;
// Cap on outbound peers protected from disconnection by the chain-sync logic
// (tracked in g_outbound_peers_with_protect_from_disconnect below).
69 static constexpr int32_t MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT = 4;
74 // 20 minutes
75 static constexpr int64_t CHAIN_SYNC_TIMEOUT = 20 * 60;
77 // 10 minutes
78 static constexpr int64_t STALE_CHECK_INTERVAL = 10 * 60;
// Seconds between checks for extra (evictable) peers.
82 static constexpr int64_t EXTRA_PEER_CHECK_INTERVAL = 45;
// Minimum seconds a peer must be connected before it is considered for
// eviction — presumably; enforcement is outside this chunk.
87 static constexpr int64_t MINIMUM_CONNECT_TIME = 30;
// SipHash randomizer id used to salt deterministic address relay.
89 static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL;
// 30 days, in seconds.
92 static constexpr int STALE_RELAY_AGE_LIMIT = 30 * 24 * 60 * 60;
// 7 days, in seconds.
95 static constexpr int HISTORICAL_BLOCK_AGE = 7 * 24 * 60 * 60;
99 static constexpr std::chrono::minutes PING_INTERVAL{2};
// Maximum number of hashes accepted in a block locator.
101 static const unsigned int MAX_LOCATOR_SZ = 101;
// Maximum number of entries accepted in a single INV message.
103 static const unsigned int MAX_INV_SZ = 50000;
104 static_assert(MAX_PROTOCOL_MESSAGE_LENGTH > MAX_INV_SZ * sizeof(CInv),
105  "Max protocol message length must be greater than largest "
106  "possible INV message");
107 
// NOTE(review): the doxygen extraction dropped several lines here (original
// lines 116-126), including what is presumably the
// "struct DataRequestParameters {" declaration and its first two members
// (max_peer_request_in_flight and max_peer_announcements, judging by the
// field comments in the initializers below and the accesses in
// TooManyAnnouncements/ComputeRequestTime) — confirm against upstream.
115 
125 
// Extra request delay applied to non-preferred peers.
127  const std::chrono::seconds nonpref_peer_delay;
128 
// Extra request delay applied when a peer already has too many requests
// in flight.
133  const std::chrono::seconds overloaded_peer_delay;
134 
// How long to wait for a response to a getdata before giving up.
139  const std::chrono::microseconds getdata_interval;
140 
146 };
147 
// NOTE(review): the declaration line for this initializer (presumably
// "static constexpr DataRequestParameters TX_REQUEST_PARAMS{", given the
// PF_RELAY bypass permission and the reference at AddTxAnnouncement) was
// dropped by the extraction.
149  100, // max_peer_request_in_flight
150  5000, // max_peer_announcements
151  std::chrono::seconds(2), // nonpref_peer_delay
152  std::chrono::seconds(2), // overloaded_peer_delay
153  std::chrono::seconds(60), // getdata_interval
154  PF_RELAY, // bypass_request_limits_permissions
155 };
156 
// NOTE(review): declaration line dropped here too — presumably
// PROOF_REQUEST_PARAMS, given PF_BYPASS_PROOF_REQUEST_LIMITS and the
// reference at AddProofAnnouncement.
158  100, // max_peer_request_in_flight
159  5000, // max_peer_announcements
160  std::chrono::seconds(2), // nonpref_peer_delay
161  std::chrono::seconds(2), // overloaded_peer_delay
162  std::chrono::seconds(60), // getdata_interval
163  PF_BYPASS_PROOF_REQUEST_LIMITS, // bypass_request_limits_permissions
164 };
165 
165 
// Maximum number of entries processed from a single GETDATA message.
170 static const unsigned int MAX_GETDATA_SZ = 1000;
// Maximum number of blocks requested in parallel from a single peer.
174 static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER = 16;
// Seconds of headroom before considering a block download stalled.
179 static const unsigned int BLOCK_STALLING_TIMEOUT = 2;
// Maximum number of headers returned in a single HEADERS message.
186 static const unsigned int MAX_HEADERS_RESULTS = 2000;
// Maximum depth (from tip) at which a compact block is served.
191 static const int MAX_CMPCTBLOCK_DEPTH = 5;
// Maximum depth (from tip) at which a BLOCKTXN response is served.
196 static const int MAX_BLOCKTXN_DEPTH = 10;
// Size of the moving window of blocks being downloaded ahead of validation.
204 static const unsigned int BLOCK_DOWNLOAD_WINDOW = 1024;
// Block download timeout: base plus per-additional-peer increment,
// in microseconds (cf. HEADERS_DOWNLOAD_TIMEOUT_* above).
209 static const int64_t BLOCK_DOWNLOAD_TIMEOUT_BASE = 1000000;
213 static const int64_t BLOCK_DOWNLOAD_TIMEOUT_PER_PEER = 500000;
// Maximum number of block hashes announced via direct headers announcement.
218 static const unsigned int MAX_BLOCKS_TO_ANNOUNCE = 8;
// Maximum number of unconnecting HEADERS before misbehavior handling kicks
// in (see CNodeState::nUnconnectingHeaders).
220 static const int MAX_UNCONNECTING_HEADERS = 10;
// Number of recent blocks a NODE_NETWORK_LIMITED peer is expected to serve.
222 static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS = 288;
226 static constexpr std::chrono::hours AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL{24};
230 static const std::chrono::seconds AVG_ADDRESS_BROADCAST_INTERVAL{30};
236 static const unsigned int INVENTORY_BROADCAST_INTERVAL = 5;
241 static constexpr unsigned int INVENTORY_BROADCAST_PER_SECOND = 7;
// NOTE(review): the value expression of this constant (original lines
// 244-245) was dropped by the extraction.
243 static constexpr unsigned int INVENTORY_BROADCAST_MAX_PER_MB =
246 static constexpr unsigned int INVENTORY_MAX_RECENT_RELAY = 3500;
// NOTE(review): the opening of this static_assert (original lines 247-254)
// was dropped by the extraction; only its tail survives below.
255  std::chrono::seconds{1},
256  "INVENTORY_RELAY_MAX too low");
257 
// 10 minutes, in seconds.
261 static constexpr unsigned int AVG_FEEFILTER_BROADCAST_INTERVAL = 10 * 60;
// 5 minutes, in seconds.
265 static constexpr unsigned int MAX_FEEFILTER_CHANGE_DELAY = 5 * 60;
// Maximum filter range accepted in a getcfilters request.
270 static constexpr uint32_t MAX_GETCFILTERS_SIZE = 1000;
// Maximum header range accepted in a getcfheaders request.
275 static constexpr uint32_t MAX_GETCFHEADERS_SIZE = 2000;
// Percentage cap of known addresses sent in a single addr response.
280 static constexpr size_t MAX_PCT_ADDR_TO_SEND = 23;
281 
// Bookkeeping entry for a transaction whose parents are not yet known.
282 struct COrphanTx {
283  // When modifying, adapt the copy of this definition in tests/DoS_tests.
// NOTE(review): original lines 284-285 were dropped by the extraction.
// AddOrphanTx below brace-initializes COrphanTx{tx, peer, expiry, pos}, so
// the missing members are presumably the CTransactionRef and the announcing
// NodeId — confirm against upstream.
// Absolute time (seconds) after which this orphan is eligible for eviction.
286  int64_t nTimeExpire;
// Index of this entry's iterator inside g_orphan_list, for O(1) removal.
287  size_t list_pos;
288 };
289 
// All current orphan transactions, keyed by txid.
296 std::map<TxId, COrphanTx> mapOrphanTransactions GUARDED_BY(g_cs_orphans);
297 
// Forward declaration — presumably erases all orphans announced by a peer;
// definition not visible in this chunk.
298 void EraseOrphansFor(NodeId peer);
299 
300 // Internal stuff
301 namespace {
// Number of peers with fSyncStarted set (decremented in FinalizeNode).
303 int nSyncStarted GUARDED_BY(cs_main) = 0;
304 
// Per-block record of which peer sent it (bool semantics not visible here).
311 std::map<BlockHash, std::pair<NodeId, bool>> mapBlockSource GUARDED_BY(cs_main);
312 
// Rolling bloom filter of recently rejected txids; hashRecentRejectsChainTip
// presumably tracks the tip it was last reset at — confirm upstream.
331 std::unique_ptr<CRollingBloomFilter> recentRejects GUARDED_BY(cs_main);
332 uint256 hashRecentRejectsChainTip GUARDED_BY(cs_main);
333 
// Rolling bloom filter of recently rejected avalanche proof ids.
344 Mutex cs_rejectedProofs;
345 std::unique_ptr<CRollingBloomFilter>
346  rejectedProofs GUARDED_BY(cs_rejectedProofs);
347 
// Rolling bloom filter of txids recently confirmed in a block.
353 Mutex g_cs_recent_confirmed_transactions;
354 std::unique_ptr<CRollingBloomFilter> g_recent_confirmed_transactions
355  GUARDED_BY(g_cs_recent_confirmed_transactions);
356 
// One requested, not-yet-received block.
360 struct QueuedBlock {
361  BlockHash hash;
// Block index entry, if the header has been validated; may be null.
363  const CBlockIndex *pindex;
// Whether this download was requested with a validated header.
365  bool fValidatedHeaders;
// Partial compact-block reconstruction state, when applicable.
367  std::unique_ptr<PartiallyDownloadedBlock> partialBlock;
368 };
// All blocks currently in flight, mapping hash -> (peer, queue position).
369 std::map<BlockHash, std::pair<NodeId, std::list<QueuedBlock>::iterator>>
370  mapBlocksInFlight GUARDED_BY(cs_main);
371 
// Peers selected to announce blocks via compact-block encoding (BIP152
// limits this to 3, see MaybeSetPeerAsAnnouncingHeaderAndIDs).
373 std::list<NodeId> lNodesAnnouncingHeaderAndIDs GUARDED_BY(cs_main);
374 
// Number of preferred-download peers (see UpdatePreferredDownload).
376 int nPreferredDownload GUARDED_BY(cs_main) = 0;
377 
// Number of peers with at least one validated-header block in flight.
379 int nPeersWithValidatedDownloads GUARDED_BY(cs_main) = 0;
380 
// Number of outbound peers protected by the chain-sync timeout logic.
382 int g_outbound_peers_with_protect_from_disconnect GUARDED_BY(cs_main) = 0;
383 
// Timestamp of the last tip change (seconds); 0 until first set.
385 std::atomic<int64_t> g_last_tip_update(0);
386 
// Cache of transactions available for relay, with an expiration queue.
388 typedef std::map<uint256, CTransactionRef> MapRelay;
389 MapRelay mapRelay GUARDED_BY(cs_main);
394 std::deque<std::pair<int64_t, MapRelay::iterator>>
395  vRelayExpiration GUARDED_BY(cs_main);
396 
// Orders iterators by the address of the element they point at; gives a
// stable, arbitrary ordering for sets of map iterators.
397 struct IteratorComparator {
398  template <typename I> bool operator()(const I &a, const I &b) const {
399  return &(*a) < &(*b);
400  }
401 };
402 
// Index of orphans by the outpoints they spend, for parent-arrival lookup.
407 std::map<COutPoint,
408  std::set<std::map<TxId, COrphanTx>::iterator, IteratorComparator>>
409  mapOrphanTransactionsByPrev GUARDED_BY(g_cs_orphans);
410 
// Dense list of orphan iterators enabling O(1) random eviction
// (COrphanTx::list_pos stores each entry's index here).
412 std::vector<std::map<TxId, COrphanTx>::iterator>
413  g_orphan_list GUARDED_BY(g_cs_orphans);
414 
// Ring buffer of extra transactions kept for compact block reconstruction
// (see AddToCompactExtraTransactions).
421 static std::vector<std::pair<TxHash, CTransactionRef>>
422  vExtraTxnForCompact GUARDED_BY(g_cs_orphans);
424 static size_t vExtraTxnForCompactIt GUARDED_BY(g_cs_orphans) = 0;
425 } // namespace
426 
427 namespace {
// Per-peer sync/relay state, all guarded by cs_main (see mapNodeState).
434 struct CNodeState {
// Address of this peer, fixed at construction.
436  const CService address;
// Whether we have a fully established connection (version/verack done).
438  bool fCurrentlyConnected;
// Best block this peer has announced that we also have a header for.
440  const CBlockIndex *pindexBestKnownBlock;
// Hash of the last announced block we did not recognize at the time.
442  BlockHash hashLastUnknownBlock;
// Last block we both agree on; starting point for download selection.
444  const CBlockIndex *pindexLastCommonBlock;
// Best header we have sent to this peer via headers announcement.
446  const CBlockIndex *pindexBestHeaderSent;
// Count of consecutive non-connecting HEADERS messages
// (cf. MAX_UNCONNECTING_HEADERS).
448  int nUnconnectingHeaders;
// Whether we started headers sync with this peer (counted in nSyncStarted).
450  bool fSyncStarted;
452  int64_t nHeadersSyncTimeout;
// When this peer started stalling block download (0 = not stalling).
455  int64_t nStallingSince;
456  std::list<QueuedBlock> vBlocksInFlight;
// When the current download batch started (microseconds).
459  int64_t nDownloadingSince;
460  int nBlocksInFlight;
461  int nBlocksInFlightValidHeaders;
// Whether this peer counts toward nPreferredDownload
// (see UpdatePreferredDownload).
463  bool fPreferredDownload;
// Peer asked for block announcements via HEADERS rather than INV.
466  bool fPreferHeaders;
// Peer asked for announcements via compact blocks (sendcmpct).
469  bool fPreferHeaderAndIDs;
476  bool fProvidesHeaderAndIDs;
482  bool fSupportsDesiredCmpctVersion;
483 
// State for the outbound chain-sync disconnection logic
// (cf. CHAIN_SYNC_TIMEOUT).
510  struct ChainSyncTimeoutState {
513  int64_t m_timeout;
515  const CBlockIndex *m_work_header;
517  bool m_sent_getheaders;
// Whether this peer is protected from chain-sync disconnection
// (counted in g_outbound_peers_with_protect_from_disconnect).
520  bool m_protect;
521  };
522 
523  ChainSyncTimeoutState m_chain_sync;
524 
// Time of this peer's last block announcement (see
// UpdateLastBlockAnnounceTime).
526  int64_t m_last_block_announcement;
527 
528  struct AvalancheState {
529  std::chrono::time_point<std::chrono::steady_clock> last_poll;
530  };
531 
532  AvalancheState m_avalanche_state;
533 
535  bool m_is_inbound;
536 
538  bool m_is_manual_connection;
539 
// NOTE(review): the initializer expressions of these two bloom filters
// (original lines 542 and 546, presumably size/fp-rate arguments) were
// dropped by the extraction — confirm against upstream.
541  CRollingBloomFilter m_recently_announced_invs =
543 
545  CRollingBloomFilter m_recently_announced_proofs =
547 
548  CNodeState(CAddress addrIn, bool is_inbound, bool is_manual)
549  : address(addrIn), m_is_inbound(is_inbound),
550  m_is_manual_connection(is_manual) {
551  fCurrentlyConnected = false;
552  pindexBestKnownBlock = nullptr;
553  hashLastUnknownBlock = BlockHash();
554  pindexLastCommonBlock = nullptr;
555  pindexBestHeaderSent = nullptr;
556  nUnconnectingHeaders = 0;
557  fSyncStarted = false;
558  nHeadersSyncTimeout = 0;
559  nStallingSince = 0;
560  nDownloadingSince = 0;
561  nBlocksInFlight = 0;
562  nBlocksInFlightValidHeaders = 0;
563  fPreferredDownload = false;
564  fPreferHeaders = false;
565  fPreferHeaderAndIDs = false;
566  fProvidesHeaderAndIDs = false;
567  fSupportsDesiredCmpctVersion = false;
568  m_chain_sync = {0, nullptr, false, false};
569  m_last_block_announcement = 0;
570  m_recently_announced_invs.reset();
571  m_recently_announced_proofs.reset();
572  }
573 };
574 
// All per-peer CNodeState records, keyed by node id; entries are created in
// InitializeNode and destroyed in FinalizeNode.
576 static std::map<NodeId, CNodeState> mapNodeState GUARDED_BY(cs_main);
577 
578 static CNodeState *State(NodeId pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
579  std::map<NodeId, CNodeState>::iterator it = mapNodeState.find(pnode);
580  if (it == mapNodeState.end()) {
581  return nullptr;
582  }
583 
584  return &it->second;
585 }
586 
// Per-peer state that does not require cs_main; owned via shared_ptr
// (PeerRef) in g_peer_map so it can outlive map removal during teardown.
598 struct Peer {
// The peer's node id, fixed at construction.
600  const NodeId m_id{0};
601 
// Accumulated misbehavior score and the resulting discourage flag, both
// protected by their own mutex so they can be updated without cs_main.
603  Mutex m_misbehavior_mutex;
605  int m_misbehavior_score GUARDED_BY(m_misbehavior_mutex){0};
608  bool m_should_discourage GUARDED_BY(m_misbehavior_mutex){false};
609 
// Orphan transactions to reconsider for this peer (shares g_cs_orphans
// with the global orphan maps).
614  std::set<TxId> m_orphan_work_set GUARDED_BY(g_cs_orphans);
615 
// Queue of getdata requests not yet answered for this peer.
617  Mutex m_getdata_requests_mutex;
619  std::deque<CInv> m_getdata_requests GUARDED_BY(m_getdata_requests_mutex);
620 
621  Peer(NodeId id) : m_id(id) {}
622 };
623 
// Shared ownership handle for Peer objects.
624 using PeerRef = std::shared_ptr<Peer>;
625 
// All Peer objects, keyed by node id; populated in InitializeNode, erased
// in FinalizeNode, looked up via GetPeerRef.
632 Mutex g_peer_mutex;
633 static std::map<NodeId, PeerRef> g_peer_map GUARDED_BY(g_peer_mutex);
634 
639 static PeerRef GetPeerRef(NodeId id) {
640  LOCK(g_peer_mutex);
641  auto it = g_peer_map.find(id);
642  return it != g_peer_map.end() ? it->second : nullptr;
643 }
644 
645 static bool isPreferredDownloadPeer(const CNode &pfrom) {
646  LOCK(cs_main);
647  const CNodeState *state = State(pfrom.GetId());
648  return state && state->fPreferredDownload;
649 }
650 
// Recompute this peer's preferred-download flag and keep the global
// nPreferredDownload counter in sync (subtract old value, add new).
651 static void UpdatePreferredDownload(const CNode &node, CNodeState *state)
// NOTE(review): original line 652 (presumably the
// EXCLUSIVE_LOCKS_REQUIRED(cs_main) annotation and opening brace) was
// dropped by the extraction.
653  nPreferredDownload -= state->fPreferredDownload;
654 
655  // Whether this node should be marked as a preferred download node.
656  state->fPreferredDownload =
657  (!node.IsInboundConn() || node.HasPermission(PF_NOBAN)) &&
658  !node.IsAddrFetchConn() && !node.fClient;
659 
660  nPreferredDownload += state->fPreferredDownload;
661 }
662 
// Send our VERSION message to a peer, advertising local services, nonce,
// starting height, user agent and relay preference.
663 static void PushNodeVersion(const Config &config, CNode &pnode,
664  CConnman &connman, int64_t nTime) {
665  // Note that pnode.GetLocalServices() is a reflection of the local
666  // services we were offering when the CNode object was created for this
667  // peer.
668  ServiceFlags nLocalNodeServices = pnode.GetLocalServices();
669  uint64_t nonce = pnode.GetLocalNonce();
670  int nNodeStartingHeight = pnode.GetMyStartingHeight();
671  NodeId nodeid = pnode.GetId();
672  CAddress addr = pnode.addr;
673  uint64_t extraEntropy = pnode.GetLocalExtraEntropy();
674 
// Only advertise the peer's address back to it when it is routable and not
// reached through a proxy; otherwise send a blank CService.
675  CAddress addrYou = (addr.IsRoutable() && !IsProxy(addr)
676  ? addr
677  : CAddress(CService(), addr.nServices));
678  CAddress addrMe = CAddress(CService(), nLocalNodeServices);
679 
680  connman.PushMessage(
681  &pnode,
// NOTE(review): original lines 682-683 (presumably the CNetMsgMaker
// construction and .Make(NetMsgType::VERSION, ...) prelude for this call)
// were dropped by the extraction.
684  uint64_t(nLocalNodeServices), nTime, addrYou, addrMe, nonce,
685  userAgent(config), nNodeStartingHeight,
686  ::g_relay_txes && pnode.m_tx_relay != nullptr, extraEntropy));
687 
688  if (fLogIPs) {
// NOTE(review): original line 689 (presumably the LogPrint(BCLog::NET,
// prelude of this log call) was dropped by the extraction.
690  "send version message: version %d, blocks=%d, us=%s, them=%s, "
691  "peer=%d\n",
692  PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(),
693  addrYou.ToString(), nodeid);
694  } else {
695  LogPrint(
696  BCLog::NET,
697  "send version message: version %d, blocks=%d, us=%s, peer=%d\n",
698  PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), nodeid);
699  }
// NOTE(review): this "Cleared nodestate" log looks out of place in a
// version-push function (the same message is emitted in FinalizeNode) —
// possibly an extraction artifact; verify against upstream.
700  LogPrint(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid);
701 }
702 
703 // Returns a bool indicating whether we requested this block.
704 // Also used if a block was /not/ received and timed out or started with another
705 // peer.
706 static bool MarkBlockAsReceived(const BlockHash &hash)
// NOTE(review): original line 707 (presumably the
// EXCLUSIVE_LOCKS_REQUIRED(cs_main) annotation and opening brace) was
// dropped by the extraction.
708  std::map<BlockHash,
709  std::pair<NodeId, std::list<QueuedBlock>::iterator>>::iterator
710  itInFlight = mapBlocksInFlight.find(hash);
711  if (itInFlight != mapBlocksInFlight.end()) {
712  CNodeState *state = State(itInFlight->second.first);
713  assert(state != nullptr);
// Undo this entry's contribution to the peer's validated-headers count,
// and to the global peer counter when it was the peer's last such block.
714  state->nBlocksInFlightValidHeaders -=
715  itInFlight->second.second->fValidatedHeaders;
716  if (state->nBlocksInFlightValidHeaders == 0 &&
717  itInFlight->second.second->fValidatedHeaders) {
718  // Last validated block on the queue was received.
719  nPeersWithValidatedDownloads--;
720  }
721  if (state->vBlocksInFlight.begin() == itInFlight->second.second) {
722  // First block on the queue was received, update the start download
723  // time for the next one
724  state->nDownloadingSince = std::max(
725  state->nDownloadingSince,
726  count_microseconds(GetTime<std::chrono::microseconds>()));
727  }
728  state->vBlocksInFlight.erase(itInFlight->second.second);
729  state->nBlocksInFlight--;
// Receiving any block clears the peer's stalling marker.
730  state->nStallingSince = 0;
731  mapBlocksInFlight.erase(itInFlight);
732  return true;
733  }
734 
735  return false;
736 }
737 
738 // returns false, still setting pit, if the block was already in flight from the
739 // same peer
740 // pit will only be valid as long as the same cs_main lock is being held.
741 static bool
742 MarkBlockAsInFlight(const Config &config, CTxMemPool &mempool, NodeId nodeid,
743  const BlockHash &hash,
744  const Consensus::Params &consensusParams,
745  const CBlockIndex *pindex = nullptr,
746  std::list<QueuedBlock>::iterator **pit = nullptr)
// NOTE(review): original line 747 (presumably the
// EXCLUSIVE_LOCKS_REQUIRED(cs_main) annotation and opening brace) was
// dropped by the extraction.
748  CNodeState *state = State(nodeid);
749  assert(state != nullptr);
750 
751  // Short-circuit most stuff in case it is from the same node.
752  std::map<BlockHash,
753  std::pair<NodeId, std::list<QueuedBlock>::iterator>>::iterator
754  itInFlight = mapBlocksInFlight.find(hash);
755  if (itInFlight != mapBlocksInFlight.end() &&
756  itInFlight->second.first == nodeid) {
757  if (pit) {
758  *pit = &itInFlight->second.second;
759  }
760  return false;
761  }
762 
763  // Make sure it's not listed somewhere already.
764  MarkBlockAsReceived(hash);
765 
// Queue the block for this peer; only allocate compact-block
// reconstruction state when the caller asked for the iterator (pit).
766  std::list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(
767  state->vBlocksInFlight.end(),
768  {hash, pindex, pindex != nullptr,
769  std::unique_ptr<PartiallyDownloadedBlock>(
770  pit ? new PartiallyDownloadedBlock(config, &mempool) : nullptr)});
771  state->nBlocksInFlight++;
772  state->nBlocksInFlightValidHeaders += it->fValidatedHeaders;
773  if (state->nBlocksInFlight == 1) {
774  // We're starting a block download (batch) from this peer.
775  state->nDownloadingSince = GetTime<std::chrono::microseconds>().count();
776  }
777 
778  if (state->nBlocksInFlightValidHeaders == 1 && pindex != nullptr) {
779  nPeersWithValidatedDownloads++;
780  }
781 
782  itInFlight = mapBlocksInFlight
783  .insert(std::make_pair(hash, std::make_pair(nodeid, it)))
784  .first;
785 
786  if (pit) {
787  *pit = &itInFlight->second.second;
788  }
789 
790  return true;
791 }
792 
// Resolve a previously-unknown announced block hash against the block index,
// promoting it to pindexBestKnownBlock when it has more (or equal) work.
794 static void ProcessBlockAvailability(NodeId nodeid)
// NOTE(review): original line 795 (presumably the
// EXCLUSIVE_LOCKS_REQUIRED(cs_main) annotation and opening brace) was
// dropped by the extraction.
796  CNodeState *state = State(nodeid);
797  assert(state != nullptr);
798 
799  if (!state->hashLastUnknownBlock.IsNull()) {
800  const CBlockIndex *pindex =
801  LookupBlockIndex(state->hashLastUnknownBlock);
// Only accept entries with known chain work.
802  if (pindex && pindex->nChainWork > 0) {
803  if (state->pindexBestKnownBlock == nullptr ||
804  pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
805  state->pindexBestKnownBlock = pindex;
806  }
807  state->hashLastUnknownBlock.SetNull();
808  }
809  }
810 }
811 
// Record a block announcement from a peer: either promote the peer's
// best-known block, or remember the hash as last-unknown for later.
813 static void UpdateBlockAvailability(NodeId nodeid, const BlockHash &hash)
// NOTE(review): original line 814 (presumably the
// EXCLUSIVE_LOCKS_REQUIRED(cs_main) annotation and opening brace) was
// dropped by the extraction.
815  CNodeState *state = State(nodeid);
816  assert(state != nullptr);
817 
// First resolve any previously-unknown announcement.
818  ProcessBlockAvailability(nodeid);
819 
820  const CBlockIndex *pindex = LookupBlockIndex(hash);
821  if (pindex && pindex->nChainWork > 0) {
822  // An actually better block was announced.
823  if (state->pindexBestKnownBlock == nullptr ||
824  pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
825  state->pindexBestKnownBlock = pindex;
826  }
827  } else {
828  // An unknown block was announced; just assume that the latest one is
829  // the best one.
830  state->hashLastUnknownBlock = hash;
831  }
832 }
833 
// Maintain the (at most 3, per BIP152) set of peers asked to announce new
// blocks via compact blocks: move an existing peer to the back, or add the
// peer and demote the oldest one with a SENDCMPCT(false).
840 static void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid,
841  CConnman &connman)
// NOTE(review): original lines 842-843 (presumably the
// EXCLUSIVE_LOCKS_REQUIRED(cs_main) annotation and opening brace) were
// dropped by the extraction.
844  CNodeState *nodestate = State(nodeid);
845  if (!nodestate) {
846  LogPrint(BCLog::NET, "node state unavailable: peer=%d\n", nodeid);
847  return;
848  }
849  if (!nodestate->fProvidesHeaderAndIDs) {
850  return;
851  }
// Already selected: refresh recency by moving the peer to the back.
852  for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin();
853  it != lNodesAnnouncingHeaderAndIDs.end(); it++) {
854  if (*it == nodeid) {
855  lNodesAnnouncingHeaderAndIDs.erase(it);
856  lNodesAnnouncingHeaderAndIDs.push_back(nodeid);
857  return;
858  }
859  }
860  connman.ForNode(nodeid, [&connman](CNode *pfrom) EXCLUSIVE_LOCKS_REQUIRED(
861  ::cs_main) {
// NOTE(review): original line 862 was dropped by the extraction here.
863  uint64_t nCMPCTBLOCKVersion = 1;
864  if (lNodesAnnouncingHeaderAndIDs.size() >= 3) {
865  // As per BIP152, we only get 3 of our peers to announce
866  // blocks using compact encodings.
867  connman.ForNode(
868  lNodesAnnouncingHeaderAndIDs.front(),
869  [&connman, nCMPCTBLOCKVersion](CNode *pnodeStop) {
870  connman.PushMessage(
871  pnodeStop, CNetMsgMaker(pnodeStop->GetCommonVersion())
872  .Make(NetMsgType::SENDCMPCT,
873  /*fAnnounceUsingCMPCTBLOCK=*/false,
874  nCMPCTBLOCKVersion));
875  return true;
876  });
877  lNodesAnnouncingHeaderAndIDs.pop_front();
878  }
// NOTE(review): original line 880 (presumably the
// .Make(NetMsgType::SENDCMPCT, continuation of this call) was dropped by
// the extraction.
879  connman.PushMessage(pfrom, CNetMsgMaker(pfrom->GetCommonVersion())
881  /*fAnnounceUsingCMPCTBLOCK=*/true,
882  nCMPCTBLOCKVersion));
883  lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());
884  return true;
885  });
886 }
887 
// Whether our tip looks stale: no tip update for over 3 expected block
// intervals while we also have no blocks in flight.
888 static bool TipMayBeStale(const Consensus::Params &consensusParams)
// NOTE(review): original lines 889-890 (presumably the
// EXCLUSIVE_LOCKS_REQUIRED(cs_main) annotation and opening brace) were
// dropped by the extraction.
// Lazily initialize the last-update timestamp on first call.
891  if (g_last_tip_update == 0) {
892  g_last_tip_update = GetTime();
893  }
894  return g_last_tip_update <
895  GetTime() - consensusParams.nPowTargetSpacing * 3 &&
896  mapBlocksInFlight.empty();
897 }
898 
// Whether the tip is recent enough (within 20 expected block intervals of
// the adjusted time) that announced blocks may be fetched directly.
899 static bool CanDirectFetch(const Consensus::Params &consensusParams)
// NOTE(review): original lines 900-901 (presumably the lock annotation,
// opening brace, and the "return ChainActive().Tip()->GetBlockTime() >"
// prelude of this comparison) were dropped by the extraction — confirm
// against upstream.
902  GetAdjustedTime() - consensusParams.nPowTargetSpacing * 20;
903 }
904 
// Whether the peer is known to have this header: pindex must be an ancestor
// of either the peer's best-known block or the best header we sent it.
905 static bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex)
// NOTE(review): original line 906 (presumably the
// EXCLUSIVE_LOCKS_REQUIRED(cs_main) annotation and opening brace) was
// dropped by the extraction.
907  if (state->pindexBestKnownBlock &&
908  pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight)) {
909  return true;
910  }
911  if (state->pindexBestHeaderSent &&
912  pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight)) {
913  return true;
914  }
915  return false;
916 }
917 
// Select up to `count` blocks to request from this peer, walking forward
// from pindexLastCommonBlock toward pindexBestKnownBlock within the
// BLOCK_DOWNLOAD_WINDOW. Sets nodeStaller to the peer blocking the window
// when nothing can be fetched.
922 static void FindNextBlocksToDownload(NodeId nodeid, unsigned int count,
923  std::vector<const CBlockIndex *> &vBlocks,
924  NodeId &nodeStaller,
925  const Consensus::Params &consensusParams)
// NOTE(review): original line 926 (presumably the
// EXCLUSIVE_LOCKS_REQUIRED(cs_main) annotation and opening brace) was
// dropped by the extraction.
927  if (count == 0) {
928  return;
929  }
930 
931  vBlocks.reserve(vBlocks.size() + count);
932  CNodeState *state = State(nodeid);
933  assert(state != nullptr);
934 
935  // Make sure pindexBestKnownBlock is up to date, we'll need it.
936  ProcessBlockAvailability(nodeid);
937 
// Skip peers whose best-known chain has less work than our tip or than
// the minimum chain work — nothing useful to download from them.
938  if (state->pindexBestKnownBlock == nullptr ||
939  state->pindexBestKnownBlock->nChainWork <
940  ::ChainActive().Tip()->nChainWork ||
941  state->pindexBestKnownBlock->nChainWork < nMinimumChainWork) {
942  // This peer has nothing interesting.
943  return;
944  }
945 
946  if (state->pindexLastCommonBlock == nullptr) {
947  // Bootstrap quickly by guessing a parent of our best tip is the forking
948  // point. Guessing wrong in either direction is not a problem.
949  state->pindexLastCommonBlock = ::ChainActive()[std::min(
950  state->pindexBestKnownBlock->nHeight, ::ChainActive().Height())];
951  }
952 
953  // If the peer reorganized, our previous pindexLastCommonBlock may not be an
954  // ancestor of its current tip anymore. Go back enough to fix that.
955  state->pindexLastCommonBlock = LastCommonAncestor(
956  state->pindexLastCommonBlock, state->pindexBestKnownBlock);
957  if (state->pindexLastCommonBlock == state->pindexBestKnownBlock) {
958  return;
959  }
960 
961  std::vector<const CBlockIndex *> vToFetch;
962  const CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
963  // Never fetch further than the best block we know the peer has, or more
964  // than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last linked block we have in
965  // common with this peer. The +1 is so we can detect stalling, namely if we
966  // would be able to download that next block if the window were 1 larger.
967  int nWindowEnd =
968  state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW;
969  int nMaxHeight =
970  std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
971  NodeId waitingfor = -1;
972  while (pindexWalk->nHeight < nMaxHeight) {
973  // Read up to 128 (or more, if more blocks than that are needed)
974  // successors of pindexWalk (towards pindexBestKnownBlock) into
975  // vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as
976  // expensive as iterating over ~100 CBlockIndex* entries anyway.
977  int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight,
978  std::max<int>(count - vBlocks.size(), 128));
979  vToFetch.resize(nToFetch);
980  pindexWalk = state->pindexBestKnownBlock->GetAncestor(
981  pindexWalk->nHeight + nToFetch);
// Fill vToFetch back-to-front by following pprev links from the ancestor.
982  vToFetch[nToFetch - 1] = pindexWalk;
983  for (unsigned int i = nToFetch - 1; i > 0; i--) {
984  vToFetch[i - 1] = vToFetch[i]->pprev;
985  }
986 
987  // Iterate over those blocks in vToFetch (in forward direction), adding
988  // the ones that are not yet downloaded and not in flight to vBlocks. In
989  // the meantime, update pindexLastCommonBlock as long as all ancestors
990  // are already downloaded, or if it's already part of our chain (and
991  // therefore don't need it even if pruned).
992  for (const CBlockIndex *pindex : vToFetch) {
993  if (!pindex->IsValid(BlockValidity::TREE)) {
994  // We consider the chain that this peer is on invalid.
995  return;
996  }
997  if (pindex->nStatus.hasData() || ::ChainActive().Contains(pindex)) {
998  if (pindex->HaveTxsDownloaded()) {
999  state->pindexLastCommonBlock = pindex;
1000  }
1001  } else if (mapBlocksInFlight.count(pindex->GetBlockHash()) == 0) {
1002  // The block is not already downloaded, and not yet in flight.
1003  if (pindex->nHeight > nWindowEnd) {
1004  // We reached the end of the window.
1005  if (vBlocks.size() == 0 && waitingfor != nodeid) {
1006  // We aren't able to fetch anything, but we would be if
1007  // the download window was one larger.
1008  nodeStaller = waitingfor;
1009  }
1010  return;
1011  }
1012  vBlocks.push_back(pindex);
1013  if (vBlocks.size() == count) {
1014  return;
1015  }
1016  } else if (waitingfor == -1) {
1017  // This is the first already-in-flight block.
1018  waitingfor = mapBlocksInFlight[pindex->GetBlockHash()].first;
1019  }
1020  }
1021  }
1022 }
1023 
1024 } // namespace
1025 
1026 template <class InvId>
1027 static bool TooManyAnnouncements(const CNode &node,
1028  const InvRequestTracker<InvId> &requestTracker,
1029  const DataRequestParameters &requestParams) {
1030  return !node.HasPermission(
1031  requestParams.bypass_request_limits_permissions) &&
1032  requestTracker.Count(node.GetId()) >=
1033  requestParams.max_peer_announcements;
1034 }
1035 
// Compute when a newly announced item may be requested from this peer:
// now, plus a penalty for non-preferred peers, plus a penalty when the peer
// already has too many requests in flight (unless it has the bypass
// permission).
1042 template <class InvId>
1043 static std::chrono::microseconds
// NOTE(review): original line 1044 — the function name and first parameter
// (presumably "ComputeRequestTime(const CNode &node," given the call sites
// in AddTxAnnouncement and AddProofAnnouncement) — was dropped by the
// extraction.
1045  const InvRequestTracker<InvId> &requestTracker,
1046  const DataRequestParameters &requestParams,
1047  std::chrono::microseconds current_time, bool preferred) {
1048  auto delay = std::chrono::microseconds{0};
1049 
1050  if (!preferred) {
1051  delay += requestParams.nonpref_peer_delay;
1052  }
1053 
1054  if (!node.HasPermission(requestParams.bypass_request_limits_permissions) &&
1055  requestTracker.CountInFlight(node.GetId()) >=
1056  requestParams.max_peer_request_in_flight) {
1057  delay += requestParams.overloaded_peer_delay;
1058  }
1059 
1060  return current_time + delay;
1061 }
1062 
// Register a peer's tx INV in the request tracker, unless the peer already
// has too many pending announcements; request time is delayed per
// TX_REQUEST_PARAMS and the peer's preferred status.
1063 void PeerManager::AddTxAnnouncement(const CNode &node, const TxId &txid,
1064  std::chrono::microseconds current_time) {
1065  // For m_txrequest and state
// NOTE(review): original line 1066 (presumably an AssertLockHeld(cs_main)
// matching the comment above) was dropped by the extraction.
1067 
1068  if (TooManyAnnouncements(node, m_txrequest, TX_REQUEST_PARAMS)) {
1069  return;
1070  }
1071 
1072  const bool preferred = isPreferredDownloadPeer(node);
1073  auto reqtime = ComputeRequestTime(node, m_txrequest, TX_REQUEST_PARAMS,
1074  current_time, preferred);
1075 
1076  m_txrequest.ReceivedInv(node.GetId(), txid, preferred, reqtime);
1077 }
1078 
// Register a peer's avalanche proof announcement in the proof request
// tracker; mirrors AddTxAnnouncement but the caller supplies `preferred`.
1079 void PeerManager::AddProofAnnouncement(const CNode &node,
1080  const avalanche::ProofId &proofid,
1081  std::chrono::microseconds current_time,
1082  bool preferred) {
1083  // For m_proofrequest
// NOTE(review): original line 1084 (presumably an
// AssertLockHeld(cs_proofrequest) matching the comment above) was dropped
// by the extraction.
1085 
1086  if (TooManyAnnouncements(node, m_proofrequest, PROOF_REQUEST_PARAMS)) {
1087  return;
1088  }
1089 
1090  auto reqtime = ComputeRequestTime(
1091  node, m_proofrequest, PROOF_REQUEST_PARAMS, current_time, preferred);
1092 
1093  m_proofrequest.ReceivedInv(node.GetId(), proofid, preferred, reqtime);
1094 }
1095 
1096 // This function is used for testing the stale tip eviction logic, see
1097 // denialofservice_tests.cpp
1098 void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds) {
1099  LOCK(cs_main);
1100  CNodeState *state = State(node);
1101  if (state) {
1102  state->m_last_block_announcement = time_in_seconds;
1103  }
1104 }
1105 
// Set up per-peer state for a new connection: create its CNodeState (under
// cs_main) and its Peer object (under g_peer_mutex), then initiate the
// version handshake for outbound connections.
1106 void PeerManager::InitializeNode(const Config &config, CNode *pnode) {
1107  CAddress addr = pnode->addr;
1108  std::string addrName = pnode->GetAddrName();
1109  NodeId nodeid = pnode->GetId();
1110  {
1111  LOCK(cs_main);
// emplace_hint at end(): node ids are assigned in increasing order, so the
// new entry belongs at the end of the map.
1112  mapNodeState.emplace_hint(mapNodeState.end(), std::piecewise_construct,
1113  std::forward_as_tuple(nodeid),
1114  std::forward_as_tuple(addr,
1115  pnode->IsInboundConn(),
1116  pnode->IsManualConn()));
// A brand-new peer must not have tx requests tracked yet.
1117  assert(m_txrequest.Count(nodeid) == 0);
1118  }
1119  {
1120  PeerRef peer = std::make_shared<Peer>(nodeid);
1121  LOCK(g_peer_mutex);
1122  g_peer_map.emplace_hint(g_peer_map.end(), nodeid, std::move(peer));
1123  }
// We initiate the handshake only for connections we opened; inbound peers
// send their VERSION first.
1124  if (!pnode->IsInboundConn()) {
1125  PushNodeVersion(config, *pnode, m_connman, GetTime());
1126  }
1127 }
1128 
// NOTE(review): original line 1129 — this function's signature (presumably
// "void PeerManager::ReattemptInitialBroadcast(CScheduler &scheduler)",
// given the recursive re-scheduling below) — was dropped by the extraction.
// Re-announces our own unbroadcast transactions (and avalanche proofs)
// until a GETDATA shows a peer picked them up, then reschedules itself.
1130  std::set<TxId> unbroadcast_txids = m_mempool.GetUnbroadcastTxs();
1131 
1132  for (const TxId &txid : unbroadcast_txids) {
1133  // Sanity check: all unbroadcast txns should exist in the mempool
1134  if (m_mempool.exists(txid)) {
1135  RelayTransaction(txid, m_connman);
1136  } else {
1137  m_mempool.RemoveUnbroadcastTx(txid, true);
1138  }
1139  }
1140 
// NOTE(review): original line 1141 (presumably a guard such as
// "if (g_avalanche && ...) {" matching the closing brace at 1155 below and
// the g_avalanche dereference here) was dropped by the extraction.
1142  g_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
1143  auto unbroadcasted_proofids = pm.getUnbroadcastProofs();
1144 
1145  for (const auto &proofid : unbroadcasted_proofids) {
1146  // Sanity check: all unbroadcast proofs should be bound to a
1147  // peer in the peermanager
1148  if (pm.isBoundToPeer(proofid)) {
1149  RelayProof(proofid, m_connman);
1150  } else {
1151  pm.removeUnbroadcastProof(proofid);
1152  }
1153  }
1154  });
1155  }
1156 
1157  // Schedule next run for 10-15 minutes in the future.
1158  // We add randomness on every cycle to avoid the possibility of P2P
1159  // fingerprinting.
1160  const std::chrono::milliseconds delta =
1161  std::chrono::minutes{10} + GetRandMillis(std::chrono::minutes{5});
1162  scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); },
1163  delta);
1164 }
1165 
// NOTE(review): original line 1166 — this function's signature — was
// dropped by the extraction; the body iterates all nodes and refreshes
// each peer's avalanche availability score, so this is presumably a
// periodic avalanche-statistics update on PeerManager — confirm upstream.
1167  m_connman.ForEachNode([](CNode *pnode) {
// Only peers that negotiated avalanche have this state.
1168  if (pnode->m_avalanche_state) {
1169  pnode->m_avalanche_state->updateAvailabilityScore();
1170  }
1171  });
1172 }
1173 
// Tear down all per-peer state on disconnect. Sets fUpdateConnectionTime
// when the peer completed the handshake and never misbehaved, so the
// caller can record the address as recently useful.
1174 void PeerManager::FinalizeNode(const Config &config, NodeId nodeid,
1175  bool &fUpdateConnectionTime) {
1176  fUpdateConnectionTime = false;
1177  {
1178  LOCK(cs_main);
1179  int misbehavior{0};
1180  {
// Capture the misbehavior score, then drop the Peer object. The local
// PeerRef keeps it alive until this inner scope ends, even after erase.
1181  PeerRef peer = GetPeerRef(nodeid);
1182  assert(peer != nullptr);
1183  misbehavior = WITH_LOCK(peer->m_misbehavior_mutex,
1184  return peer->m_misbehavior_score);
1185  LOCK(g_peer_mutex);
1186  g_peer_map.erase(nodeid);
1187  }
1188  CNodeState *state = State(nodeid);
1189  assert(state != nullptr);
1190 
1191  if (state->fSyncStarted) {
1192  nSyncStarted--;
1193  }
1194 
1195  if (misbehavior == 0 && state->fCurrentlyConnected) {
1196  fUpdateConnectionTime = true;
1197  }
1198 
// Unwind every global counter/container this peer contributed to.
1199  for (const QueuedBlock &entry : state->vBlocksInFlight) {
1200  mapBlocksInFlight.erase(entry.hash);
1201  }
1202  EraseOrphansFor(nodeid);
1203  m_txrequest.DisconnectedPeer(nodeid);
1204  nPreferredDownload -= state->fPreferredDownload;
1205  nPeersWithValidatedDownloads -=
1206  (state->nBlocksInFlightValidHeaders != 0);
1207  assert(nPeersWithValidatedDownloads >= 0);
1208  g_outbound_peers_with_protect_from_disconnect -=
1209  state->m_chain_sync.m_protect;
1210  assert(g_outbound_peers_with_protect_from_disconnect >= 0);
1211 
1212  mapNodeState.erase(nodeid);
1213 
1214  if (mapNodeState.empty()) {
1215  // Do a consistency check after the last peer is removed.
1216  assert(mapBlocksInFlight.empty());
1217  assert(nPreferredDownload == 0);
1218  assert(nPeersWithValidatedDownloads == 0);
1219  assert(g_outbound_peers_with_protect_from_disconnect == 0);
1220  assert(m_txrequest.Size() == 0);
1221  }
1222  }
1223 
// Proof requests use their own lock, so clean them up outside cs_main.
1224  WITH_LOCK(cs_proofrequest, m_proofrequest.DisconnectedPeer(nodeid));
1225 
1226  LogPrint(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid);
1227 }
1228 
// NOTE(review): original line 1229 — this function's signature (presumably
// "bool PeerManager::GetNodeStateStats(NodeId nodeid, CNodeStateStats
// &stats) {", given the stats fields filled below) — was dropped by the
// extraction. Fills sync/common heights, in-flight block heights, and the
// misbehavior score; returns false when the peer is unknown.
1230  {
1231  LOCK(cs_main);
1232  CNodeState *state = State(nodeid);
1233  if (state == nullptr) {
1234  return false;
1235  }
// -1 signals "unknown" for both heights.
1236  stats.nSyncHeight = state->pindexBestKnownBlock
1237  ? state->pindexBestKnownBlock->nHeight
1238  : -1;
1239  stats.nCommonHeight = state->pindexLastCommonBlock
1240  ? state->pindexLastCommonBlock->nHeight
1241  : -1;
1242  for (const QueuedBlock &queue : state->vBlocksInFlight) {
1243  if (queue.pindex) {
1244  stats.vHeightInFlight.push_back(queue.pindex->nHeight);
1245  }
1246  }
1247  }
1248 
1249  PeerRef peer = GetPeerRef(nodeid);
1250  if (peer == nullptr) {
1251  return false;
1252  }
1253  stats.m_misbehavior_score =
1254  WITH_LOCK(peer->m_misbehavior_mutex, return peer->m_misbehavior_score);
1255 
1256  return true;
1257 }
1258 
1260 //
1261 // mapOrphanTransactions
1262 //
1263 
// NOTE(review): original lines 1264-1265 — this function's signature
// (presumably "static void AddToCompactExtraTransactions(const
// CTransactionRef &tx)", given the tx->GetHash() use below) — and line
// 1267 (the default-value argument of GetArg) were dropped by the
// extraction. Stores a transaction in the fixed-size ring buffer used for
// compact block reconstruction.
1266  size_t max_extra_txn = gArgs.GetArg("-blockreconstructionextratxn",
1268  if (max_extra_txn <= 0) {
1269  return;
1270  }
1271 
// Lazily size the ring buffer on first use.
1272  if (!vExtraTxnForCompact.size()) {
1273  vExtraTxnForCompact.resize(max_extra_txn);
1274  }
1275 
// Overwrite the oldest slot and advance the ring cursor.
1276  vExtraTxnForCompact[vExtraTxnForCompactIt] =
1277  std::make_pair(tx->GetHash(), tx);
1278  vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % max_extra_txn;
1279 }
1280 
// Add a transaction to the orphan pool, indexing it by txid and by each
// spent outpoint. Returns false for duplicates and oversized transactions.
1281 bool AddOrphanTx(const CTransactionRef &tx, NodeId peer)
// NOTE(review): original line 1282 (presumably the
// EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans) annotation and opening brace) was
// dropped by the extraction.
1283  const TxId &txid = tx->GetId();
1284  if (mapOrphanTransactions.count(txid)) {
1285  return false;
1286  }
1287 
1288  // Ignore big transactions, to avoid a send-big-orphans memory exhaustion
1289  // attack. If a peer has a legitimate large transaction with a missing
1290  // parent then we assume it will rebroadcast it later, after the parent
1291  // transaction(s) have been mined or received.
1292  // 100 orphans, each of which is at most 100,000 bytes big is at most 10
1293  // megabytes of orphans and somewhat more byprev index (in the worst case):
1294  unsigned int sz = tx->GetTotalSize();
1295  if (sz > MAX_STANDARD_TX_SIZE) {
// NOTE(review): original line 1296 (presumably the LogPrint(BCLog::MEMPOOL,
// prelude of this log call) was dropped by the extraction.
1297  "ignoring large orphan tx (size: %u, hash: %s)\n", sz,
1298  txid.ToString());
1299  return false;
1300  }
1301 
// Insert into the primary map, the dense eviction list, and the by-prevout
// index (one entry per input).
1302  auto ret = mapOrphanTransactions.emplace(
1303  txid, COrphanTx{tx, peer, GetTime() + ORPHAN_TX_EXPIRE_TIME,
1304  g_orphan_list.size()});
1305  assert(ret.second);
1306  g_orphan_list.push_back(ret.first);
1307  for (const CTxIn &txin : tx->vin) {
1308  mapOrphanTransactionsByPrev[txin.prevout].insert(ret.first);
1309  }
1310 
// NOTE(review): original line 1311 was dropped by the extraction here
// (possibly a call retaining the tx for compact-block reconstruction —
// confirm against upstream).
1312 
1313  LogPrint(BCLog::MEMPOOL, "stored orphan tx %s (mapsz %u outsz %u)\n",
1314  txid.ToString(), mapOrphanTransactions.size(),
1315  mapOrphanTransactionsByPrev.size());
1316  return true;
1317 }
1318 
1320  const auto it = mapOrphanTransactions.find(id);
1321  if (it == mapOrphanTransactions.end()) {
1322  return 0;
1323  }
1324  for (const CTxIn &txin : it->second.tx->vin) {
1325  const auto itPrev = mapOrphanTransactionsByPrev.find(txin.prevout);
1326  if (itPrev == mapOrphanTransactionsByPrev.end()) {
1327  continue;
1328  }
1329  itPrev->second.erase(it);
1330  if (itPrev->second.empty()) {
1331  mapOrphanTransactionsByPrev.erase(itPrev);
1332  }
1333  }
1334 
1335  size_t old_pos = it->second.list_pos;
1336  assert(g_orphan_list[old_pos] == it);
1337  if (old_pos + 1 != g_orphan_list.size()) {
1338  // Unless we're deleting the last entry in g_orphan_list, move the last
1339  // entry to the position we're deleting.
1340  auto it_last = g_orphan_list.back();
1341  g_orphan_list[old_pos] = it_last;
1342  it_last->second.list_pos = old_pos;
1343  }
1344  g_orphan_list.pop_back();
1345 
1346  mapOrphanTransactions.erase(it);
1347  return 1;
1348 }
1349 
1351  LOCK(g_cs_orphans);
1352  int nErased = 0;
1353  auto iter = mapOrphanTransactions.begin();
1354  while (iter != mapOrphanTransactions.end()) {
1355  // Increment to avoid iterator becoming invalid.
1356  const auto maybeErase = iter++;
1357  if (maybeErase->second.fromPeer == peer) {
1358  nErased += EraseOrphanTx(maybeErase->second.tx->GetId());
1359  }
1360  }
1361  if (nErased > 0) {
1362  LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx from peer=%d\n", nErased,
1363  peer);
1364  }
1365 }
1366 
1367 unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans) {
1368  LOCK(g_cs_orphans);
1369 
1370  unsigned int nEvicted = 0;
1371  static int64_t nNextSweep;
1372  int64_t nNow = GetTime();
1373  if (nNextSweep <= nNow) {
1374  // Sweep out expired orphan pool entries:
1375  int nErased = 0;
1376  int64_t nMinExpTime =
1378  auto iter = mapOrphanTransactions.begin();
1379  while (iter != mapOrphanTransactions.end()) {
1380  const auto maybeErase = iter++;
1381  if (maybeErase->second.nTimeExpire <= nNow) {
1382  nErased += EraseOrphanTx(maybeErase->second.tx->GetId());
1383  } else {
1384  nMinExpTime =
1385  std::min(maybeErase->second.nTimeExpire, nMinExpTime);
1386  }
1387  }
1388  // Sweep again 5 minutes after the next entry that expires in order to
1389  // batch the linear scan.
1390  nNextSweep = nMinExpTime + ORPHAN_TX_EXPIRE_INTERVAL;
1391  if (nErased > 0) {
1392  LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx due to expiration\n",
1393  nErased);
1394  }
1395  }
1396  FastRandomContext rng;
1397  while (mapOrphanTransactions.size() > nMaxOrphans) {
1398  // Evict a random orphan:
1399  size_t randompos = rng.randrange(g_orphan_list.size());
1400  EraseOrphanTx(g_orphan_list[randompos]->first);
1401  ++nEvicted;
1402  }
1403  return nEvicted;
1404 }
1405 
1406 void PeerManager::Misbehaving(const NodeId pnode, const int howmuch,
1407  const std::string &message) {
1408  assert(howmuch > 0);
1409 
1410  PeerRef peer = GetPeerRef(pnode);
1411  if (peer == nullptr) {
1412  return;
1413  }
1414 
1415  LOCK(peer->m_misbehavior_mutex);
1416 
1417  peer->m_misbehavior_score += howmuch;
1418  const std::string message_prefixed =
1419  message.empty() ? "" : (": " + message);
1420  if (peer->m_misbehavior_score >= DISCOURAGEMENT_THRESHOLD &&
1421  peer->m_misbehavior_score - howmuch < DISCOURAGEMENT_THRESHOLD) {
1423  "Misbehaving: peer=%d (%d -> %d) BAN THRESHOLD EXCEEDED%s\n",
1424  pnode, peer->m_misbehavior_score - howmuch,
1425  peer->m_misbehavior_score, message_prefixed);
1426  peer->m_should_discourage = true;
1427  } else {
1428  LogPrint(BCLog::NET, "Misbehaving: peer=%d (%d -> %d)%s\n", pnode,
1429  peer->m_misbehavior_score - howmuch, peer->m_misbehavior_score,
1430  message_prefixed);
1431  }
1432 }
1433 
1435  const BlockValidationState &state,
1436  bool via_compact_block,
1437  const std::string &message) {
1438  switch (state.GetResult()) {
1440  break;
1441  // The node is providing invalid data:
1444  if (!via_compact_block) {
1445  Misbehaving(nodeid, 100, message);
1446  return true;
1447  }
1448  break;
1450  LOCK(cs_main);
1451  CNodeState *node_state = State(nodeid);
1452  if (node_state == nullptr) {
1453  break;
1454  }
1455 
1456  // Ban outbound (but not inbound) peers if on an invalid chain.
1457  // Exempt HB compact block peers and manual connections.
1458  if (!via_compact_block && !node_state->m_is_inbound &&
1459  !node_state->m_is_manual_connection) {
1460  Misbehaving(nodeid, 100, message);
1461  return true;
1462  }
1463  break;
1464  }
1468  Misbehaving(nodeid, 100, message);
1469  return true;
1471  // TODO: Use the state object to report this is probably not the
1472  // best idea. This is effectively unreachable, unless there is a bug
1473  // somewhere.
1474  Misbehaving(nodeid, 20, message);
1475  return true;
1476  // Conflicting (but not necessarily invalid) data or different policy:
1478  // TODO: Handle this much more gracefully (10 DoS points is super
1479  // arbitrary)
1480  Misbehaving(nodeid, 10, message);
1481  return true;
1484  break;
1485  }
1486  if (message != "") {
1487  LogPrint(BCLog::NET, "peer=%d: %s\n", nodeid, message);
1488  }
1489  return false;
1490 }
1491 
1493  const TxValidationState &state,
1494  const std::string &message) {
1495  switch (state.GetResult()) {
1497  break;
1498  // The node is providing invalid data:
1500  Misbehaving(nodeid, 100, message);
1501  return true;
1502  // Conflicting (but not necessarily invalid) data or different policy:
1510  break;
1511  }
1512  if (message != "") {
1513  LogPrint(BCLog::NET, "peer=%d: %s\n", nodeid, message);
1514  }
1515  return false;
1516 }
1517 
1519 //
1520 // blockchain -> download logic notification
1521 //
1522 
1523 // To prevent fingerprinting attacks, only send blocks/headers outside of the
1524 // active chain if they are no more than a month older (both in time, and in
1525 // best equivalent proof of work) than the best header chain we know about and
1526 // we fully-validated them at some point.
1527 static bool BlockRequestAllowed(const CBlockIndex *pindex,
1528  const Consensus::Params &consensusParams)
1531  if (::ChainActive().Contains(pindex)) {
1532  return true;
1533  }
1534  return pindex->IsValid(BlockValidity::SCRIPTS) &&
1535  (pindexBestHeader != nullptr) &&
1536  (pindexBestHeader->GetBlockTime() - pindex->GetBlockTime() <
1539  *pindexBestHeader, consensusParams) <
1541 }
1542 
1543 PeerManager::PeerManager(const CChainParams &chainparams, CConnman &connman,
1544  BanMan *banman, CScheduler &scheduler,
1545  ChainstateManager &chainman, CTxMemPool &pool)
1546  : m_chainparams(chainparams), m_connman(connman), m_banman(banman),
1547  m_chainman(chainman), m_mempool(pool), m_stale_tip_check_time(0) {
1548  // Initialize global variables that cannot be constructed at startup.
1549  recentRejects.reset(new CRollingBloomFilter(120000, 0.000001));
1550 
1551  {
1552  LOCK(cs_rejectedProofs);
1553  rejectedProofs =
1554  std::make_unique<CRollingBloomFilter>(100000, 0.000001);
1555  }
1556 
1557  // Blocks don't typically have more than 4000 transactions, so this should
1558  // be at least six blocks (~1 hr) worth of transactions that we can store.
1559  // If the number of transactions appearing in a block goes up, or if we are
1560  // seeing getdata requests more than an hour after initial announcement, we
1561  // can increase this number.
1562  // The false positive rate of 1/1M should come out to less than 1
1563  // transaction per day that would be inadvertently ignored (which is the
1564  // same probability that we have in the reject filter).
1565  g_recent_confirmed_transactions.reset(
1566  new CRollingBloomFilter(24000, 0.000001));
1567 
1568  // Stale tip checking and peer eviction are on two different timers, but we
1569  // don't want them to get out of sync due to drift in the scheduler, so we
1570  // combine them in one function and schedule at the quicker (peer-eviction)
1571  // timer.
1572  static_assert(
1574  "peer eviction timer should be less than stale tip check timer");
1575  scheduler.scheduleEvery(
1576  [this]() {
1578  return true;
1579  },
1580  std::chrono::seconds{EXTRA_PEER_CHECK_INTERVAL});
1581 
1582  // schedule next run for 10-15 minutes in the future
1583  const std::chrono::milliseconds delta =
1584  std::chrono::minutes{10} + GetRandMillis(std::chrono::minutes{5});
1585  scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); },
1586  delta);
1587 
1588  // Update the avalanche statistics on a schedule
1589  scheduler.scheduleEvery(
1590  [this]() {
1592  return true;
1593  },
1595 }
1596 
1602 void PeerManager::BlockConnected(const std::shared_ptr<const CBlock> &pblock,
1603  const CBlockIndex *pindex) {
1604  {
1605  LOCK(g_cs_orphans);
1606 
1607  std::vector<TxId> vOrphanErase;
1608 
1609  for (const CTransactionRef &ptx : pblock->vtx) {
1610  const CTransaction &tx = *ptx;
1611 
1612  // Which orphan pool entries must we evict?
1613  for (const auto &txin : tx.vin) {
1614  auto itByPrev = mapOrphanTransactionsByPrev.find(txin.prevout);
1615  if (itByPrev == mapOrphanTransactionsByPrev.end()) {
1616  continue;
1617  }
1618 
1619  for (auto mi = itByPrev->second.begin();
1620  mi != itByPrev->second.end(); ++mi) {
1621  const CTransaction &orphanTx = *(*mi)->second.tx;
1622  const TxId &orphanId = orphanTx.GetId();
1623  vOrphanErase.push_back(orphanId);
1624  }
1625  }
1626  }
1627 
1628  // Erase orphan transactions included or precluded by this block
1629  if (vOrphanErase.size()) {
1630  int nErased = 0;
1631  for (const auto &orphanId : vOrphanErase) {
1632  nErased += EraseOrphanTx(orphanId);
1633  }
1635  "Erased %d orphan tx included or conflicted by block\n",
1636  nErased);
1637  }
1638 
1639  g_last_tip_update = GetTime();
1640  }
1641  {
1642  LOCK(g_cs_recent_confirmed_transactions);
1643  for (const CTransactionRef &ptx : pblock->vtx) {
1644  g_recent_confirmed_transactions->insert(ptx->GetId());
1645  }
1646  }
1647  {
1648  LOCK(cs_main);
1649  for (const auto &ptx : pblock->vtx) {
1650  m_txrequest.ForgetInvId(ptx->GetId());
1651  }
1652  }
1653 }
1654 
1655 void PeerManager::BlockDisconnected(const std::shared_ptr<const CBlock> &block,
1656  const CBlockIndex *pindex) {
1657  // To avoid relay problems with transactions that were previously
1658  // confirmed, clear our filter of recently confirmed transactions whenever
1659  // there's a reorg.
1660  // This means that in a 1-block reorg (where 1 block is disconnected and
1661  // then another block reconnected), our filter will drop to having only one
1662  // block's worth of transactions in it, but that should be fine, since
1663  // presumably the most common case of relaying a confirmed transaction
1664  // should be just after a new block containing it is found.
1665  LOCK(g_cs_recent_confirmed_transactions);
1666  g_recent_confirmed_transactions->reset();
1667 }
1668 
1669 // All of the following cache a recent block, and are protected by
1670 // cs_most_recent_block
1672 static std::shared_ptr<const CBlock>
1673  most_recent_block GUARDED_BY(cs_most_recent_block);
1674 static std::shared_ptr<const CBlockHeaderAndShortTxIDs>
1675  most_recent_compact_block GUARDED_BY(cs_most_recent_block);
1676 static uint256 most_recent_block_hash GUARDED_BY(cs_most_recent_block);
1677 
1683  const CBlockIndex *pindex, const std::shared_ptr<const CBlock> &pblock) {
1684  std::shared_ptr<const CBlockHeaderAndShortTxIDs> pcmpctblock =
1685  std::make_shared<const CBlockHeaderAndShortTxIDs>(*pblock);
1686  const CNetMsgMaker msgMaker(PROTOCOL_VERSION);
1687 
1688  LOCK(cs_main);
1689 
1690  static int nHighestFastAnnounce = 0;
1691  if (pindex->nHeight <= nHighestFastAnnounce) {
1692  return;
1693  }
1694  nHighestFastAnnounce = pindex->nHeight;
1695 
1696  uint256 hashBlock(pblock->GetHash());
1697 
1698  {
1700  most_recent_block_hash = hashBlock;
1701  most_recent_block = pblock;
1702  most_recent_compact_block = pcmpctblock;
1703  }
1704 
1706  [this, &pcmpctblock, pindex, &msgMaker,
1707  &hashBlock](CNode *pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
1709 
1710  // TODO: Avoid the repeated-serialization here
1712  pnode->fDisconnect) {
1713  return;
1714  }
1715  ProcessBlockAvailability(pnode->GetId());
1716  CNodeState &state = *State(pnode->GetId());
1717  // If the peer has, or we announced to them the previous block
1718  // already, but we don't think they have this one, go ahead and
1719  // announce it.
1720  if (state.fPreferHeaderAndIDs && !PeerHasHeader(&state, pindex) &&
1721  PeerHasHeader(&state, pindex->pprev)) {
1723  "%s sending header-and-ids %s to peer=%d\n",
1724  "PeerManager::NewPoWValidBlock", hashBlock.ToString(),
1725  pnode->GetId());
1727  pnode, msgMaker.Make(NetMsgType::CMPCTBLOCK, *pcmpctblock));
1728  state.pindexBestHeaderSent = pindex;
1729  }
1730  });
1731 }
1732 
1738  const CBlockIndex *pindexFork,
1739  bool fInitialDownload) {
1740  const int nNewHeight = pindexNew->nHeight;
1741  m_connman.SetBestHeight(nNewHeight);
1742 
1743  SetServiceFlagsIBDCache(!fInitialDownload);
1744  if (!fInitialDownload) {
1745  // Find the hashes of all blocks that weren't previously in the best
1746  // chain.
1747  std::vector<BlockHash> vHashes;
1748  const CBlockIndex *pindexToAnnounce = pindexNew;
1749  while (pindexToAnnounce != pindexFork) {
1750  vHashes.push_back(pindexToAnnounce->GetBlockHash());
1751  pindexToAnnounce = pindexToAnnounce->pprev;
1752  if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) {
1753  // Limit announcements in case of a huge reorganization. Rely on
1754  // the peer's synchronization mechanism in that case.
1755  break;
1756  }
1757  }
1758  // Relay inventory, but don't relay old inventory during initial block
1759  // download.
1760  m_connman.ForEachNode([nNewHeight, &vHashes](CNode *pnode) {
1761  LOCK(pnode->cs_inventory);
1762  if (nNewHeight > (pnode->nStartingHeight != -1
1763  ? pnode->nStartingHeight - 2000
1764  : 0)) {
1765  for (const BlockHash &hash : reverse_iterate(vHashes)) {
1766  pnode->vBlockHashesToAnnounce.push_back(hash);
1767  }
1768  }
1769  });
1771  }
1772 }
1773 
1779  const BlockValidationState &state) {
1780  LOCK(cs_main);
1781 
1782  const BlockHash hash = block.GetHash();
1783  std::map<BlockHash, std::pair<NodeId, bool>>::iterator it =
1784  mapBlockSource.find(hash);
1785 
1786  // If the block failed validation, we know where it came from and we're
1787  // still connected to that peer, maybe punish.
1788  if (state.IsInvalid() && it != mapBlockSource.end() &&
1789  State(it->second.first)) {
1790  MaybePunishNodeForBlock(/*nodeid=*/it->second.first, state,
1791  /*via_compact_block=*/!it->second.second);
1792  }
1793  // Check that:
1794  // 1. The block is valid
1795  // 2. We're not in initial block download
1796  // 3. This is currently the best block we're aware of. We haven't updated
1797  // the tip yet so we have no way to check this directly here. Instead we
1798  // just check that there are currently no other blocks in flight.
1799  else if (state.IsValid() &&
1800  !::ChainstateActive().IsInitialBlockDownload() &&
1801  mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()) {
1802  if (it != mapBlockSource.end()) {
1803  MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first, m_connman);
1804  }
1805  }
1806 
1807  if (it != mapBlockSource.end()) {
1808  mapBlockSource.erase(it);
1809  }
1810 }
1811 
1813 //
1814 // Messages
1815 //
1816 
1817 static bool AlreadyHaveTx(const TxId &txid, const CTxMemPool &mempool)
1819  assert(recentRejects);
1820  if (::ChainActive().Tip()->GetBlockHash() != hashRecentRejectsChainTip) {
1821  // If the chain tip has changed previously rejected transactions
1822  // might be now valid, e.g. due to a nLockTime'd tx becoming
1823  // valid, or a double-spend. Reset the rejects filter and give
1824  // those txs a second chance.
1825  hashRecentRejectsChainTip = ::ChainActive().Tip()->GetBlockHash();
1826  recentRejects->reset();
1827  }
1828 
1829  {
1830  LOCK(g_cs_orphans);
1831  if (mapOrphanTransactions.count(txid)) {
1832  return true;
1833  }
1834  }
1835 
1836  {
1837  LOCK(g_cs_recent_confirmed_transactions);
1838  if (g_recent_confirmed_transactions->contains(txid)) {
1839  return true;
1840  }
1841  }
1842 
1843  return recentRejects->contains(txid) || mempool.exists(txid);
1844 }
1845 
1846 static bool AlreadyHaveBlock(const BlockHash &block_hash)
1848  return LookupBlockIndex(block_hash) != nullptr;
1849 }
1850 
1851 static bool AlreadyHaveProof(const avalanche::ProofId &proofid) {
1852  assert(g_avalanche);
1853 
1854  const bool hasProof = g_avalanche->withPeerManager(
1855  [&proofid](avalanche::PeerManager &pm) { return pm.exists(proofid); });
1856 
1857  LOCK(cs_rejectedProofs);
1858  return hasProof || rejectedProofs->contains(proofid);
1859 }
1860 
1861 void RelayTransaction(const TxId &txid, const CConnman &connman) {
1862  connman.ForEachNode(
1863  [&txid](CNode *pnode) { pnode->PushTxInventory(txid); });
1864 }
1865 
1866 void RelayProof(const avalanche::ProofId &proofid, const CConnman &connman) {
1867  connman.ForEachNode(
1868  [&proofid](CNode *pnode) { pnode->PushProofInventory(proofid); });
1869 }
1870 
1871 static void RelayAddress(const CAddress &addr, bool fReachable,
1872  const CConnman &connman) {
1873  if (!fReachable && !addr.IsRelayable()) {
1874  return;
1875  }
1876 
1877  // Relay to a limited number of other nodes.
1878  // Use deterministic randomness to send to the same nodes for 24 hours at a
1879  // time so the m_addr_knowns of the chosen nodes prevent repeats
1880  uint64_t hashAddr = addr.GetHash();
1881  const CSipHasher hasher =
1883  .Write(hashAddr << 32)
1884  .Write((GetTime() + hashAddr) / (24 * 60 * 60));
1885  FastRandomContext insecure_rand;
1886 
1887  // Relay reachable addresses to 2 peers. Unreachable addresses are relayed
1888  // randomly to 1 or 2 peers.
1889  unsigned int nRelayNodes = (fReachable || (hasher.Finalize() & 1)) ? 2 : 1;
1890  std::array<std::pair<uint64_t, CNode *>, 2> best{
1891  {{0, nullptr}, {0, nullptr}}};
1892  assert(nRelayNodes <= best.size());
1893 
1894  auto sortfunc = [&best, &hasher, nRelayNodes](CNode *pnode) {
1895  if (pnode->RelayAddrsWithConn()) {
1896  uint64_t hashKey =
1897  CSipHasher(hasher).Write(pnode->GetId()).Finalize();
1898  for (unsigned int i = 0; i < nRelayNodes; i++) {
1899  if (hashKey > best[i].first) {
1900  std::copy(best.begin() + i, best.begin() + nRelayNodes - 1,
1901  best.begin() + i + 1);
1902  best[i] = std::make_pair(hashKey, pnode);
1903  break;
1904  }
1905  }
1906  }
1907  };
1908 
1909  auto pushfunc = [&addr, &best, nRelayNodes, &insecure_rand] {
1910  for (unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) {
1911  best[i].second->PushAddress(addr, insecure_rand);
1912  }
1913  };
1914 
1915  connman.ForEachNodeThen(std::move(sortfunc), std::move(pushfunc));
1916 }
1917 
1918 static void ProcessGetBlockData(const Config &config, CNode &pfrom,
1919  const CInv &inv, CConnman &connman,
1920  const std::atomic<bool> &interruptMsgProc) {
1921  const Consensus::Params &consensusParams =
1922  config.GetChainParams().GetConsensus();
1923 
1924  const BlockHash hash(inv.hash);
1925 
1926  bool send = false;
1927  std::shared_ptr<const CBlock> a_recent_block;
1928  std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block;
1929  {
1931  a_recent_block = most_recent_block;
1932  a_recent_compact_block = most_recent_compact_block;
1933  }
1934 
1935  bool need_activate_chain = false;
1936  {
1937  LOCK(cs_main);
1938  const CBlockIndex *pindex = LookupBlockIndex(hash);
1939  if (pindex) {
1940  if (pindex->HaveTxsDownloaded() &&
1941  !pindex->IsValid(BlockValidity::SCRIPTS) &&
1942  pindex->IsValid(BlockValidity::TREE)) {
1943  // If we have the block and all of its parents, but have not yet
1944  // validated it, we might be in the middle of connecting it (ie
1945  // in the unlock of cs_main before ActivateBestChain but after
1946  // AcceptBlock). In this case, we need to run ActivateBestChain
1947  // prior to checking the relay conditions below.
1948  need_activate_chain = true;
1949  }
1950  }
1951  } // release cs_main before calling ActivateBestChain
1952  if (need_activate_chain) {
1953  BlockValidationState state;
1954  if (!ActivateBestChain(config, state, a_recent_block)) {
1955  LogPrint(BCLog::NET, "failed to activate chain (%s)\n",
1956  state.ToString());
1957  }
1958  }
1959 
1960  LOCK(cs_main);
1961  const CBlockIndex *pindex = LookupBlockIndex(hash);
1962  if (pindex) {
1963  send = BlockRequestAllowed(pindex, consensusParams);
1964  if (!send) {
1966  "%s: ignoring request from peer=%i for old "
1967  "block that isn't in the main chain\n",
1968  __func__, pfrom.GetId());
1969  }
1970  }
1971  const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
1972  // Disconnect node in case we have reached the outbound limit for serving
1973  // historical blocks.
1974  if (send && connman.OutboundTargetReached(true) &&
1975  (((pindexBestHeader != nullptr) &&
1976  (pindexBestHeader->GetBlockTime() - pindex->GetBlockTime() >
1978  inv.IsMsgFilteredBlk()) &&
1979  // nodes with the download permission may exceed target
1980  !pfrom.HasPermission(PF_DOWNLOAD)) {
1982  "historical block serving limit reached, disconnect peer=%d\n",
1983  pfrom.GetId());
1984 
1985  // disconnect node
1986  pfrom.fDisconnect = true;
1987  send = false;
1988  }
1989  // Avoid leaking prune-height by never sending blocks below the
1990  // NODE_NETWORK_LIMITED threshold.
1991  // Add two blocks buffer extension for possible races
1992  if (send && !pfrom.HasPermission(PF_NOBAN) &&
1993  ((((pfrom.GetLocalServices() & NODE_NETWORK_LIMITED) ==
1995  ((pfrom.GetLocalServices() & NODE_NETWORK) != NODE_NETWORK) &&
1996  (::ChainActive().Tip()->nHeight - pindex->nHeight >
1997  (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2)))) {
1999  "Ignore block request below NODE_NETWORK_LIMITED "
2000  "threshold from peer=%d\n",
2001  pfrom.GetId());
2002 
2003  // disconnect node and prevent it from stalling (would otherwise wait
2004  // for the missing block)
2005  pfrom.fDisconnect = true;
2006  send = false;
2007  }
2008  // Pruned nodes may have deleted the block, so check whether it's available
2009  // before trying to send.
2010  if (send && pindex->nStatus.hasData()) {
2011  std::shared_ptr<const CBlock> pblock;
2012  if (a_recent_block &&
2013  a_recent_block->GetHash() == pindex->GetBlockHash()) {
2014  pblock = a_recent_block;
2015  } else {
2016  // Send block from disk
2017  std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>();
2018  if (!ReadBlockFromDisk(*pblockRead, pindex, consensusParams)) {
2019  assert(!"cannot load block from disk");
2020  }
2021  pblock = pblockRead;
2022  }
2023  if (inv.IsMsgBlk()) {
2024  connman.PushMessage(&pfrom,
2025  msgMaker.Make(NetMsgType::BLOCK, *pblock));
2026  } else if (inv.IsMsgFilteredBlk()) {
2027  bool sendMerkleBlock = false;
2028  CMerkleBlock merkleBlock;
2029  if (pfrom.m_tx_relay != nullptr) {
2030  LOCK(pfrom.m_tx_relay->cs_filter);
2031  if (pfrom.m_tx_relay->pfilter) {
2032  sendMerkleBlock = true;
2033  merkleBlock =
2034  CMerkleBlock(*pblock, *pfrom.m_tx_relay->pfilter);
2035  }
2036  }
2037  if (sendMerkleBlock) {
2038  connman.PushMessage(
2039  &pfrom,
2040  msgMaker.Make(NetMsgType::MERKLEBLOCK, merkleBlock));
2041  // CMerkleBlock just contains hashes, so also push any
2042  // transactions in the block the client did not see. This avoids
2043  // hurting performance by pointlessly requiring a round-trip.
2044  // Note that there is currently no way for a node to request any
2045  // single transactions we didn't send here - they must either
2046  // disconnect and retry or request the full block. Thus, the
2047  // protocol spec specified allows for us to provide duplicate
2048  // txn here, however we MUST always provide at least what the
2049  // remote peer needs.
2050  typedef std::pair<size_t, uint256> PairType;
2051  for (PairType &pair : merkleBlock.vMatchedTxn) {
2052  connman.PushMessage(
2053  &pfrom, msgMaker.Make(NetMsgType::TX,
2054  *pblock->vtx[pair.first]));
2055  }
2056  }
2057  // else
2058  // no response
2059  } else if (inv.IsMsgCmpctBlk()) {
2060  // If a peer is asking for old blocks, we're almost guaranteed they
2061  // won't have a useful mempool to match against a compact block, and
2062  // we don't feel like constructing the object for them, so instead
2063  // we respond with the full, non-compact block.
2064  int nSendFlags = 0;
2065  if (CanDirectFetch(consensusParams) &&
2066  pindex->nHeight >=
2067  ::ChainActive().Height() - MAX_CMPCTBLOCK_DEPTH) {
2068  CBlockHeaderAndShortTxIDs cmpctblock(*pblock);
2069  connman.PushMessage(
2070  &pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK,
2071  cmpctblock));
2072  } else {
2073  connman.PushMessage(
2074  &pfrom,
2075  msgMaker.Make(nSendFlags, NetMsgType::BLOCK, *pblock));
2076  }
2077  }
2078 
2079  // Trigger the peer node to send a getblocks request for the next batch
2080  // of inventory.
2081  if (hash == pfrom.hashContinue) {
2082  // Send immediately. This must send even if redundant, and
2083  // we want it right after the last block so they don't wait for
2084  // other stuff first.
2085  std::vector<CInv> vInv;
2086  vInv.push_back(
2087  CInv(MSG_BLOCK, ::ChainActive().Tip()->GetBlockHash()));
2088  connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::INV, vInv));
2089  pfrom.hashContinue = BlockHash();
2090  }
2091  }
2092 }
2093 
2097  const CNode &peer, const TxId &txid,
2098  const std::chrono::seconds mempool_req,
2099  const std::chrono::seconds now)
2101  auto txinfo = mempool.info(txid);
2102  if (txinfo.tx) {
2103  // If a TX could have been INVed in reply to a MEMPOOL request,
2104  // or is older than UNCONDITIONAL_RELAY_DELAY, permit the request
2105  // unconditionally.
2106  if ((mempool_req.count() && txinfo.m_time <= mempool_req) ||
2107  txinfo.m_time <= now - UNCONDITIONAL_RELAY_DELAY) {
2108  return std::move(txinfo.tx);
2109  }
2110  }
2111 
2112  {
2113  LOCK(cs_main);
2114 
2115  // Otherwise, the transaction must have been announced recently.
2116  if (State(peer.GetId())->m_recently_announced_invs.contains(txid)) {
2117  // If it was, it can be relayed from either the mempool...
2118  if (txinfo.tx) {
2119  return std::move(txinfo.tx);
2120  }
2121  // ... or the relay pool.
2122  auto mi = mapRelay.find(txid);
2123  if (mi != mapRelay.end()) {
2124  return mi->second;
2125  }
2126  }
2127  }
2128 
2129  return {};
2130 }
2131 
2134 static avalanche::ProofRef
2135 FindProofForGetData(const CNode &peer, const avalanche::ProofId &proofid,
2136  const std::chrono::seconds now) {
2137  avalanche::ProofRef proof = nullptr;
2138 
2139  bool send_unconditionally =
2140  g_avalanche->withPeerManager([&](const avalanche::PeerManager &pm) {
2141  return pm.forPeer(proofid, [&](const avalanche::Peer &peer) {
2142  proof = peer.proof;
2143 
2144  // If we know that proof for long enough, allow for requesting
2145  // it.
2146  return peer.registration_time <=
2148  });
2149  });
2150 
2151  // We don't have this proof
2152  if (!proof) {
2153  return nullptr;
2154  }
2155 
2156  if (send_unconditionally) {
2157  return proof;
2158  }
2159 
2160  // Otherwise, the proofs must have been announced recently.
2161  LOCK(cs_main);
2162  if (State(peer.GetId())->m_recently_announced_proofs.contains(proofid)) {
2163  return proof;
2164  }
2165 
2166  return nullptr;
2167 }
2168 
2169 static void ProcessGetData(const Config &config, CNode &pfrom, Peer &peer,
2170  CConnman &connman, CTxMemPool &mempool,
2171  const std::atomic<bool> &interruptMsgProc)
2172  EXCLUSIVE_LOCKS_REQUIRED(peer.m_getdata_requests_mutex)
2173  LOCKS_EXCLUDED(::cs_main) {
2175 
2176  std::deque<CInv>::iterator it = peer.m_getdata_requests.begin();
2177  std::vector<CInv> vNotFound;
2178  const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
2179 
2180  const std::chrono::seconds now = GetTime<std::chrono::seconds>();
2181  // Get last mempool request time
2182  const std::chrono::seconds mempool_req =
2183  pfrom.m_tx_relay != nullptr
2184  ? pfrom.m_tx_relay->m_last_mempool_req.load()
2185  : std::chrono::seconds::min();
2186 
2187  // Process as many TX or AVA_PROOF items from the front of the getdata
2188  // queue as possible, since they're common and it's efficient to batch
2189  // process them.
2190  while (it != peer.m_getdata_requests.end()) {
2191  if (interruptMsgProc) {
2192  return;
2193  }
2194  // The send buffer provides backpressure. If there's no space in
2195  // the buffer, pause processing until the next call.
2196  if (pfrom.fPauseSend) {
2197  break;
2198  }
2199 
2200  const CInv &inv = *it;
2201 
2202  if (it->IsMsgProof()) {
2203  const avalanche::ProofId proofid(inv.hash);
2204  auto proof = FindProofForGetData(pfrom, proofid, now);
2205  if (proof) {
2206  connman.PushMessage(
2207  &pfrom, msgMaker.Make(NetMsgType::AVAPROOF, *proof));
2208  g_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
2209  pm.removeUnbroadcastProof(proofid);
2210  });
2211  } else {
2212  vNotFound.push_back(inv);
2213  }
2214 
2215  ++it;
2216  continue;
2217  }
2218 
2219  if (it->IsMsgTx()) {
2220  if (pfrom.m_tx_relay == nullptr) {
2221  // Ignore GETDATA requests for transactions from blocks-only
2222  // peers.
2223  continue;
2224  }
2225 
2226  const TxId txid(inv.hash);
2227  CTransactionRef tx =
2228  FindTxForGetData(mempool, pfrom, txid, mempool_req, now);
2229  if (tx) {
2230  int nSendFlags = 0;
2231  connman.PushMessage(
2232  &pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *tx));
2233  mempool.RemoveUnbroadcastTx(txid);
2234  // As we're going to send tx, make sure its unconfirmed parents
2235  // are made requestable.
2236  std::vector<TxId> parent_ids_to_add;
2237  {
2238  LOCK(mempool.cs);
2239  auto txiter = mempool.GetIter(tx->GetId());
2240  if (txiter) {
2241  const CTxMemPoolEntry::Parents &parents =
2242  (*txiter)->GetMemPoolParentsConst();
2243  parent_ids_to_add.reserve(parents.size());
2244  for (const CTxMemPoolEntry &parent : parents) {
2245  if (parent.GetTime() >
2246  now - UNCONDITIONAL_RELAY_DELAY) {
2247  parent_ids_to_add.push_back(
2248  parent.GetTx().GetId());
2249  }
2250  }
2251  }
2252  }
2253  for (const TxId &parent_txid : parent_ids_to_add) {
2254  // Relaying a transaction with a recent but unconfirmed
2255  // parent.
2256  if (WITH_LOCK(pfrom.m_tx_relay->cs_tx_inventory,
2257  return !pfrom.m_tx_relay->filterInventoryKnown
2258  .contains(parent_txid))) {
2259  LOCK(cs_main);
2260  State(pfrom.GetId())
2261  ->m_recently_announced_invs.insert(parent_txid);
2262  }
2263  }
2264  } else {
2265  vNotFound.push_back(inv);
2266  }
2267 
2268  ++it;
2269  continue;
2270  }
2271 
2272  // It's neither a proof nor a transaction
2273  break;
2274  }
2275 
2276  // Only process one BLOCK item per call, since they're uncommon and can be
2277  // expensive to process.
2278  if (it != peer.m_getdata_requests.end() && !pfrom.fPauseSend) {
2279  const CInv &inv = *it++;
2280  if (inv.IsGenBlkMsg()) {
2281  ProcessGetBlockData(config, pfrom, inv, connman, interruptMsgProc);
2282  }
2283  // else: If the first item on the queue is an unknown type, we erase it
2284  // and continue processing the queue on the next call.
2285  }
2286 
2287  peer.m_getdata_requests.erase(peer.m_getdata_requests.begin(), it);
2288 
2289  if (!vNotFound.empty()) {
2290  // Let the peer know that we didn't find what it asked for, so it
2291  // doesn't have to wait around forever. SPV clients care about this
2292  // message: it's needed when they are recursively walking the
2293  // dependencies of relevant unconfirmed transactions. SPV clients want
2294  // to do that because they want to know about (and store and rebroadcast
2295  // and risk analyze) the dependencies of transactions relevant to them,
2296  // without having to download the entire memory pool. Also, other nodes
2297  // can use these messages to automatically request a transaction from
2298  // some other peer that annnounced it, and stop waiting for us to
2299  // respond. In normal operation, we often send NOTFOUND messages for
2300  // parents of transactions that we relay; if a peer is missing a parent,
2301  // they may assume we have them and request the parents from us.
2302  connman.PushMessage(&pfrom,
2303  msgMaker.Make(NetMsgType::NOTFOUND, vNotFound));
2304  }
2305 }
2306 
// Respond to a peer's getblocktxn request: build a BLOCKTXN message
// containing the transactions of |block| selected by |req.indices| and
// send it back to the requesting peer (compact block relay).
// A request with any index past the end of block.vtx is treated as
// misbehavior (score 100) and no response is sent.
2308  const BlockTransactionsRequest &req) {
2309  BlockTransactions resp(req);
// Copy each requested transaction; the indices address positions in
// block.vtx directly, so validate them before use.
2310  for (size_t i = 0; i < req.indices.size(); i++) {
2311  if (req.indices[i] >= block.vtx.size()) {
2312  Misbehaving(pfrom, 100,
2313  "getblocktxn with out-of-bounds tx indices");
2314  return;
2315  }
2316  resp.txn[i] = block.vtx[req.indices[i]];
2317  }
2318  LOCK(cs_main);
// Serialize with the negotiated protocol version of this peer.
2319  const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
2320  int nSendFlags = 0;
2322  &pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCKTXN, resp));
2323 }
2324 
// Process a batch of headers received from |pfrom| (via a HEADERS message
// or, when via_compact_block is set, a compact block announcement):
// - handle non-connecting announcements by requesting getheaders and
//   counting nUnconnectingHeaders toward misbehavior,
// - reject non-continuous sequences,
// - accept the headers into the block index via ProcessNewBlockHeaders,
// - update the peer's known-block availability and possibly direct-fetch
//   blocks, disconnect low-work outbound peers during IBD, and protect
//   useful outbound peers from eviction.
2326  const Config &config, CNode &pfrom,
2327  const std::vector<CBlockHeader> &headers, bool via_compact_block) {
2328  const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
2329  size_t nCount = headers.size();
2330 
2331  if (nCount == 0) {
2332  // Nothing interesting. Stop asking this peers for more headers.
2333  return;
2334  }
2335 
2336  bool received_new_header = false;
2337  const CBlockIndex *pindexLast = nullptr;
2338  {
2339  LOCK(cs_main);
2340  CNodeState *nodestate = State(pfrom.GetId());
2341 
2342  // If this looks like it could be a block announcement (nCount <
2343  // MAX_BLOCKS_TO_ANNOUNCE), use special logic for handling headers that
2344  // don't connect:
2345  // - Send a getheaders message in response to try to connect the chain.
2346  // - The peer can send up to MAX_UNCONNECTING_HEADERS in a row that
2347  // don't connect before giving DoS points
2348  // - Once a headers message is received that is valid and does connect,
2349  // nUnconnectingHeaders gets reset back to 0.
2350  if (!LookupBlockIndex(headers[0].hashPrevBlock) &&
2351  nCount < MAX_BLOCKS_TO_ANNOUNCE) {
2352  nodestate->nUnconnectingHeaders++;
2354  &pfrom,
2355  msgMaker.Make(NetMsgType::GETHEADERS,
2356  ::ChainActive().GetLocator(pindexBestHeader),
2357  uint256()));
2358  LogPrint(
2359  BCLog::NET,
2360  "received header %s: missing prev block %s, sending getheaders "
2361  "(%d) to end (peer=%d, nUnconnectingHeaders=%d)\n",
2362  headers[0].GetHash().ToString(),
2363  headers[0].hashPrevBlock.ToString(), pindexBestHeader->nHeight,
2364  pfrom.GetId(), nodestate->nUnconnectingHeaders);
2365  // Set hashLastUnknownBlock for this peer, so that if we eventually
2366  // get the headers - even from a different peer - we can use this
2367  // peer to download.
2368  UpdateBlockAvailability(pfrom.GetId(), headers.back().GetHash());
2369 
2370  if (nodestate->nUnconnectingHeaders % MAX_UNCONNECTING_HEADERS ==
2371  0) {
2372  // The peer is sending us many headers we can't connect.
2373  Misbehaving(pfrom, 20,
2374  strprintf("%d non-connecting headers",
2375  nodestate->nUnconnectingHeaders));
2376  }
2377  return;
2378  }
2379 
// Each header must chain onto the hash of the one before it; a broken
// sequence costs the peer 20 misbehavior points.
2380  BlockHash hashLastBlock;
2381  for (const CBlockHeader &header : headers) {
2382  if (!hashLastBlock.IsNull() &&
2383  header.hashPrevBlock != hashLastBlock) {
2384  Misbehaving(pfrom, 20, "non-continuous headers sequence");
2385  return;
2386  }
2387  hashLastBlock = header.GetHash();
2388  }
2389 
2390  // If we don't have the last header, then they'll have given us
2391  // something new (if these headers are valid).
2392  if (!LookupBlockIndex(hashLastBlock)) {
2393  received_new_header = true;
2394  }
2395  }
2396 
// Accept the headers into our block index. If the batch is invalid, the
// peer may be punished; otherwise pindexLast points at the last accepted
// header's index entry.
2397  BlockValidationState state;
2398  if (!m_chainman.ProcessNewBlockHeaders(config, headers, state,
2399  &pindexLast)) {
2400  if (state.IsInvalid()) {
2401  MaybePunishNodeForBlock(pfrom.GetId(), state, via_compact_block,
2402  "invalid header received");
2403  return;
2404  }
2405  }
2406 
2407  {
2408  LOCK(cs_main);
2409  CNodeState *nodestate = State(pfrom.GetId());
2410  if (nodestate->nUnconnectingHeaders > 0) {
2412  "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n",
2413  pfrom.GetId(), nodestate->nUnconnectingHeaders);
2414  }
// A valid, connecting headers message clears the unconnecting counter.
2415  nodestate->nUnconnectingHeaders = 0;
2416 
2417  assert(pindexLast);
2418  UpdateBlockAvailability(pfrom.GetId(), pindexLast->GetBlockHash());
2419 
2420  // From here, pindexBestKnownBlock should be guaranteed to be non-null,
2421  // because it is set in UpdateBlockAvailability. Some nullptr checks are
2422  // still present, however, as belt-and-suspenders.
2423 
2424  if (received_new_header &&
2425  pindexLast->nChainWork > ::ChainActive().Tip()->nChainWork) {
2426  nodestate->m_last_block_announcement = GetTime();
2427  }
2428 
2429  if (nCount == MAX_HEADERS_RESULTS) {
2430  // Headers message had its maximum size; the peer may have more
2431  // headers.
2432  // TODO: optimize: if pindexLast is an ancestor of
2433  // ::ChainActive().Tip or pindexBestHeader, continue from there
2434  // instead.
2435  LogPrint(
2436  BCLog::NET,
2437  "more getheaders (%d) to end to peer=%d (startheight:%d)\n",
2438  pindexLast->nHeight, pfrom.GetId(), pfrom.nStartingHeight);
2440  &pfrom, msgMaker.Make(NetMsgType::GETHEADERS,
2441  ::ChainActive().GetLocator(pindexLast),
2442  uint256()));
2443  }
2444 
2445  bool fCanDirectFetch = CanDirectFetch(m_chainparams.GetConsensus());
2446  // If this set of headers is valid and ends in a block with at least as
2447  // much work as our tip, download as much as possible.
2448  if (fCanDirectFetch && pindexLast->IsValid(BlockValidity::TREE) &&
2449  ::ChainActive().Tip()->nChainWork <= pindexLast->nChainWork) {
2450  std::vector<const CBlockIndex *> vToFetch;
2451  const CBlockIndex *pindexWalk = pindexLast;
2452  // Calculate all the blocks we'd need to switch to pindexLast, up to
2453  // a limit.
2454  while (pindexWalk && !::ChainActive().Contains(pindexWalk) &&
2455  vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
2456  if (!pindexWalk->nStatus.hasData() &&
2457  !mapBlocksInFlight.count(pindexWalk->GetBlockHash())) {
2458  // We don't have this block, and it's not yet in flight.
2459  vToFetch.push_back(pindexWalk);
2460  }
2461  pindexWalk = pindexWalk->pprev;
2462  }
2463  // If pindexWalk still isn't on our main chain, we're looking at a
2464  // very large reorg at a time we think we're close to caught up to
2465  // the main chain -- this shouldn't really happen. Bail out on the
2466  // direct fetch and rely on parallel download instead.
2467  if (!::ChainActive().Contains(pindexWalk)) {
2468  LogPrint(
2469  BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n",
2470  pindexLast->GetBlockHash().ToString(), pindexLast->nHeight);
2471  } else {
2472  std::vector<CInv> vGetData;
2473  // Download as much as possible, from earliest to latest.
2474  for (const CBlockIndex *pindex : reverse_iterate(vToFetch)) {
2475  if (nodestate->nBlocksInFlight >=
2477  // Can't download any more from this peer
2478  break;
2479  }
2480  vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash()));
2481  MarkBlockAsInFlight(config, m_mempool, pfrom.GetId(),
2482  pindex->GetBlockHash(),
2483  m_chainparams.GetConsensus(), pindex);
2484  LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
2485  pindex->GetBlockHash().ToString(), pfrom.GetId());
2486  }
2487  if (vGetData.size() > 1) {
2489  "Downloading blocks toward %s (%d) via headers "
2490  "direct fetch\n",
2491  pindexLast->GetBlockHash().ToString(),
2492  pindexLast->nHeight);
2493  }
2494  if (vGetData.size() > 0) {
// Prefer a compact block when fetching exactly one block that
// directly extends a fully-validated parent.
2495  if (nodestate->fSupportsDesiredCmpctVersion &&
2496  vGetData.size() == 1 && mapBlocksInFlight.size() == 1 &&
2497  pindexLast->pprev->IsValid(BlockValidity::CHAIN)) {
2498  // In any case, we want to download using a compact
2499  // block, not a regular one.
2500  vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
2501  }
2503  &pfrom, msgMaker.Make(NetMsgType::GETDATA, vGetData));
2504  }
2505  }
2506  }
2507  // If we're in IBD, we want outbound peers that will serve us a useful
2508  // chain. Disconnect peers that are on chains with insufficient work.
2509  if (::ChainstateActive().IsInitialBlockDownload() &&
2510  nCount != MAX_HEADERS_RESULTS) {
2511  // When nCount < MAX_HEADERS_RESULTS, we know we have no more
2512  // headers to fetch from this peer.
2513  if (nodestate->pindexBestKnownBlock &&
2514  nodestate->pindexBestKnownBlock->nChainWork <
2516  // This peer has too little work on their headers chain to help
2517  // us sync -- disconnect if it is an outbound disconnection
2518  // candidate.
2519  // Note: We compare their tip to nMinimumChainWork (rather than
2520  // ::ChainActive().Tip()) because we won't start block download
2521  // until we have a headers chain that has at least
2522  // nMinimumChainWork, even if a peer has a chain past our tip,
2523  // as an anti-DoS measure.
2524  if (pfrom.IsOutboundOrBlockRelayConn()) {
2525  LogPrintf("Disconnecting outbound peer %d -- headers "
2526  "chain has insufficient work\n",
2527  pfrom.GetId());
2528  pfrom.fDisconnect = true;
2529  }
2530  }
2531  }
2532 
2533  // If this is an outbound full-relay peer, check to see if we should
2534  // protect it from the bad/lagging chain logic.
2535  // Note that outbound block-relay peers are excluded from this
2536  // protection, and thus always subject to eviction under the bad/lagging
2537  // chain logic.
2538  // See ChainSyncTimeoutState.
2539  if (!pfrom.fDisconnect && pfrom.IsFullOutboundConn() &&
2540  nodestate->pindexBestKnownBlock != nullptr) {
2541  if (g_outbound_peers_with_protect_from_disconnect <
2543  nodestate->pindexBestKnownBlock->nChainWork >=
2544  ::ChainActive().Tip()->nChainWork &&
2545  !nodestate->m_chain_sync.m_protect) {
2547  "Protecting outbound peer=%d from eviction\n",
2548  pfrom.GetId())
2549  nodestate->m_chain_sync.m_protect = true;
2550  ++g_outbound_peers_with_protect_from_disconnect;
2551  }
2552  }
2553  }
2554 }
2555 
// Reconsider orphan transactions whose ids are queued in
// |orphan_work_set| (typically because a missing parent just arrived).
// At most one orphan is resolved per call: the loop breaks as soon as a
// transaction is either accepted to the mempool or definitively rejected;
// ids no longer present in mapOrphanTransactions are simply skipped.
2565 void PeerManager::ProcessOrphanTx(const Config &config,
2566  std::set<TxId> &orphan_work_set)
2570  while (!orphan_work_set.empty()) {
// Pop the next candidate orphan id from the work set.
2571  const TxId orphanTxId = *orphan_work_set.begin();
2572  orphan_work_set.erase(orphan_work_set.begin());
2573 
2574  auto orphan_it = mapOrphanTransactions.find(orphanTxId);
2575  if (orphan_it == mapOrphanTransactions.end()) {
2576  continue;
2577  }
2578 
2579  const CTransactionRef porphanTx = orphan_it->second.tx;
2580  TxValidationState state;
2581 
2582  if (AcceptToMemoryPool(config, m_mempool, state, porphanTx,
2583  false /* bypass_limits */)) {
2584  LogPrint(BCLog::MEMPOOL, " accepted orphan tx %s\n",
2585  orphanTxId.ToString());
2586  RelayTransaction(orphanTxId, m_connman);
// This orphan may itself be the missing parent of further orphans:
// queue any orphan spending one of its outputs for reconsideration.
2587  for (size_t i = 0; i < porphanTx->vout.size(); i++) {
2588  auto it_by_prev =
2589  mapOrphanTransactionsByPrev.find(COutPoint(orphanTxId, i));
2590  if (it_by_prev != mapOrphanTransactionsByPrev.end()) {
2591  for (const auto &elem : it_by_prev->second) {
2592  orphan_work_set.insert(elem->first);
2593  }
2594  }
2595  }
2596  EraseOrphanTx(orphanTxId);
// Stop after one acceptance; remaining work stays queued in
// orphan_work_set for a later call.
2597  break;
2598  } else if (state.GetResult() != TxValidationResult::TX_MISSING_INPUTS) {
2599  if (state.IsInvalid()) {
2601  " invalid orphan tx %s from peer=%d. %s\n",
2602  orphanTxId.ToString(), orphan_it->second.fromPeer,
2603  state.ToString());
2604  // Punish peer that gave us an invalid orphan tx
2605  MaybePunishNodeForTx(orphan_it->second.fromPeer, state);
2606  }
2607  // Has inputs but not accepted to mempool
2608  // Probably non-standard or insufficient fee
2609  LogPrint(BCLog::MEMPOOL, " removed orphan tx %s\n",
2610  orphanTxId.ToString());
2611 
// Remember the rejection so the tx is not requested/processed again.
2612  assert(recentRejects);
2613  recentRejects->insert(orphanTxId);
2614 
2615  EraseOrphanTx(orphanTxId);
2616  break;
2617  }
2618  }
// Sanity-check mempool consistency against the active UTXO set (a no-op
// unless -checkmempool is enabled).
2619  m_mempool.check(&::ChainstateActive().CoinsTip());
2620 }
2621 
// Validate a getcfilters/getcfheaders/getcfcheckpt request and resolve
// its lookup context.
// On success, returns true and fills the two out-parameters:
//   stop_index   - block index entry for stop_hash
//   filter_index - the filter index serving filter_type
// On failure, returns false; most failure paths also set
// peer.fDisconnect (unsupported filter type, unknown/forbidden stop
// block, inverted or oversized height range). A missing filter index for
// a supported type only logs and returns false.
2642  CNode &peer, const CChainParams &chain_params, BlockFilterType filter_type,
2643  uint32_t start_height, const BlockHash &stop_hash, uint32_t max_height_diff,
2644  const CBlockIndex *&stop_index, BlockFilterIndex *&filter_index) {
// Only BASIC filters are currently served.
2645  const bool supported_filter_type =
2646  (filter_type == BlockFilterType::BASIC &&
2648  if (!supported_filter_type) {
2650  "peer %d requested unsupported block filter type: %d\n",
2651  peer.GetId(), static_cast<uint8_t>(filter_type));
2652  peer.fDisconnect = true;
2653  return false;
2654  }
2655 
2656  {
2657  LOCK(cs_main);
2658  stop_index = LookupBlockIndex(stop_hash);
2659 
2660  // Check that the stop block exists and the peer would be allowed to
2661  // fetch it.
2662  if (!stop_index ||
2663  !BlockRequestAllowed(stop_index, chain_params.GetConsensus())) {
2664  LogPrint(BCLog::NET, "peer %d requested invalid block hash: %s\n",
2665  peer.GetId(), stop_hash.ToString());
2666  peer.fDisconnect = true;
2667  return false;
2668  }
2669  }
2670 
2671  uint32_t stop_height = stop_index->nHeight;
2672  if (start_height > stop_height) {
2673  LogPrint(
2674  BCLog::NET,
2675  "peer %d sent invalid getcfilters/getcfheaders with " /* Continued
2676  */
2677  "start height %d and stop height %d\n",
2678  peer.GetId(), start_height, stop_height);
2679  peer.fDisconnect = true;
2680  return false;
2681  }
// Bound the size of the response; the caller supplies the per-message
// limit (e.g. MAX_GETCFILTERS_SIZE / MAX_GETCFHEADERS_SIZE).
2682  if (stop_height - start_height >= max_height_diff) {
2684  "peer %d requested too many cfilters/cfheaders: %d / %d\n",
2685  peer.GetId(), stop_height - start_height + 1, max_height_diff);
2686  peer.fDisconnect = true;
2687  return false;
2688  }
2689 
2690  filter_index = GetBlockFilterIndex(filter_type);
2691  if (!filter_index) {
2692  LogPrint(BCLog::NET, "Filter index for supported type %s not found\n",
2693  BlockFilterTypeName(filter_type));
2694  return false;
2695  }
2696 
2697  return true;
2698 }
2699 
// Handle a getcfilters request (BIP157 client-side filtering): after the
// shared request validation, look up the filters for the requested height
// range in the filter index and send one CFILTER message per filter.
// Errors are logged and the request silently dropped (no reply).
2710 static void ProcessGetCFilters(CNode &peer, CDataStream &vRecv,
2711  const CChainParams &chain_params,
2712  CConnman &connman) {
// Request payload: filter type byte, start height, stop block hash.
2713  uint8_t filter_type_ser;
2714  uint32_t start_height;
2715  BlockHash stop_hash;
2716 
2717  vRecv >> filter_type_ser >> start_height >> stop_hash;
2718 
2719  const BlockFilterType filter_type =
2720  static_cast<BlockFilterType>(filter_type_ser);
2721 
// Shared validation resolves the stop block and filter index and
// enforces the MAX_GETCFILTERS_SIZE range limit.
2722  const CBlockIndex *stop_index;
2723  BlockFilterIndex *filter_index;
2725  peer, chain_params, filter_type, start_height, stop_hash,
2726  MAX_GETCFILTERS_SIZE, stop_index, filter_index)) {
2727  return;
2728  }
2729 
2730  std::vector<BlockFilter> filters;
2731  if (!filter_index->LookupFilterRange(start_height, stop_index, filters)) {
2733  "Failed to find block filter in index: filter_type=%s, "
2734  "start_height=%d, stop_hash=%s\n",
2735  BlockFilterTypeName(filter_type), start_height,
2736  stop_hash.ToString());
2737  return;
2738  }
2739 
// One CFILTER message per filter in the requested range.
2740  for (const auto &filter : filters) {
2742  .Make(NetMsgType::CFILTER, filter);
2743  connman.PushMessage(&peer, std::move(msg));
2744  }
2745 }
2746 
// Handle a getcfheaders request (BIP157): reply with a single CFHEADERS
// message carrying the filter header of the block preceding start_height
// (zero when start_height is 0) plus the filter hashes for the requested
// range. Errors are logged and the request silently dropped.
2757 static void ProcessGetCFHeaders(CNode &peer, CDataStream &vRecv,
2758  const CChainParams &chain_params,
2759  CConnman &connman) {
// Request payload: filter type byte, start height, stop block hash.
2760  uint8_t filter_type_ser;
2761  uint32_t start_height;
2762  BlockHash stop_hash;
2763 
2764  vRecv >> filter_type_ser >> start_height >> stop_hash;
2765 
2766  const BlockFilterType filter_type =
2767  static_cast<BlockFilterType>(filter_type_ser);
2768 
// Shared validation resolves the stop block and filter index and
// enforces the MAX_GETCFHEADERS_SIZE range limit.
2769  const CBlockIndex *stop_index;
2770  BlockFilterIndex *filter_index;
2772  peer, chain_params, filter_type, start_height, stop_hash,
2773  MAX_GETCFHEADERS_SIZE, stop_index, filter_index)) {
2774  return;
2775  }
2776 
// The previous filter header anchors the chain of filter headers; it is
// left as the all-zero uint256 when the range starts at the genesis
// block.
2777  uint256 prev_header;
2778  if (start_height > 0) {
2779  const CBlockIndex *const prev_block =
2780  stop_index->GetAncestor(static_cast<int>(start_height - 1));
2781  if (!filter_index->LookupFilterHeader(prev_block, prev_header)) {
2783  "Failed to find block filter header in index: "
2784  "filter_type=%s, block_hash=%s\n",
2785  BlockFilterTypeName(filter_type),
2786  prev_block->GetBlockHash().ToString());
2787  return;
2788  }
2789  }
2790 
2791  std::vector<uint256> filter_hashes;
2792  if (!filter_index->LookupFilterHashRange(start_height, stop_index,
2793  filter_hashes)) {
2795  "Failed to find block filter hashes in index: filter_type=%s, "
2796  "start_height=%d, stop_hash=%s\n",
2797  BlockFilterTypeName(filter_type), start_height,
2798  stop_hash.ToString());
2799  return;
2800  }
2801 
2802  CSerializedNetMsg msg =
2804  .Make(NetMsgType::CFHEADERS, filter_type_ser,
2805  stop_index->GetBlockHash(), prev_header, filter_hashes);
2806  connman.PushMessage(&peer, std::move(msg));
2807 }
2808 
// Handle a getcfcheckpt request (BIP157): reply with a CFCHECKPT message
// containing the filter headers at every CFCHECKPT_INTERVAL-height
// checkpoint up to the stop block. No height-range limit applies here
// (max_height_diff is passed as uint32 max). Errors are logged and the
// request silently dropped.
2819 static void ProcessGetCFCheckPt(CNode &peer, CDataStream &vRecv,
2820  const CChainParams &chain_params,
2821  CConnman &connman) {
// Request payload: filter type byte and stop block hash only.
2822  uint8_t filter_type_ser;
2823  BlockHash stop_hash;
2824 
2825  vRecv >> filter_type_ser >> stop_hash;
2826 
2827  const BlockFilterType filter_type =
2828  static_cast<BlockFilterType>(filter_type_ser);
2829 
2830  const CBlockIndex *stop_index;
2831  BlockFilterIndex *filter_index;
2833  peer, chain_params, filter_type, /*start_height=*/0, stop_hash,
2834  /*max_height_diff=*/std::numeric_limits<uint32_t>::max(),
2835  stop_index, filter_index)) {
2836  return;
2837  }
2838 
// One entry per full CFCHECKPT_INTERVAL below the stop height; entry i
// holds the filter header at height (i + 1) * CFCHECKPT_INTERVAL.
2839  std::vector<uint256> headers(stop_index->nHeight / CFCHECKPT_INTERVAL);
2840 
2841  // Populate headers.
2842  const CBlockIndex *block_index = stop_index;
// Walk ancestors from the highest checkpoint down; GetAncestor calls
// stay cheap because each lookup starts from the previous (higher)
// checkpoint's index entry.
2843  for (int i = headers.size() - 1; i >= 0; i--) {
2844  int height = (i + 1) * CFCHECKPT_INTERVAL;
2845  block_index = block_index->GetAncestor(height);
2846 
2847  if (!filter_index->LookupFilterHeader(block_index, headers[i])) {
2849  "Failed to find block filter header in index: "
2850  "filter_type=%s, block_hash=%s\n",
2851  BlockFilterTypeName(filter_type),
2852  block_index->GetBlockHash().ToString());
2853  return;
2854  }
2855  }
2856 
2858  .Make(NetMsgType::CFCHECKPT, filter_type_ser,
2859  stop_index->GetBlockHash(), headers);
2860  connman.PushMessage(&peer, std::move(msg));
2861 }
2862 
2863 bool IsAvalancheMessageType(const std::string &msg_type) {
2864  return msg_type == NetMsgType::AVAHELLO ||
2865  msg_type == NetMsgType::AVAPOLL ||
2866  msg_type == NetMsgType::AVARESPONSE ||
2867  msg_type == NetMsgType::AVAPROOF;
2868 }
2869 
2870 void PeerManager::ProcessMessage(const Config &config, CNode &pfrom,
2871  const std::string &msg_type,
2872  CDataStream &vRecv,
2873  const std::chrono::microseconds time_received,
2874  const std::atomic<bool> &interruptMsgProc) {
2875  LogPrint(BCLog::NET, "received: %s (%u bytes) peer=%d\n",
2876  SanitizeString(msg_type), vRecv.size(), pfrom.GetId());
2877  if (gArgs.IsArgSet("-dropmessagestest") &&
2878  GetRand(gArgs.GetArg("-dropmessagestest", 0)) == 0) {
2879  LogPrintf("dropmessagestest DROPPING RECV MESSAGE\n");
2880  return;
2881  }
2882 
2883  PeerRef peer = GetPeerRef(pfrom.GetId());
2884  if (peer == nullptr) {
2885  return;
2886  }
2887 
2888  if (IsAvalancheMessageType(msg_type)) {
2889  if (!g_avalanche) {
2891  "Avalanche is not initialized, ignoring %s message\n",
2892  msg_type);
2893  return;
2894  }
2895 
2896  if (!isAvalancheEnabled(gArgs)) {
2897  Misbehaving(pfrom, 20, "unsolicited-" + msg_type);
2898  return;
2899  }
2900  }
2901 
2902  if (msg_type == NetMsgType::VERSION) {
2903  // Each connection can only send one version message
2904  if (pfrom.nVersion != 0) {
2905  Misbehaving(pfrom, 1, "redundant version message");
2906  return;
2907  }
2908 
2909  int64_t nTime;
2910  CAddress addrMe;
2911  CAddress addrFrom;
2912  uint64_t nNonce = 1;
2913  uint64_t nServiceInt;
2914  ServiceFlags nServices;
2915  int nVersion;
2916  std::string cleanSubVer;
2917  int nStartingHeight = -1;
2918  bool fRelay = true;
2919  uint64_t nExtraEntropy = 1;
2920 
2921  vRecv >> nVersion >> nServiceInt >> nTime >> addrMe;
2922  nServices = ServiceFlags(nServiceInt);
2923  if (!pfrom.IsInboundConn()) {
2924  m_connman.SetServices(pfrom.addr, nServices);
2925  }
2926  if (pfrom.ExpectServicesFromConn() &&
2927  !HasAllDesirableServiceFlags(nServices)) {
2929  "peer=%d does not offer the expected services "
2930  "(%08x offered, %08x expected); disconnecting\n",
2931  pfrom.GetId(), nServices,
2932  GetDesirableServiceFlags(nServices));
2933  pfrom.fDisconnect = true;
2934  return;
2935  }
2936 
2937  if (nVersion < MIN_PEER_PROTO_VERSION) {
2938  // disconnect from peers older than this proto version
2940  "peer=%d using obsolete version %i; disconnecting\n",
2941  pfrom.GetId(), nVersion);
2942  pfrom.fDisconnect = true;
2943  return;
2944  }
2945 
2946  if (!vRecv.empty()) {
2947  vRecv >> addrFrom >> nNonce;
2948  }
2949  if (!vRecv.empty()) {
2950  std::string strSubVer;
2951  vRecv >> LIMITED_STRING(strSubVer, MAX_SUBVERSION_LENGTH);
2952  cleanSubVer = SanitizeString(strSubVer);
2953  }
2954  if (!vRecv.empty()) {
2955  vRecv >> nStartingHeight;
2956  }
2957  if (!vRecv.empty()) {
2958  vRecv >> fRelay;
2959  }
2960  if (!vRecv.empty()) {
2961  vRecv >> nExtraEntropy;
2962  }
2963  // Disconnect if we connected to ourself
2964  if (pfrom.IsInboundConn() && !m_connman.CheckIncomingNonce(nNonce)) {
2965  LogPrintf("connected to self at %s, disconnecting\n",
2966  pfrom.addr.ToString());
2967  pfrom.fDisconnect = true;
2968  return;
2969  }
2970 
2971  if (pfrom.IsInboundConn() && addrMe.IsRoutable()) {
2972  SeenLocal(addrMe);
2973  }
2974 
2975  // Be shy and don't send version until we hear
2976  if (pfrom.IsInboundConn()) {
2977  PushNodeVersion(config, pfrom, m_connman, GetAdjustedTime());
2978  }
2979 
2980  // Change version
2981  const int greatest_common_version =
2982  std::min(nVersion, PROTOCOL_VERSION);
2983  pfrom.SetCommonVersion(greatest_common_version);
2984  pfrom.nVersion = nVersion;
2985 
2986  const CNetMsgMaker msg_maker(greatest_common_version);
2987 
2988  m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::VERACK));
2989 
2990  // Signal ADDRv2 support (BIP155).
2991  m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::SENDADDRV2));
2992 
2993  pfrom.nServices = nServices;
2994  pfrom.SetAddrLocal(addrMe);
2995  {
2996  LOCK(pfrom.cs_SubVer);
2997  pfrom.cleanSubVer = cleanSubVer;
2998  }
2999  pfrom.nStartingHeight = nStartingHeight;
3000 
3001  // set nodes not relaying blocks and tx and not serving (parts) of the
3002  // historical blockchain as "clients"
3003  pfrom.fClient = (!(nServices & NODE_NETWORK) &&
3004  !(nServices & NODE_NETWORK_LIMITED));
3005 
3006  // set nodes not capable of serving the complete blockchain history as
3007  // "limited nodes"
3008  pfrom.m_limited_node =
3009  (!(nServices & NODE_NETWORK) && (nServices & NODE_NETWORK_LIMITED));
3010 
3011  if (pfrom.m_tx_relay != nullptr) {
3012  LOCK(pfrom.m_tx_relay->cs_filter);
3013  // set to true after we get the first filter* message
3014  pfrom.m_tx_relay->fRelayTxes = fRelay;
3015  }
3016 
3017  pfrom.nRemoteHostNonce = nNonce;
3018  pfrom.nRemoteExtraEntropy = nExtraEntropy;
3019 
3020  // Potentially mark this peer as a preferred download peer.
3021  {
3022  LOCK(cs_main);
3023  UpdatePreferredDownload(pfrom, State(pfrom.GetId()));
3024  }
3025 
3026  if (!pfrom.IsInboundConn() && !pfrom.IsBlockOnlyConn()) {
3027  // For outbound peers, we try to relay our address (so that other
3028  // nodes can try to find us more quickly, as we have no guarantee
3029  // that an outbound peer is even aware of how to reach us) and do a
3030  // one-time address fetch (to help populate/update our addrman). If
3031  // we're starting up for the first time, our addrman may be pretty
3032  // empty and no one will know who we are, so these mechanisms are
3033  // important to help us connect to the network.
3034  //
3035  // We also update the addrman to record connection success for
3036  // these peers (which include OUTBOUND_FULL_RELAY and FEELER
3037  // connections) so that addrman will have an up-to-date notion of
3038  // which peers are online and available.
3039  //
3040  // We skip these operations for BLOCK_RELAY peers to avoid
3041  // potentially leaking information about our BLOCK_RELAY
3042  // connections via the addrman or address relay.
3043  if (fListen && !::ChainstateActive().IsInitialBlockDownload()) {
3044  CAddress addr =
3045  GetLocalAddress(&pfrom.addr, pfrom.GetLocalServices());
3046  FastRandomContext insecure_rand;
3047  if (addr.IsRoutable()) {
3049  "ProcessMessages: advertising address %s\n",
3050  addr.ToString());
3051  pfrom.PushAddress(addr, insecure_rand);
3052  } else if (IsPeerAddrLocalGood(&pfrom)) {
3053  addr.SetIP(addrMe);
3055  "ProcessMessages: advertising address %s\n",
3056  addr.ToString());
3057  pfrom.PushAddress(addr, insecure_rand);
3058  }
3059  }
3060 
3061  // Get recent addresses
3062  m_connman.PushMessage(&pfrom, CNetMsgMaker(greatest_common_version)
3063  .Make(NetMsgType::GETADDR));
3064  pfrom.fGetAddr = true;
3065 
3066  // Moves address from New to Tried table in Addrman, resolves
3067  // tried-table collisions, etc.
3069  }
3070 
3071  std::string remoteAddr;
3072  if (fLogIPs) {
3073  remoteAddr = ", peeraddr=" + pfrom.addr.ToString();
3074  }
3075 
3077  "receive version message: [%s] %s: version %d, blocks=%d, "
3078  "us=%s, peer=%d%s\n",
3079  pfrom.addr.ToString(), cleanSubVer, pfrom.nVersion,
3080  pfrom.nStartingHeight, addrMe.ToString(), pfrom.GetId(),
3081  remoteAddr);
3082 
3083  // Ignore time offsets that are improbable (before the Genesis block)
3084  // and may underflow the nTimeOffset calculation.
3085  int64_t currentTime = GetTime();
3086  if (nTime >= int64_t(m_chainparams.GenesisBlock().nTime)) {
3087  int64_t nTimeOffset = nTime - currentTime;
3088  pfrom.nTimeOffset = nTimeOffset;
3089  AddTimeData(pfrom.addr, nTimeOffset);
3090  } else {
3091  Misbehaving(pfrom, 20,
3092  "Ignoring invalid timestamp in version message");
3093  }
3094 
3095  // Feeler connections exist only to verify if address is online.
3096  if (pfrom.IsFeelerConn()) {
3097  pfrom.fDisconnect = true;
3098  }
3099  return;
3100  }
3101 
3102  if (pfrom.nVersion == 0) {
3103  // Must have a version message before anything else
3104  Misbehaving(pfrom, 10, "non-version message before version handshake");
3105  return;
3106  }
3107 
3108  // At this point, the outgoing message serialization version can't change.
3109  const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
3110 
3111  if (msg_type == NetMsgType::VERACK) {
3112  if (pfrom.fSuccessfullyConnected) {
3113  return;
3114  }
3115 
3116  if (!pfrom.IsInboundConn()) {
3117  // Mark this node as currently connected, so we update its timestamp
3118  // later.
3119  LOCK(cs_main);
3120  State(pfrom.GetId())->fCurrentlyConnected = true;
3121  LogPrintf(
3122  "New outbound peer connected: version: %d, blocks=%d, "
3123  "peer=%d%s (%s)\n",
3124  pfrom.nVersion.load(), pfrom.nStartingHeight, pfrom.GetId(),
3125  (fLogIPs ? strprintf(", peeraddr=%s", pfrom.addr.ToString())
3126  : ""),
3127  pfrom.m_tx_relay == nullptr ? "block-relay" : "full-relay");
3128  }
3129 
3130  if (pfrom.GetCommonVersion() >= SENDHEADERS_VERSION) {
3131  // Tell our peer we prefer to receive headers rather than inv's
3132  // We send this to non-NODE NETWORK peers as well, because even
3133  // non-NODE NETWORK peers can announce blocks (such as pruning
3134  // nodes)
3135  m_connman.PushMessage(&pfrom,
3136  msgMaker.Make(NetMsgType::SENDHEADERS));
3137  }
3138 
3139  if (pfrom.GetCommonVersion() >= SHORT_IDS_BLOCKS_VERSION) {
3140  // Tell our peer we are willing to provide version 1 or 2
3141  // cmpctblocks. However, we do not request new block announcements
3142  // using cmpctblock messages. We send this to non-NODE NETWORK peers
3143  // as well, because they may wish to request compact blocks from us.
3144  bool fAnnounceUsingCMPCTBLOCK = false;
3145  uint64_t nCMPCTBLOCKVersion = 1;
3146  m_connman.PushMessage(&pfrom,
3147  msgMaker.Make(NetMsgType::SENDCMPCT,
3148  fAnnounceUsingCMPCTBLOCK,
3149  nCMPCTBLOCKVersion));
3150  }
3151 
3152  if ((pfrom.nServices & NODE_AVALANCHE) && g_avalanche &&
3154  if (g_avalanche->sendHello(&pfrom)) {
3155  LogPrint(BCLog::AVALANCHE, "Send avahello to peer %d\n",
3156  pfrom.GetId());
3157 
3158  auto localProof = g_avalanche->getLocalProof();
3159  // If we sent a hello message, we should have a proof
3160  assert(localProof);
3161 
3162  // Add our proof id to the list or the recently announced proof
3163  // INVs to this peer. This is used for filtering which INV can
3164  // be requested for download.
3165  LOCK(cs_main);
3166  State(pfrom.GetId())
3167  ->m_recently_announced_proofs.insert(localProof->getId());
3168  }
3169  }
3170 
3171  pfrom.fSuccessfullyConnected = true;
3172  return;
3173  }
3174 
3175  if (!pfrom.fSuccessfullyConnected) {
3176  // Must have a verack message before anything else
3177  Misbehaving(pfrom, 10, "non-verack message before version handshake");
3178  return;
3179  }
3180 
3181  if (msg_type == NetMsgType::ADDR || msg_type == NetMsgType::ADDRV2) {
3182  int stream_version = vRecv.GetVersion();
3183  if (msg_type == NetMsgType::ADDRV2) {
3184  // Add ADDRV2_FORMAT to the version so that the CNetAddr and
3185  // CAddress unserialize methods know that an address in v2 format is
3186  // coming.
3187  stream_version |= ADDRV2_FORMAT;
3188  }
3189 
3190  OverrideStream<CDataStream> s(&vRecv, vRecv.GetType(), stream_version);
3191  std::vector<CAddress> vAddr;
3192 
3193  s >> vAddr;
3194 
3195  if (!pfrom.RelayAddrsWithConn()) {
3196  return;
3197  }
3198  if (vAddr.size() > MAX_ADDR_TO_SEND) {
3199  Misbehaving(
3200  pfrom, 20,
3201  strprintf("%s message size = %u", msg_type, vAddr.size()));
3202  return;
3203  }
3204 
3205  // Store the new addresses
3206  std::vector<CAddress> vAddrOk;
3207  int64_t nNow = GetAdjustedTime();
3208  int64_t nSince = nNow - 10 * 60;
3209  for (CAddress &addr : vAddr) {
3210  if (interruptMsgProc) {
3211  return;
3212  }
3213 
3214  // We only bother storing full nodes, though this may include things
3215  // which we would not make an outbound connection to, in part
3216  // because we may make feeler connections to them.
3217  if (!MayHaveUsefulAddressDB(addr.nServices) &&
3219  continue;
3220  }
3221 
3222  if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60) {
3223  addr.nTime = nNow - 5 * 24 * 60 * 60;
3224  }
3225  pfrom.AddAddressKnown(addr);
3226  if (m_banman &&
3227  (m_banman->IsDiscouraged(addr) || m_banman->IsBanned(addr))) {
3228  // Do not process banned/discouraged addresses beyond
3229  // remembering we received them
3230  continue;
3231  }
3232  bool fReachable = IsReachable(addr);
3233  if (addr.nTime > nSince && !pfrom.fGetAddr && vAddr.size() <= 10 &&
3234  addr.IsRoutable()) {
3235  // Relay to a limited number of other nodes
3236  RelayAddress(addr, fReachable, m_connman);
3237  }
3238  // Do not store addresses outside our network
3239  if (fReachable) {
3240  vAddrOk.push_back(addr);
3241  }
3242  }
3243 
3244  m_connman.AddNewAddresses(vAddrOk, pfrom.addr, 2 * 60 * 60);
3245  if (vAddr.size() < 1000) {
3246  pfrom.fGetAddr = false;
3247  }
3248  if (pfrom.IsAddrFetchConn()) {
3249  pfrom.fDisconnect = true;
3250  }
3251  return;
3252  }
3253 
3254  if (msg_type == NetMsgType::SENDADDRV2) {
3255  pfrom.m_wants_addrv2 = true;
3256  return;
3257  }
3258 
3259  if (msg_type == NetMsgType::SENDHEADERS) {
3260  LOCK(cs_main);
3261  State(pfrom.GetId())->fPreferHeaders = true;
3262  return;
3263  }
3264 
3265  if (msg_type == NetMsgType::SENDCMPCT) {
3266  bool fAnnounceUsingCMPCTBLOCK = false;
3267  uint64_t nCMPCTBLOCKVersion = 0;
3268  vRecv >> fAnnounceUsingCMPCTBLOCK >> nCMPCTBLOCKVersion;
3269  if (nCMPCTBLOCKVersion == 1) {
3270  LOCK(cs_main);
3271  // fProvidesHeaderAndIDs is used to "lock in" version of compact
3272  // blocks we send.
3273  if (!State(pfrom.GetId())->fProvidesHeaderAndIDs) {
3274  State(pfrom.GetId())->fProvidesHeaderAndIDs = true;
3275  }
3276 
3277  State(pfrom.GetId())->fPreferHeaderAndIDs =
3278  fAnnounceUsingCMPCTBLOCK;
3279  if (!State(pfrom.GetId())->fSupportsDesiredCmpctVersion) {
3280  State(pfrom.GetId())->fSupportsDesiredCmpctVersion = true;
3281  }
3282  }
3283  return;
3284  }
3285 
3286  if (msg_type == NetMsgType::INV) {
3287  std::vector<CInv> vInv;
3288  vRecv >> vInv;
3289  if (vInv.size() > MAX_INV_SZ) {
3290  Misbehaving(pfrom, 20,
3291  strprintf("inv message size = %u", vInv.size()));
3292  return;
3293  }
3294 
3295  // We won't accept tx inv's if we're in blocks-only mode, or this is a
3296  // block-relay-only peer
3297  bool fBlocksOnly = !g_relay_txes || (pfrom.m_tx_relay == nullptr);
3298 
3299  // Allow peers with relay permission to send data other than blocks
3300  // in blocks only mode
3301  if (pfrom.HasPermission(PF_RELAY)) {
3302  fBlocksOnly = false;
3303  }
3304 
3305  const auto current_time = GetTime<std::chrono::microseconds>();
3306  std::optional<BlockHash> best_block;
3307 
3308  auto logInv = [&](const CInv &inv, bool fAlreadyHave) {
3309  LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(),
3310  fAlreadyHave ? "have" : "new", pfrom.GetId());
3311  };
3312 
3313  for (CInv &inv : vInv) {
3314  if (interruptMsgProc) {
3315  return;
3316  }
3317 
3318  if (inv.IsMsgBlk()) {
3319  LOCK(cs_main);
3320  const bool fAlreadyHave = AlreadyHaveBlock(BlockHash(inv.hash));
3321  logInv(inv, fAlreadyHave);
3322 
3323  const BlockHash hash{inv.hash};
3324  UpdateBlockAvailability(pfrom.GetId(), hash);
3325  if (!fAlreadyHave && !fImporting && !fReindex &&
3326  !mapBlocksInFlight.count(hash)) {
3327  // Headers-first is the primary method of announcement on
3328  // the network. If a node fell back to sending blocks by
3329  // inv, it's probably for a re-org. The final block hash
3330  // provided should be the highest, so send a getheaders and
3331  // then fetch the blocks we need to catch up.
3332  best_block = std::move(hash);
3333  }
3334 
3335  continue;
3336  }
3337 
3338  if (inv.IsMsgProof()) {
3339  const avalanche::ProofId proofid(inv.hash);
3340  const bool fAlreadyHave = AlreadyHaveProof(proofid);
3341  logInv(inv, fAlreadyHave);
3342  pfrom.AddKnownProof(proofid);
3343 
3344  if (!fAlreadyHave && g_avalanche && isAvalancheEnabled(gArgs)) {
3345  const bool preferred = isPreferredDownloadPeer(pfrom);
3346 
3348  AddProofAnnouncement(pfrom, proofid, current_time,
3349  preferred);
3350  }
3351  continue;
3352  }
3353 
3354  if (inv.IsMsgTx()) {
3355  LOCK(cs_main);
3356  const TxId txid(inv.hash);
3357  const bool fAlreadyHave = AlreadyHaveTx(txid, m_mempool);
3358  logInv(inv, fAlreadyHave);
3359 
3360  pfrom.AddKnownTx(txid);
3361  if (fBlocksOnly) {
3363  "transaction (%s) inv sent in violation of "
3364  "protocol, disconnecting peer=%d\n",
3365  txid.ToString(), pfrom.GetId());
3366  pfrom.fDisconnect = true;
3367  return;
3368  } else if (!fAlreadyHave && !m_chainman.ActiveChainstate()
3370  AddTxAnnouncement(pfrom, txid, current_time);
3371  }
3372 
3373  continue;
3374  }
3375 
3377  "Unknown inv type \"%s\" received from peer=%d\n",
3378  inv.ToString(), pfrom.GetId());
3379  }
3380 
3381  if (best_block) {
3383  &pfrom,
3384  msgMaker.Make(NetMsgType::GETHEADERS,
3385  ::ChainActive().GetLocator(pindexBestHeader),
3386  *best_block));
3387  LogPrint(BCLog::NET, "getheaders (%d) %s to peer=%d\n",
3388  pindexBestHeader->nHeight, best_block->ToString(),
3389  pfrom.GetId());
3390  }
3391 
3392  return;
3393  }
3394 
3395  if (msg_type == NetMsgType::GETDATA) {
3396  std::vector<CInv> vInv;
3397  vRecv >> vInv;
3398  if (vInv.size() > MAX_INV_SZ) {
3399  Misbehaving(pfrom, 20,
3400  strprintf("getdata message size = %u", vInv.size()));
3401  return;
3402  }
3403 
3404  LogPrint(BCLog::NET, "received getdata (%u invsz) peer=%d\n",
3405  vInv.size(), pfrom.GetId());
3406 
3407  if (vInv.size() > 0) {
3408  LogPrint(BCLog::NET, "received getdata for: %s peer=%d\n",
3409  vInv[0].ToString(), pfrom.GetId());
3410  }
3411 
3412  {
3413  LOCK(peer->m_getdata_requests_mutex);
3414  peer->m_getdata_requests.insert(peer->m_getdata_requests.end(),
3415  vInv.begin(), vInv.end());
3416  ProcessGetData(config, pfrom, *peer, m_connman, m_mempool,
3417  interruptMsgProc);
3418  }
3419 
3420  return;
3421  }
3422 
3423  if (msg_type == NetMsgType::GETBLOCKS) {
3424  CBlockLocator locator;
3425  uint256 hashStop;
3426  vRecv >> locator >> hashStop;
3427 
3428  if (locator.vHave.size() > MAX_LOCATOR_SZ) {
3430  "getblocks locator size %lld > %d, disconnect peer=%d\n",
3431  locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.GetId());
3432  pfrom.fDisconnect = true;
3433  return;
3434  }
3435 
3436  // We might have announced the currently-being-connected tip using a
3437  // compact block, which resulted in the peer sending a getblocks
3438  // request, which we would otherwise respond to without the new block.
3439  // To avoid this situation we simply verify that we are on our best
3440  // known chain now. This is super overkill, but we handle it better
3441  // for getheaders requests, and there are no known nodes which support
3442  // compact blocks but still use getblocks to request blocks.
3443  {
3444  std::shared_ptr<const CBlock> a_recent_block;
3445  {
3447  a_recent_block = most_recent_block;
3448  }
3449  BlockValidationState state;
3450  if (!ActivateBestChain(config, state, a_recent_block)) {
3451  LogPrint(BCLog::NET, "failed to activate chain (%s)\n",
3452  state.ToString());
3453  }
3454  }
3455 
3456  LOCK(cs_main);
3457 
3458  // Find the last block the caller has in the main chain
3459  const CBlockIndex *pindex =
3460  FindForkInGlobalIndex(::ChainActive(), locator);
3461 
3462  // Send the rest of the chain
3463  if (pindex) {
3464  pindex = ::ChainActive().Next(pindex);
3465  }
3466  int nLimit = 500;
3467  LogPrint(BCLog::NET, "getblocks %d to %s limit %d from peer=%d\n",
3468  (pindex ? pindex->nHeight : -1),
3469  hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit,
3470  pfrom.GetId());
3471  for (; pindex; pindex = ::ChainActive().Next(pindex)) {
3472  if (pindex->GetBlockHash() == hashStop) {
3473  LogPrint(BCLog::NET, " getblocks stopping at %d %s\n",
3474  pindex->nHeight, pindex->GetBlockHash().ToString());
3475  break;
3476  }
3477  // If pruning, don't inv blocks unless we have on disk and are
3478  // likely to still have for some reasonable time window (1 hour)
3479  // that block relay might require.
3480  const int nPrunedBlocksLikelyToHave =
3483  if (fPruneMode &&
3484  (!pindex->nStatus.hasData() ||
3485  pindex->nHeight <= ::ChainActive().Tip()->nHeight -
3486  nPrunedBlocksLikelyToHave)) {
3487  LogPrint(
3488  BCLog::NET,
3489  " getblocks stopping, pruned or too old block at %d %s\n",
3490  pindex->nHeight, pindex->GetBlockHash().ToString());
3491  break;
3492  }
3493  WITH_LOCK(pfrom.cs_inventory, pfrom.vInventoryBlockToSend.push_back(
3494  pindex->GetBlockHash()));
3495  if (--nLimit <= 0) {
3496  // When this block is requested, we'll send an inv that'll
3497  // trigger the peer to getblocks the next batch of inventory.
3498  LogPrint(BCLog::NET, " getblocks stopping at limit %d %s\n",
3499  pindex->nHeight, pindex->GetBlockHash().ToString());
3500  pfrom.hashContinue = pindex->GetBlockHash();
3501  break;
3502  }
3503  }
3504  return;
3505  }
3506 
3507  if (msg_type == NetMsgType::GETBLOCKTXN) {
3509  vRecv >> req;
3510 
3511  std::shared_ptr<const CBlock> recent_block;
3512  {
3514  if (most_recent_block_hash == req.blockhash) {
3515  recent_block = most_recent_block;
3516  }
3517  // Unlock cs_most_recent_block to avoid cs_main lock inversion
3518  }
3519  if (recent_block) {
3520  SendBlockTransactions(pfrom, *recent_block, req);
3521  return;
3522  }
3523 
3524  {
3525  LOCK(cs_main);
3526 
3527  const CBlockIndex *pindex = LookupBlockIndex(req.blockhash);
3528  if (!pindex || !pindex->nStatus.hasData()) {
3529  LogPrint(
3530  BCLog::NET,
3531  "Peer %d sent us a getblocktxn for a block we don't have\n",
3532  pfrom.GetId());
3533  return;
3534  }
3535 
3536  if (pindex->nHeight >=
3537  ::ChainActive().Height() - MAX_BLOCKTXN_DEPTH) {
3538  CBlock block;
3539  bool ret = ReadBlockFromDisk(block, pindex,
3541  assert(ret);
3542 
3543  SendBlockTransactions(pfrom, block, req);
3544  return;
3545  }
3546  }
3547 
3548  // If an older block is requested (should never happen in practice,
3549  // but can happen in tests) send a block response instead of a
3550  // blocktxn response. Sending a full block response instead of a
3551  // small blocktxn response is preferable in the case where a peer
3552  // might maliciously send lots of getblocktxn requests to trigger
3553  // expensive disk reads, because it will require the peer to
3554  // actually receive all the data read from disk over the network.
3556  "Peer %d sent us a getblocktxn for a block > %i deep\n",
3557  pfrom.GetId(), MAX_BLOCKTXN_DEPTH);
3558  CInv inv;
3559  inv.type = MSG_BLOCK;
3560  inv.hash = req.blockhash;
3561  WITH_LOCK(peer->m_getdata_requests_mutex,
3562  peer->m_getdata_requests.push_back(inv));
3563  // The message processing loop will go around again (without pausing)
3564  // and we'll respond then (without cs_main)
3565  return;
3566  }
3567 
3568  if (msg_type == NetMsgType::GETHEADERS) {
3569  CBlockLocator locator;
3570  BlockHash hashStop;
3571  vRecv >> locator >> hashStop;
3572 
3573  if (locator.vHave.size() > MAX_LOCATOR_SZ) {
3575  "getheaders locator size %lld > %d, disconnect peer=%d\n",
3576  locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.GetId());
3577  pfrom.fDisconnect = true;
3578  return;
3579  }
3580 
3581  LOCK(cs_main);
3582  if (::ChainstateActive().IsInitialBlockDownload() &&
3583  !pfrom.HasPermission(PF_DOWNLOAD)) {
3585  "Ignoring getheaders from peer=%d because node is in "
3586  "initial block download\n",
3587  pfrom.GetId());
3588  return;
3589  }
3590 
3591  CNodeState *nodestate = State(pfrom.GetId());
3592  const CBlockIndex *pindex = nullptr;
3593  if (locator.IsNull()) {
3594  // If locator is null, return the hashStop block
3595  pindex = LookupBlockIndex(hashStop);
3596  if (!pindex) {
3597  return;
3598  }
3599 
3602  "%s: ignoring request from peer=%i for old block "
3603  "header that isn't in the main chain\n",
3604  __func__, pfrom.GetId());
3605  return;
3606  }
3607  } else {
3608  // Find the last block the caller has in the main chain
3609  pindex = FindForkInGlobalIndex(::ChainActive(), locator);
3610  if (pindex) {
3611  pindex = ::ChainActive().Next(pindex);
3612  }
3613  }
3614 
3615  // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx
3616  // count at the end
3617  std::vector<CBlock> vHeaders;
3618  int nLimit = MAX_HEADERS_RESULTS;
3619  LogPrint(BCLog::NET, "getheaders %d to %s from peer=%d\n",
3620  (pindex ? pindex->nHeight : -1),
3621  hashStop.IsNull() ? "end" : hashStop.ToString(),
3622  pfrom.GetId());
3623  for (; pindex; pindex = ::ChainActive().Next(pindex)) {
3624  vHeaders.push_back(pindex->GetBlockHeader());
3625  if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop) {
3626  break;
3627  }
3628  }
3629  // pindex can be nullptr either if we sent ::ChainActive().Tip() OR
3630  // if our peer has ::ChainActive().Tip() (and thus we are sending an
3631  // empty headers message). In both cases it's safe to update
3632  // pindexBestHeaderSent to be our tip.
3633  //
3634  // It is important that we simply reset the BestHeaderSent value here,
3635  // and not max(BestHeaderSent, newHeaderSent). We might have announced
3636  // the currently-being-connected tip using a compact block, which
3637  // resulted in the peer sending a headers request, which we respond to
3638  // without the new block. By resetting the BestHeaderSent, we ensure we
3639  // will re-announce the new block via headers (or compact blocks again)
3640  // in the SendMessages logic.
3641  nodestate->pindexBestHeaderSent =
3642  pindex ? pindex : ::ChainActive().Tip();
3643  m_connman.PushMessage(&pfrom,
3644  msgMaker.Make(NetMsgType::HEADERS, vHeaders));
3645  return;
3646  }
3647 
3648  if (msg_type == NetMsgType::TX) {
3649  // Stop processing the transaction early if
3650  // 1) We are in blocks only mode and peer has no relay permission
3651  // 2) This peer is a block-relay-only peer
3652  if ((!g_relay_txes && !pfrom.HasPermission(PF_RELAY)) ||
3653  (pfrom.m_tx_relay == nullptr)) {
3655  "transaction sent in violation of protocol peer=%d\n",
3656  pfrom.GetId());
3657  pfrom.fDisconnect = true;
3658  return;
3659  }
3660 
3661  CTransactionRef ptx;
3662  vRecv >> ptx;
3663  const CTransaction &tx = *ptx;
3664  const TxId &txid = tx.GetId();
3665  pfrom.AddKnownTx(txid);
3666 
3668 
3669  TxValidationState state;
3670 
3671  m_txrequest.ReceivedResponse(pfrom.GetId(), txid);
3672 
3673  if (!AlreadyHaveTx(txid, m_mempool) &&
3674  AcceptToMemoryPool(config, m_mempool, state, ptx,
3675  false /* bypass_limits */)) {
3676  m_mempool.check(&::ChainstateActive().CoinsTip());
3677  // As this version of the transaction was acceptable, we can forget
3678  // about any requests for it.
3679  m_txrequest.ForgetInvId(tx.GetId());
3681  for (size_t i = 0; i < tx.vout.size(); i++) {
3682  auto it_by_prev =
3683  mapOrphanTransactionsByPrev.find(COutPoint(txid, i));
3684  if (it_by_prev != mapOrphanTransactionsByPrev.end()) {
3685  for (const auto &elem : it_by_prev->second) {
3686  peer->m_orphan_work_set.insert(elem->first);
3687  }
3688  }
3689  }
3690 
3691  pfrom.nLastTXTime = GetTime();
3692 
3694  "AcceptToMemoryPool: peer=%d: accepted %s "
3695  "(poolsz %u txn, %u kB)\n",
3696  pfrom.GetId(), tx.GetId().ToString(), m_mempool.size(),
3697  m_mempool.DynamicMemoryUsage() / 1000);
3698 
3699  // Recursively process any orphan transactions that depended on this
3700  // one
3701  ProcessOrphanTx(config, peer->m_orphan_work_set);
3702  } else if (state.GetResult() == TxValidationResult::TX_MISSING_INPUTS) {
3703  // It may be the case that the orphans parents have all been
3704  // rejected.
3705  bool fRejectedParents = false;
3706 
3707  // Deduplicate parent txids, so that we don't have to loop over
3708  // the same parent txid more than once down below.
3709  std::vector<TxId> unique_parents;
3710  unique_parents.reserve(tx.vin.size());
3711  for (const CTxIn &txin : tx.vin) {
3712  // We start with all parents, and then remove duplicates below.
3713  unique_parents.push_back(txin.prevout.GetTxId());
3714  }
3715  std::sort(unique_parents.begin(), unique_parents.end());
3716  unique_parents.erase(
3717  std::unique(unique_parents.begin(), unique_parents.end()),
3718  unique_parents.end());
3719  for (const TxId &parent_txid : unique_parents) {
3720  if (recentRejects->contains(parent_txid)) {
3721  fRejectedParents = true;
3722  break;
3723  }
3724  }
3725  if (!fRejectedParents) {
3726  const auto current_time = GetTime<std::chrono::microseconds>();
3727 
3728  for (const TxId &parent_txid : unique_parents) {
3729  // FIXME: MSG_TX should use a TxHash, not a TxId.
3730  pfrom.AddKnownTx(parent_txid);
3731  if (!AlreadyHaveTx(parent_txid, m_mempool)) {
3732  AddTxAnnouncement(pfrom, parent_txid, current_time);
3733  }
3734  }
3735  AddOrphanTx(ptx, pfrom.GetId());
3736 
3737  // Once added to the orphan pool, a tx is considered
3738  // AlreadyHave, and we shouldn't request it anymore.
3739  m_txrequest.ForgetInvId(tx.GetId());
3740 
3741  // DoS prevention: do not allow mapOrphanTransactions to grow
3742  // unbounded (see CVE-2012-3789)
3743  unsigned int nMaxOrphanTx = (unsigned int)std::max(
3744  int64_t(0), gArgs.GetArg("-maxorphantx",
3746  unsigned int nEvicted = LimitOrphanTxSize(nMaxOrphanTx);
3747  if (nEvicted > 0) {
3749  "mapOrphan overflow, removed %u tx\n", nEvicted);
3750  }
3751  } else {
3753  "not keeping orphan with rejected parents %s\n",
3754  tx.GetId().ToString());
3755  // We will continue to reject this tx since it has rejected
3756  // parents so avoid re-requesting it from other peers.
3757  recentRejects->insert(tx.GetId());
3758  m_txrequest.ForgetInvId(tx.GetId());
3759  }
3760  } else {
3761  assert(recentRejects);
3762  recentRejects->insert(tx.GetId());
3763  m_txrequest.ForgetInvId(tx.GetId());
3764 
3765  if (RecursiveDynamicUsage(*ptx) < 100000) {
3767  }
3768 
3769  if (pfrom.HasPermission(PF_FORCERELAY)) {
3770  // Always relay transactions received from peers with
3771  // forcerelay permission, even if they were already in the
3772  // mempool, allowing the node to function as a gateway for
3773  // nodes hidden behind it.
3774  if (!m_mempool.exists(tx.GetId())) {
3775  LogPrintf("Not relaying non-mempool transaction %s from "
3776  "forcerelay peer=%d\n",
3777  tx.GetId().ToString(), pfrom.GetId());
3778  } else {
3779  LogPrintf("Force relaying tx %s from peer=%d\n",
3780  tx.GetId().ToString(), pfrom.GetId());
3782  }
3783  }
3784  }
3785 
3786  // If a tx has been detected by recentRejects, we will have reached
3787  // this point and the tx will have been ignored. Because we haven't run
3788  // the tx through AcceptToMemoryPool, we won't have computed a DoS
3789  // score for it or determined exactly why we consider it invalid.
3790  //
3791  // This means we won't penalize any peer subsequently relaying a DoSy
3792  // tx (even if we penalized the first peer who gave it to us) because
3793  // we have to account for recentRejects showing false positives. In
3794  // other words, we shouldn't penalize a peer if we aren't *sure* they
3795  // submitted a DoSy tx.
3796  //
3797  // Note that recentRejects doesn't just record DoSy or invalid
3798  // transactions, but any tx not accepted by the mempool, which may be
3799  // due to node policy (vs. consensus). So we can't blanket penalize a
3800  // peer simply for relaying a tx that our recentRejects has caught,
3801  // regardless of false positives.
3802 
3803  if (state.IsInvalid()) {
3805  "%s from peer=%d was not accepted: %s\n",
3806  tx.GetHash().ToString(), pfrom.GetId(), state.ToString());
3807  MaybePunishNodeForTx(pfrom.GetId(), state);
3808  }
3809  return;
3810  }
3811 
3812  if (msg_type == NetMsgType::CMPCTBLOCK) {
3813  // Ignore cmpctblock received while importing
3814  if (fImporting || fReindex) {
3816  "Unexpected cmpctblock message received from peer %d\n",
3817  pfrom.GetId());
3818  return;
3819  }
3820 
3821  CBlockHeaderAndShortTxIDs cmpctblock;
3822  vRecv >> cmpctblock;
3823 
3824  bool received_new_header = false;
3825 
3826  {
3827  LOCK(cs_main);
3828 
3829  if (!LookupBlockIndex(cmpctblock.header.hashPrevBlock)) {
3830  // Doesn't connect (or is genesis), instead of DoSing in
3831  // AcceptBlockHeader, request deeper headers
3832  if (!::ChainstateActive().IsInitialBlockDownload()) {
3834  &pfrom, msgMaker.Make(NetMsgType::GETHEADERS,
3835  ::ChainActive().GetLocator(
3837  uint256()));
3838  }
3839  return;
3840  }
3841 
3842  if (!LookupBlockIndex(cmpctblock.header.GetHash())) {
3843  received_new_header = true;
3844  }
3845  }
3846 
3847  const CBlockIndex *pindex = nullptr;
3848  BlockValidationState state;
3849  if (!m_chainman.ProcessNewBlockHeaders(config, {cmpctblock.header},
3850  state, &pindex)) {
3851  if (state.IsInvalid()) {
3852  MaybePunishNodeForBlock(pfrom.GetId(), state,
3853  /*via_compact_block*/ true,
3854  "invalid header via cmpctblock");
3855  return;
3856  }
3857  }
3858 
3859  // When we succeed in decoding a block's txids from a cmpctblock
3860  // message we typically jump to the BLOCKTXN handling code, with a
3861  // dummy (empty) BLOCKTXN message, to re-use the logic there in
3862  // completing processing of the putative block (without cs_main).
3863  bool fProcessBLOCKTXN = false;
3865 
3866  // If we end up treating this as a plain headers message, call that as
3867  // well
3868  // without cs_main.
3869  bool fRevertToHeaderProcessing = false;
3870 
3871  // Keep a CBlock for "optimistic" compactblock reconstructions (see
3872  // below)
3873  std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
3874  bool fBlockReconstructed = false;
3875 
3876  {
3878  // If AcceptBlockHeader returned true, it set pindex
3879  assert(pindex);
3880  UpdateBlockAvailability(pfrom.GetId(), pindex->GetBlockHash());
3881 
3882  CNodeState *nodestate = State(pfrom.GetId());
3883 
3884  // If this was a new header with more work than our tip, update the
3885  // peer's last block announcement time
3886  if (received_new_header &&
3887  pindex->nChainWork > ::ChainActive().Tip()->nChainWork) {
3888  nodestate->m_last_block_announcement = GetTime();
3889  }
3890 
3891  std::map<BlockHash,
3892  std::pair<NodeId, std::list<QueuedBlock>::iterator>>::
3893  iterator blockInFlightIt =
3894  mapBlocksInFlight.find(pindex->GetBlockHash());
3895  bool fAlreadyInFlight = blockInFlightIt != mapBlocksInFlight.end();
3896 
3897  if (pindex->nStatus.hasData()) {
3898  // Nothing to do here
3899  return;
3900  }
3901 
3902  if (pindex->nChainWork <=
3903  ::ChainActive()
3904  .Tip()
3905  ->nChainWork || // We know something better
3906  pindex->nTx != 0) {
3907  // We had this block at some point, but pruned it
3908  if (fAlreadyInFlight) {
3909  // We requested this block for some reason, but our mempool
3910  // will probably be useless so we just grab the block via
3911  // normal getdata.
3912  std::vector<CInv> vInv(1);
3913  vInv[0] = CInv(MSG_BLOCK, cmpctblock.header.GetHash());
3915  &pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
3916  }
3917  return;
3918  }
3919 
3920  // If we're not close to tip yet, give up and let parallel block
3921  // fetch work its magic.
3922  if (!fAlreadyInFlight &&
3923  !CanDirectFetch(m_chainparams.GetConsensus())) {
3924  return;
3925  }
3926 
3927  // We want to be a bit conservative just to be extra careful about
3928  // DoS possibilities in compact block processing...
3929  if (pindex->nHeight <= ::ChainActive().Height() + 2) {
3930  if ((!fAlreadyInFlight && nodestate->nBlocksInFlight <
3932  (fAlreadyInFlight &&
3933  blockInFlightIt->second.first == pfrom.GetId())) {
3934  std::list<QueuedBlock>::iterator *queuedBlockIt = nullptr;
3935  if (!MarkBlockAsInFlight(config, m_mempool, pfrom.GetId(),
3936  pindex->GetBlockHash(),
3938  pindex, &queuedBlockIt)) {
3939  if (!(*queuedBlockIt)->partialBlock) {
3940  (*queuedBlockIt)
3941  ->partialBlock.reset(
3942  new PartiallyDownloadedBlock(config,
3943  &m_mempool));
3944  } else {
3945  // The block was already in flight using compact
3946  // blocks from the same peer.
3947  LogPrint(BCLog::NET, "Peer sent us compact block "
3948  "we were already syncing!\n");
3949  return;
3950  }
3951  }
3952 
3953  PartiallyDownloadedBlock &partialBlock =
3954  *(*queuedBlockIt)->partialBlock;
3955  ReadStatus status =
3956  partialBlock.InitData(cmpctblock, vExtraTxnForCompact);
3957  if (status == READ_STATUS_INVALID) {
3958  // Reset in-flight state in case Misbehaving does not
3959  // result in a disconnect
3960  MarkBlockAsReceived(pindex->GetBlockHash());
3961  Misbehaving(pfrom, 100, "invalid compact block");
3962  return;
3963  } else if (status == READ_STATUS_FAILED) {
3964  // Duplicate txindices, the block is now in-flight, so
3965  // just request it.
3966  std::vector<CInv> vInv(1);
3967  vInv[0] = CInv(MSG_BLOCK, cmpctblock.header.GetHash());
3969  &pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
3970  return;
3971  }
3972 
3974  for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) {
3975  if (!partialBlock.IsTxAvailable(i)) {
3976  req.indices.push_back(i);
3977  }
3978  }
3979  if (req.indices.empty()) {
3980  // Dirty hack to jump to BLOCKTXN code (TODO: move
3981  // message handling into their own functions)
3982  BlockTransactions txn;
3983  txn.blockhash = cmpctblock.header.GetHash();
3984  blockTxnMsg << txn;
3985  fProcessBLOCKTXN = true;
3986  } else {
3987  req.blockhash = pindex->GetBlockHash();
3989  &pfrom,
3990  msgMaker.Make(NetMsgType::GETBLOCKTXN, req));
3991  }
3992  } else {
3993  // This block is either already in flight from a different
3994  // peer, or this peer has too many blocks outstanding to
3995  // download from. Optimistically try to reconstruct anyway
3996  // since we might be able to without any round trips.
3997  PartiallyDownloadedBlock tempBlock(config, &m_mempool);
3998  ReadStatus status =
3999  tempBlock.InitData(cmpctblock, vExtraTxnForCompact);
4000  if (status != READ_STATUS_OK) {
4001  // TODO: don't ignore failures
4002  return;
4003  }
4004  std::vector<CTransactionRef> dummy;
4005  status = tempBlock.FillBlock(*pblock, dummy);
4006  if (status == READ_STATUS_OK) {
4007  fBlockReconstructed = true;
4008  }
4009  }
4010  } else {
4011  if (fAlreadyInFlight) {
4012  // We requested this block, but its far into the future, so
4013  // our mempool will probably be useless - request the block
4014  // normally.
4015  std::vector<CInv> vInv(1);
4016  vInv[0] = CInv(MSG_BLOCK, cmpctblock.header.GetHash());
4018  &pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
4019  return;
4020  } else {
4021  // If this was an announce-cmpctblock, we want the same
4022  // treatment as a header message.
4023  fRevertToHeaderProcessing = true;
4024  }
4025  }
4026  } // cs_main
4027 
4028  if (fProcessBLOCKTXN) {
4029  return ProcessMessage(config, pfrom, NetMsgType::BLOCKTXN,
4030  blockTxnMsg, time_received, interruptMsgProc);
4031  }
4032 
4033  if (fRevertToHeaderProcessing) {
4034  // Headers received from HB compact block peers are permitted to be
4035  // relayed before full validation (see BIP 152), so we don't want to
4036  // disconnect the peer if the header turns out to be for an invalid
4037  // block. Note that if a peer tries to build on an invalid chain,
4038  // that will be detected and the peer will be banned.
4039  return ProcessHeadersMessage(config, pfrom, {cmpctblock.header},
4040  /*via_compact_block=*/true);
4041  }
4042 
4043  if (fBlockReconstructed) {
4044  // If we got here, we were able to optimistically reconstruct a
4045  // block that is in flight from some other peer.
4046  {
4047  LOCK(cs_main);
4048  mapBlockSource.emplace(pblock->GetHash(),
4049  std::make_pair(pfrom.GetId(), false));
4050  }
4051  bool fNewBlock = false;
4052  // Setting fForceProcessing to true means that we bypass some of
4053  // our anti-DoS protections in AcceptBlock, which filters
4054  // unrequested blocks that might be trying to waste our resources
4055  // (eg disk space). Because we only try to reconstruct blocks when
4056  // we're close to caught up (via the CanDirectFetch() requirement
4057  // above, combined with the behavior of not requesting blocks until
4058  // we have a chain with at least nMinimumChainWork), and we ignore
4059  // compact blocks with less work than our tip, it is safe to treat
4060  // reconstructed compact blocks as having been requested.
4061  m_chainman.ProcessNewBlock(config, pblock,
4062  /*fForceProcessing=*/true, &fNewBlock);
4063  if (fNewBlock) {
4064  pfrom.nLastBlockTime = GetTime();
4065  } else {
4066  LOCK(cs_main);
4067  mapBlockSource.erase(pblock->GetHash());
4068  }
4069 
4070  // hold cs_main for CBlockIndex::IsValid()
4071  LOCK(cs_main);
4072  if (pindex->IsValid(BlockValidity::TRANSACTIONS)) {
4073  // Clear download state for this block, which is in process from
4074  // some other peer. We do this after calling. ProcessNewBlock so
4075  // that a malleated cmpctblock announcement can't be used to
4076  // interfere with block relay.
4077  MarkBlockAsReceived(pblock->GetHash());
4078  }
4079  }
4080  return;
4081  }
4082 
4083  if (msg_type == NetMsgType::BLOCKTXN) {
4084  // Ignore blocktxn received while importing
4085  if (fImporting || fReindex) {
4087  "Unexpected blocktxn message received from peer %d\n",
4088  pfrom.GetId());
4089  return;
4090  }
4091 
4092  BlockTransactions resp;
4093  vRecv >> resp;
4094 
4095  std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
4096  bool fBlockRead = false;
4097  {
4098  LOCK(cs_main);
4099 
4100  std::map<BlockHash,
4101  std::pair<NodeId, std::list<QueuedBlock>::iterator>>::
4102  iterator it = mapBlocksInFlight.find(resp.blockhash);
4103  if (it == mapBlocksInFlight.end() ||
4104  !it->second.second->partialBlock ||
4105  it->second.first != pfrom.GetId()) {
4107  "Peer %d sent us block transactions for block "
4108  "we weren't expecting\n",
4109  pfrom.GetId());
4110  return;
4111  }
4112 
4113  PartiallyDownloadedBlock &partialBlock =
4114  *it->second.second->partialBlock;
4115  ReadStatus status = partialBlock.FillBlock(*pblock, resp.txn);
4116  if (status == READ_STATUS_INVALID) {
4117  // Reset in-flight state in case of Misbehaving does not
4118  // result in a disconnect.
4119  MarkBlockAsReceived(resp.blockhash);
4120  Misbehaving(
4121  pfrom, 100,
4122  "invalid compact block/non-matching block transactions");
4123  return;
4124  } else if (status == READ_STATUS_FAILED) {
4125  // Might have collided, fall back to getdata now :(
4126  std::vector<CInv> invs;
4127  invs.push_back(CInv(MSG_BLOCK, resp.blockhash));
4128  m_connman.PushMessage(&pfrom,
4129  msgMaker.Make(NetMsgType::GETDATA, invs));
4130  } else {
4131  // Block is either okay, or possibly we received
4132  // READ_STATUS_CHECKBLOCK_FAILED.
4133  // Note that CheckBlock can only fail for one of a few reasons:
4134  // 1. bad-proof-of-work (impossible here, because we've already
4135  // accepted the header)
4136  // 2. merkleroot doesn't match the transactions given (already
4137  // caught in FillBlock with READ_STATUS_FAILED, so
4138  // impossible here)
4139  // 3. the block is otherwise invalid (eg invalid coinbase,
4140  // block is too big, too many legacy sigops, etc).
4141  // So if CheckBlock failed, #3 is the only possibility.
4142  // Under BIP 152, we don't DoS-ban unless proof of work is
4143  // invalid (we don't require all the stateless checks to have
4144  // been run). This is handled below, so just treat this as
4145  // though the block was successfully read, and rely on the
4146  // handling in ProcessNewBlock to ensure the block index is
4147  // updated, etc.
4148 
4149  // it is now an empty pointer
4150  MarkBlockAsReceived(resp.blockhash);
4151  fBlockRead = true;
4152  // mapBlockSource is used for potentially punishing peers and
4153  // updating which peers send us compact blocks, so the race
4154  // between here and cs_main in ProcessNewBlock is fine.
4155  // BIP 152 permits peers to relay compact blocks after
4156  // validating the header only; we should not punish peers
4157  // if the block turns out to be invalid.
4158  mapBlockSource.emplace(resp.blockhash,
4159  std::make_pair(pfrom.GetId(), false));
4160  }
4161  } // Don't hold cs_main when we call into ProcessNewBlock
4162  if (fBlockRead) {
4163  bool fNewBlock = false;
4164  // Since we requested this block (it was in mapBlocksInFlight),
4165  // force it to be processed, even if it would not be a candidate for
4166  // new tip (missing previous block, chain not long enough, etc)
4167  // This bypasses some anti-DoS logic in AcceptBlock (eg to prevent
4168  // disk-space attacks), but this should be safe due to the
4169  // protections in the compact block handler -- see related comment
4170  // in compact block optimistic reconstruction handling.
4171  m_chainman.ProcessNewBlock(config, pblock,
4172  /*fForceProcessing=*/true, &fNewBlock);
4173  if (fNewBlock) {
4174  pfrom.nLastBlockTime = GetTime();
4175  } else {
4176  LOCK(cs_main);
4177  mapBlockSource.erase(pblock->GetHash());
4178  }
4179  }
4180  return;
4181  }
4182 
4183  if (msg_type == NetMsgType::HEADERS) {
4184  // Ignore headers received while importing
4185  if (fImporting || fReindex) {
4187  "Unexpected headers message received from peer %d\n",
4188  pfrom.GetId());
4189  return;
4190  }
4191 
4192  std::vector<CBlockHeader> headers;
4193 
4194  // Bypass the normal CBlock deserialization, as we don't want to risk
4195  // deserializing 2000 full blocks.
4196  unsigned int nCount = ReadCompactSize(vRecv);
4197  if (nCount > MAX_HEADERS_RESULTS) {
4198  Misbehaving(pfrom, 20,
4199  strprintf("too-many-headers: headers message size = %u",
4200  nCount));
4201  return;
4202  }
4203  headers.resize(nCount);
4204  for (unsigned int n = 0; n < nCount; n++) {
4205  vRecv >> headers[n];
4206  // Ignore tx count; assume it is 0.
4207  ReadCompactSize(vRecv);
4208  }
4209 
4210  return ProcessHeadersMessage(config, pfrom, headers,
4211  /*via_compact_block=*/false);
4212  }
4213 
4214  if (msg_type == NetMsgType::BLOCK) {
4215  // Ignore block received while importing
4216  if (fImporting || fReindex) {
4218  "Unexpected block message received from peer %d\n",
4219  pfrom.GetId());
4220  return;
4221  }
4222 
4223  std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
4224  vRecv >> *pblock;
4225 
4226  LogPrint(BCLog::NET, "received block %s peer=%d\n",
4227  pblock->GetHash().ToString(), pfrom.GetId());
4228 
4229  // Process all blocks from whitelisted peers, even if not requested,
4230  // unless we're still syncing with the network. Such an unrequested
4231  // block may still be processed, subject to the conditions in
4232  // AcceptBlock().
4233  bool forceProcessing = pfrom.HasPermission(PF_NOBAN) &&
4235  const BlockHash hash = pblock->GetHash();
4236  {
4237  LOCK(cs_main);
4238  // Also always process if we requested the block explicitly, as we
4239  // may need it even though it is not a candidate for a new best tip.
4240  forceProcessing |= MarkBlockAsReceived(hash);
4241  // mapBlockSource is only used for punishing peers and setting
4242  // which peers send us compact blocks, so the race between here and
4243  // cs_main in ProcessNewBlock is fine.
4244  mapBlockSource.emplace(hash, std::make_pair(pfrom.GetId(), true));
4245  }
4246  bool fNewBlock = false;
4247  m_chainman.ProcessNewBlock(config, pblock, forceProcessing, &fNewBlock);
4248  if (fNewBlock) {
4249  pfrom.nLastBlockTime = GetTime();
4250  } else {
4251  LOCK(cs_main);
4252  mapBlockSource.erase(hash);
4253  }
4254  return;
4255  }
4256 
4257  if (msg_type == NetMsgType::AVAHELLO) {
4258  if (!pfrom.m_avalanche_state) {
4259  pfrom.m_avalanche_state = std::make_unique<CNode::AvalancheState>();
4260  }
4261 
4262  CHashVerifier<CDataStream> verifier(&vRecv);
4263  avalanche::Delegation delegation;
4264  verifier >> delegation;
4265 
4267  CPubKey &pubkey = pfrom.m_avalanche_state->pubkey;
4268  if (!delegation.verify(state, pubkey)) {
4269  Misbehaving(pfrom, 100, "invalid-delegation");
4270  return;
4271  }
4272 
4273  CHashWriter sighasher(SER_GETHASH, 0);
4274  sighasher << delegation.getId();
4275  sighasher << pfrom.nRemoteHostNonce;
4276  sighasher << pfrom.GetLocalNonce();
4277  sighasher << pfrom.nRemoteExtraEntropy;
4278  sighasher << pfrom.GetLocalExtraEntropy();
4279 
4280  SchnorrSig sig;
4281  verifier >> sig;
4282  if (!pubkey.VerifySchnorr(sighasher.GetHash(), sig)) {
4283  Misbehaving(pfrom, 100, "invalid-avahello-signature");
4284  return;
4285  }
4286 
4287  // If we don't know this proof already, add it to the tracker so it can
4288  // be requested.
4289  const avalanche::ProofId proofid(delegation.getProofId());
4290  if (!AlreadyHaveProof(proofid)) {
4291  const bool preferred = isPreferredDownloadPeer(pfrom);
4293  AddProofAnnouncement(pfrom, proofid,
4294  GetTime<std::chrono::microseconds>(),
4295  preferred);
4296  }
4297 
4298  if (gArgs.GetBoolArg("-enableavalanchepeerdiscovery",
4300  // Don't check the return value. If it fails we probably don't know
4301  // about the proof yet.
4302  g_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
4303  return pm.addNode(pfrom.GetId(), proofid);
4304  });
4305  }
4306 
4307  return;
4308  }
4309 
4310  if (msg_type == NetMsgType::AVAPOLL) {
4311  auto now = std::chrono::steady_clock::now();
4312  int64_t cooldown =
4313  gArgs.GetArg("-avacooldown", AVALANCHE_DEFAULT_COOLDOWN);
4314 
4315  {
4316  LOCK(cs_main);
4317  auto &node_state = State(pfrom.GetId())->m_avalanche_state;
4318 
4319  if (now <
4320  node_state.last_poll + std::chrono::milliseconds(cooldown)) {
4321  Misbehaving(pfrom, 20, "avapool-cooldown");
4322  }
4323 
4324  node_state.last_poll = now;
4325  }
4326 
4327  uint64_t round;
4328  Unserialize(vRecv, round);
4329 
4330  unsigned int nCount = ReadCompactSize(vRecv);
4331  if (nCount > AVALANCHE_MAX_ELEMENT_POLL) {
4332  Misbehaving(
4333  pfrom, 20,
4334  strprintf("too-many-ava-poll: poll message size = %u", nCount));
4335  return;
4336  }
4337 
4338  std::vector<avalanche::Vote> votes;
4339  votes.reserve(nCount);
4340 
4341  LogPrint(BCLog::AVALANCHE, "received avalanche poll from peer=%d\n",
4342  pfrom.GetId());
4343 
4344  {
4345  LOCK(cs_main);
4346 
4347  for (unsigned int n = 0; n < nCount; n++) {
4348  CInv inv;
4349  vRecv >> inv;
4350 
4351  const auto insertVote = [&](uint32_t e) {
4352  votes.emplace_back(e, inv.hash);
4353  };
4354 
4355  // Not a block.
4356  if (inv.type != MSG_BLOCK) {
4357  insertVote(-1);
4358  continue;
4359  }
4360 
4361  // We have a block.
4362  const CBlockIndex *pindex =
4363  LookupBlockIndex(BlockHash(inv.hash));
4364 
4365  // Unknown block.
4366  if (!pindex) {
4367  insertVote(-1);
4368  continue;
4369  }
4370 
4371  // Invalid block
4372  if (pindex->nStatus.isInvalid()) {
4373  insertVote(1);
4374  continue;
4375  }
4376 
4377  // Parked block
4378  if (pindex->nStatus.isOnParkedChain()) {
4379  insertVote(2);
4380  continue;
4381  }
4382 
4383  const CBlockIndex *pindexTip = ::ChainActive().Tip();
4384  const CBlockIndex *pindexFork =
4385  LastCommonAncestor(pindex, pindexTip);
4386 
4387  // Active block.
4388  if (pindex == pindexFork) {
4389  insertVote(0);
4390  continue;
4391  }
4392 
4393  // Fork block.
4394  if (pindexFork != pindexTip) {
4395  insertVote(3);
4396  continue;
4397  }
4398 
4399  // Missing block data.
4400  if (!pindex->nStatus.hasData()) {
4401  insertVote(-2);
4402  continue;
4403  }
4404 
4405  // This block is built on top of the tip, we have the data, it
4406  // is pending connection or rejection.
4407  insertVote(-3);
4408  }
4409  }
4410 
4411  // Send the query to the node.
4412  g_avalanche->sendResponse(
4413  &pfrom, avalanche::Response(round, cooldown, std::move(votes)));
4414  return;
4415  }
4416 
4417  if (msg_type == NetMsgType::AVARESPONSE) {
4418  // As long as QUIC is not implemented, we need to sign response and
4419  // verify response's signatures in order to avoid any manipulation of
4420  // messages at the transport level.
4421  CHashVerifier<CDataStream> verifier(&vRecv);
4423  verifier >> response;
4424 
4425  SchnorrSig sig;
4426  vRecv >> sig;
4427  if (!pfrom.m_avalanche_state ||
4428  !pfrom.m_avalanche_state->pubkey.VerifySchnorr(verifier.GetHash(),
4429  sig)) {
4430  Misbehaving(pfrom, 100, "invalid-ava-response-signature");
4431  return;
4432  }
4433 
4434  std::vector<avalanche::BlockUpdate> blockUpdates;
4435  std::vector<avalanche::ProofUpdate> proofUpdates;
4436  int banscore;
4437  std::string error;
4438  if (!g_avalanche->registerVotes(pfrom.GetId(), response, blockUpdates,
4439  proofUpdates, banscore, error)) {
4440  Misbehaving(pfrom, banscore, error);
4441  return;
4442  }
4443 
4444  pfrom.m_avalanche_state->invsVoted(response.GetVotes().size());
4445 
4446  if (blockUpdates.size()) {
4447  for (avalanche::BlockUpdate &u : blockUpdates) {
4448  CBlockIndex *pindex = u.getVoteItem();
4449  switch (u.getStatus()) {
4452  LogPrintf("Avalanche rejected %s, parking\n",
4453  pindex->GetBlockHash().GetHex());
4454  BlockValidationState state;
4455  ::ChainstateActive().ParkBlock(config, state, pindex);
4456  if (!state.IsValid()) {
4457  LogPrintf("ERROR: Database error: %s\n",
4458  state.GetRejectReason());
4459  return;
4460  }
4461  } break;
4464  LogPrintf("Avalanche accepted %s\n",
4465  pindex->GetBlockHash().GetHex());
4466  LOCK(cs_main);
4467  UnparkBlock(pindex);
4468  } break;
4469  }
4470  }
4471 
4472  BlockValidationState state;
4473  if (!ActivateBestChain(config, state)) {
4474  LogPrintf("failed to activate chain (%s)\n", state.ToString());
4475  }
4476  }
4477 
4478  return;
4479  }
4480 
4481  if (msg_type == NetMsgType::AVAPROOF) {
4482  auto proof = std::make_shared<avalanche::Proof>();
4483  vRecv >> *proof;
4484  const avalanche::ProofId &proofid = proof->getId();
4485 
4486  pfrom.AddKnownProof(proofid);
4487 
4488  const NodeId nodeid = pfrom.GetId();
4489 
4490  {
4492  m_proofrequest.ReceivedResponse(nodeid, proofid);
4493 
4494  if (AlreadyHaveProof(proofid)) {
4495  m_proofrequest.ForgetInvId(proofid);
4496  return;
4497  }
4498  }
4499 
4500  // addProof should not be called while cs_proofrequest because it holds
4501  // cs_main and that creates a potential deadlock during shutdown
4502 
4503  if (g_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
4504  return pm.registerProof(proof);
4505  })) {
4506  WITH_LOCK(cs_proofrequest, m_proofrequest.ForgetInvId(proofid));
4507  RelayProof(proofid, m_connman);
4508 
4509  pfrom.nLastProofTime = GetTime();
4510 
4511  LogPrint(BCLog::NET, "New avalanche proof: peer=%d, proofid %s\n",
4512  nodeid, proofid.ToString());
4513  } else {
4514  // If the proof couldn't be added, it can be either orphan or
4515  // invalid. In the latter case we should increase the ban score.
4516  // TODO improve the ban reason by printing the validation state
4517  if (!g_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
4518  return pm.isOrphan(proofid);
4519  })) {
4520  WITH_LOCK(cs_rejectedProofs, rejectedProofs->insert(proofid));
4521  Misbehaving(nodeid, 100, "invalid-avaproof");
4522  }
4523  }
4524  return;
4525  }
4526 
4527  if (msg_type == NetMsgType::GETADDR) {
4528  // This asymmetric behavior for inbound and outbound connections was
4529  // introduced to prevent a fingerprinting attack: an attacker can send
4530  // specific fake addresses to users' AddrMan and later request them by
4531  // sending getaddr messages. Making nodes which are behind NAT and can
4532  // only make outgoing connections ignore the getaddr message mitigates
4533  // the attack.
4534  if (!pfrom.IsInboundConn()) {
4536  "Ignoring \"getaddr\" from %s connection. peer=%d\n",
4537  pfrom.ConnectionTypeAsString(), pfrom.GetId());
4538  return;
4539  }
4540 
4541  // Only send one GetAddr response per connection to reduce resource
4542  // waste and discourage addr stamping of INV announcements.
4543  if (pfrom.fSentAddr) {
4544  LogPrint(BCLog::NET, "Ignoring repeated \"getaddr\". peer=%d\n",
4545  pfrom.GetId());
4546  return;
4547  }
4548  pfrom.fSentAddr = true;
4549 
4550  pfrom.vAddrToSend.clear();
4551  std::vector<CAddress> vAddr;
4552  if (pfrom.HasPermission(PF_ADDR)) {
4553  vAddr =
4555  } else {
4556  vAddr = m_connman.GetAddresses(pfrom, MAX_ADDR_TO_SEND,
4558  }
4559  FastRandomContext insecure_rand;
4560  for (const CAddress &addr : vAddr) {
4561  pfrom.PushAddress(addr, insecure_rand);
4562  }
4563  return;
4564  }
4565 
4566  if (msg_type == NetMsgType::MEMPOOL) {
4567  if (!(pfrom.GetLocalServices() & NODE_BLOOM) &&
4568  !pfrom.HasPermission(PF_MEMPOOL)) {
4569  if (!pfrom.HasPermission(PF_NOBAN)) {
4571  "mempool request with bloom filters disabled, "
4572  "disconnect peer=%d\n",
4573  pfrom.GetId());
4574  pfrom.fDisconnect = true;
4575  }
4576  return;
4577  }
4578 
4579  if (m_connman.OutboundTargetReached(false) &&
4580  !pfrom.HasPermission(PF_MEMPOOL)) {
4581  if (!pfrom.HasPermission(PF_NOBAN)) {
4583  "mempool request with bandwidth limit reached, "
4584  "disconnect peer=%d\n",
4585  pfrom.GetId());
4586  pfrom.fDisconnect = true;
4587  }
4588  return;
4589  }
4590 
4591  if (pfrom.m_tx_relay != nullptr) {
4592  LOCK(pfrom.m_tx_relay->cs_tx_inventory);
4593  pfrom.m_tx_relay->fSendMempool = true;
4594  }
4595  return;
4596  }
4597 
4598  if (msg_type == NetMsgType::PING) {
4599  if (pfrom.GetCommonVersion() > BIP0031_VERSION) {
4600  uint64_t nonce = 0;
4601  vRecv >> nonce;
4602  // Echo the message back with the nonce. This allows for two useful
4603  // features:
4604  //
4605  // 1) A remote node can quickly check if the connection is
4606  // operational.
4607  // 2) Remote nodes can measure the latency of the network thread. If
4608  // this node is overloaded it won't respond to pings quickly and the
4609  // remote node can avoid sending us more work, like chain download
4610  // requests.
4611  //
4612  // The nonce stops the remote getting confused between different
4613  // pings: without it, if the remote node sends a ping once per
4614  // second and this node takes 5 seconds to respond to each, the 5th
4615  // ping the remote sends would appear to return very quickly.
4616  m_connman.PushMessage(&pfrom,
4617  msgMaker.Make(NetMsgType::PONG, nonce));
4618  }
4619  return;
4620  }
4621 
4622  if (msg_type == NetMsgType::PONG) {
4623  const auto ping_end = time_received;
4624  uint64_t nonce = 0;
4625  size_t nAvail = vRecv.in_avail();
4626  bool bPingFinished = false;
4627  std::string sProblem;
4628 
4629  if (nAvail >= sizeof(nonce)) {
4630  vRecv >> nonce;
4631 
4632  // Only process pong message if there is an outstanding ping (old
4633  // ping without nonce should never pong)
4634  if (pfrom.nPingNonceSent != 0) {
4635  if (nonce == pfrom.nPingNonceSent) {
4636  // Matching pong received, this ping is no longer
4637  // outstanding
4638  bPingFinished = true;
4639  const auto ping_time = ping_end - pfrom.m_ping_start.load();
4640  if (ping_time.count() >= 0) {
4641  // Successful ping time measurement, replace previous
4642  pfrom.nPingUsecTime = count_microseconds(ping_time);
4643  pfrom.nMinPingUsecTime =
4644  std::min(pfrom.nMinPingUsecTime.load(),
4645  count_microseconds(ping_time));
4646  } else {
4647  // This should never happen
4648  sProblem = "Timing mishap";
4649  }
4650  } else {
4651  // Nonce mismatches are normal when pings are overlapping
4652  sProblem = "Nonce mismatch";
4653  if (nonce == 0) {
4654  // This is most likely a bug in another implementation
4655  // somewhere; cancel this ping
4656  bPingFinished = true;
4657  sProblem = "Nonce zero";
4658  }
4659  }
4660  } else {
4661  sProblem = "Unsolicited pong without ping";
4662  }
4663  } else {
4664  // This is most likely a bug in another implementation somewhere;
4665  // cancel this ping
4666  bPingFinished = true;
4667  sProblem = "Short payload";
4668  }
4669 
4670  if (!(sProblem.empty())) {
4672  "pong peer=%d: %s, %x expected, %x received, %u bytes\n",
4673  pfrom.GetId(), sProblem, pfrom.nPingNonceSent, nonce,
4674  nAvail);
4675  }
4676  if (bPingFinished) {
4677  pfrom.nPingNonceSent = 0;
4678  }
4679  return;
4680  }
4681 
4682  if (msg_type == NetMsgType::FILTERLOAD) {
4683  if (!(pfrom.GetLocalServices() & NODE_BLOOM)) {
4684  pfrom.fDisconnect = true;
4685  return;
4686  }
4687  CBloomFilter filter;
4688  vRecv >> filter;
4689 
4690  if (!filter.IsWithinSizeConstraints()) {
4691  // There is no excuse for sending a too-large filter
4692  Misbehaving(pfrom, 100, "too-large bloom filter");
4693  } else if (pfrom.m_tx_relay != nullptr) {
4694  LOCK(pfrom.m_tx_relay->cs_filter);
4695  pfrom.m_tx_relay->pfilter.reset(new CBloomFilter(filter));
4696  pfrom.m_tx_relay->fRelayTxes = true;
4697  }
4698  return;
4699  }
4700 
4701  if (msg_type == NetMsgType::FILTERADD) {
4702  if (!(pfrom.GetLocalServices() & NODE_BLOOM)) {
4703  pfrom.fDisconnect = true;
4704  return;
4705  }
4706  std::vector<uint8_t> vData;
4707  vRecv >> vData;
4708 
4709  // Nodes must NEVER send a data item > 520 bytes (the max size for a
4710  // script data object, and thus, the maximum size any matched object can
4711  // have) in a filteradd message.
4712  bool bad = false;
4713  if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) {
4714  bad = true;
4715  } else if (pfrom.m_tx_relay != nullptr) {
4716  LOCK(pfrom.m_tx_relay->cs_filter);
4717  if (pfrom.m_tx_relay->pfilter) {
4718  pfrom.m_tx_relay->pfilter->insert(vData);
4719  } else {
4720  bad = true;
4721  }
4722  }
4723  if (bad) {
4724  // The structure of this code doesn't really allow for a good error
4725  // code. We'll go generic.
4726  Misbehaving(pfrom, 100, "bad filteradd message");
4727  }
4728  return;
4729  }
4730 
4731  if (msg_type == NetMsgType::FILTERCLEAR) {
4732  if (!(pfrom.GetLocalServices() & NODE_BLOOM)) {
4733  pfrom.fDisconnect = true;
4734  return;
4735  }
4736  if (pfrom.m_tx_relay == nullptr) {
4737  return;
4738  }
4739  LOCK(pfrom.m_tx_relay->cs_filter);
4740  pfrom.m_tx_relay->pfilter = nullptr;
4741  pfrom.m_tx_relay->fRelayTxes = true;
4742  return;
4743  }
4744 
4745  if (msg_type == NetMsgType::FEEFILTER) {
4746  Amount newFeeFilter = Amount::zero();
4747  vRecv >> newFeeFilter;
4748  if (MoneyRange(newFeeFilter)) {
4749  if (pfrom.m_tx_relay != nullptr) {
4750  LOCK(pfrom.m_tx_relay->cs_feeFilter);
4751  pfrom.m_tx_relay->minFeeFilter = newFeeFilter;
4752  }
4753  LogPrint(BCLog::NET, "received: feefilter of %s from peer=%d\n",
4754  CFeeRate(newFeeFilter).ToString(), pfrom.GetId());
4755  }
4756  return;
4757  }
4758 
4759  if (msg_type == NetMsgType::GETCFILTERS) {
4760  ProcessGetCFilters(pfrom, vRecv, m_chainparams, m_connman);
4761  return;
4762  }
4763 
4764  if (msg_type == NetMsgType::GETCFHEADERS) {
4766  return;
4767  }
4768 
4769  if (msg_type == NetMsgType::GETCFCHECKPT) {
4771  return;
4772  }
4773 
4774  if (msg_type == NetMsgType::NOTFOUND) {
4775  std::vector<CInv> vInv;
4776  vRecv >> vInv;
4777  // A peer might send up to 1 notfound per getdata request, but no more
4778  if (vInv.size() <= PROOF_REQUEST_PARAMS.max_peer_announcements +
4781  for (CInv &inv : vInv) {
4782  if (inv.IsMsgTx()) {
4783  // If we receive a NOTFOUND message for a tx we requested,
4784  // mark the announcement for it as completed in
4785  // InvRequestTracker.
4786  LOCK(::cs_main);
4787  m_txrequest.ReceivedResponse(pfrom.GetId(), TxId(inv.hash));
4788  continue;
4789  }
4790  if (inv.IsMsgProof()) {
4792  m_proofrequest.ReceivedResponse(
4793  pfrom.GetId(), avalanche::ProofId(inv.hash));
4794  }
4795  }
4796  }
4797  return;
4798  }
4799 
4800  // Ignore unknown commands for extensibility
4801  LogPrint(BCLog::NET, "Unknown command \"%s\" from peer=%d\n",
4802  SanitizeString(msg_type), pfrom.GetId());
4803  return;
4804 }
4805 
    // Consume the peer's pending "should discourage" flag (set by
    // Misbehaving) and, when appropriate, disconnect the node and/or
    // discourage its address. Returns true if the node was disconnected.
    const NodeId peer_id{pnode.GetId()};
    PeerRef peer = GetPeerRef(peer_id);
    if (peer == nullptr) {
        // Peer already removed from the peer map; nothing to do.
        return false;
    }
    {
        LOCK(peer->m_misbehavior_mutex);

        // There's nothing to do if the m_should_discourage flag isn't set
        if (!peer->m_should_discourage) {
            return false;
        }

        // Clear the flag so repeated calls don't re-punish for the same
        // misbehavior.
        peer->m_should_discourage = false;
    } // peer.m_misbehavior_mutex

    if (pnode.HasPermission(PF_NOBAN)) {
        // We never disconnect or discourage peers for bad behavior if they have
        // the NOBAN permission flag
        LogPrintf("Warning: not punishing noban peer %d!\n", peer_id);
        return false;
    }

    if (pnode.IsManualConn()) {
        // We never disconnect or discourage manual peers for bad behavior
        LogPrintf("Warning: not punishing manually connected peer %d!\n",
                  peer_id);
        return false;
    }

    if (pnode.addr.IsLocal()) {
        // We disconnect local peers for bad behavior but don't discourage
        // (since that would discourage all peers on the same local address)
        LogPrintf(
            "Warning: disconnecting but not discouraging local peer %d!\n",
            peer_id);
        pnode.fDisconnect = true;
        return true;
    }

    // Normal case: Disconnect the peer and discourage all nodes sharing the
    // address
    LogPrintf("Disconnecting and discouraging peer %d!\n", peer_id);
    if (m_banman) {
        // Discouragement is address-wide: all peers sharing this address are
        // deprioritized for future connections.
        m_banman->Discourage(pnode.addr);
    }
    m_connman.DisconnectNode(pnode.addr);
    return true;
}
4856 
/**
 * Process at most one queued network message from this peer.
 *
 * Before dequeueing a new message, any pending getdata requests and orphan
 * transaction work for the peer are serviced; if either queue is still
 * non-empty afterwards, we return early so responses stay ordered and the
 * queues cannot grow unbounded.
 *
 * @return true if there is likely more work to do for this peer (more queued
 *         messages, or outstanding getdata requests), false otherwise.
 */
bool PeerManager::ProcessMessages(const Config &config, CNode *pfrom,
                                  std::atomic<bool> &interruptMsgProc) {
    //
    // Message format
    //  (4) message start
    //  (12) command
    //  (4) size
    //  (4) checksum
    //  (x) data
    //
    bool fMoreWork = false;

    PeerRef peer = GetPeerRef(pfrom->GetId());
    if (peer == nullptr) {
        return false;
    }

    {
        LOCK(peer->m_getdata_requests_mutex);
        if (!peer->m_getdata_requests.empty()) {
            // Service outstanding getdata requests first so responses are
            // sent in order.
            ProcessGetData(config, *pfrom, *peer, m_connman, m_mempool,
                           interruptMsgProc);
        }
    }

    {
        // NOTE(review): a lock acquisition (presumably g_cs_orphans, as in
        // the check further below) appears to be elided here in this view —
        // confirm against the original source.
        if (!peer->m_orphan_work_set.empty()) {
            ProcessOrphanTx(config, peer->m_orphan_work_set);
        }
    }

    if (pfrom->fDisconnect) {
        return false;
    }

    // this maintains the order of responses and prevents m_getdata_requests
    // from growing unbounded
    {
        LOCK(peer->m_getdata_requests_mutex);
        if (!peer->m_getdata_requests.empty()) {
            return true;
        }
    }

    {
        LOCK(g_cs_orphans);
        if (!peer->m_orphan_work_set.empty()) {
            return true;
        }
    }

    // Don't bother if send buffer is too full to respond anyway
    if (pfrom->fPauseSend) {
        return false;
    }

    std::list<CNetMessage> msgs;
    {
        LOCK(pfrom->cs_vProcessMsg);
        if (pfrom->vProcessMsg.empty()) {
            return false;
        }
        // Just take one message
        msgs.splice(msgs.begin(), pfrom->vProcessMsg,
                    pfrom->vProcessMsg.begin());
        pfrom->nProcessQueueSize -= msgs.front().m_raw_message_size;
        pfrom->fPauseRecv =
        fMoreWork = !pfrom->vProcessMsg.empty();
    }
    CNetMessage &msg(msgs.front());

    msg.SetVersion(pfrom->GetCommonVersion());

    // Check network magic
    if (!msg.m_valid_netmagic) {
                 "PROCESSMESSAGE: INVALID MESSAGESTART %s peer=%d\n",
                 SanitizeString(msg.m_command), pfrom->GetId());

        // Make sure we discourage where that come from for some time.
        if (m_banman) {
            m_banman->Discourage(pfrom->addr);
        }
        m_connman.DisconnectNode(pfrom->addr);

        pfrom->fDisconnect = true;
        return false;
    }

    // Check header
    if (!msg.m_valid_header) {
        LogPrint(BCLog::NET, "PROCESSMESSAGE: ERRORS IN HEADER %s peer=%d\n",
                 SanitizeString(msg.m_command), pfrom->GetId());
        // A bad header is not punished; just skip this message.
        return fMoreWork;
    }
    const std::string &msg_type = msg.m_command;

    // Message size
    unsigned int nMessageSize = msg.m_message_size;

    // Checksum
    CDataStream &vRecv = msg.m_recv;
    if (!msg.m_valid_checksum) {
        LogPrint(BCLog::NET, "%s(%s, %u bytes): CHECKSUM ERROR peer=%d\n",
                 __func__, SanitizeString(msg_type), nMessageSize,
                 pfrom->GetId());
        if (m_banman) {
            m_banman->Discourage(pfrom->addr);
        }
        m_connman.DisconnectNode(pfrom->addr);
        return fMoreWork;
    }

    try {
        ProcessMessage(config, *pfrom, msg_type, vRecv, msg.m_time,
                       interruptMsgProc);
        if (interruptMsgProc) {
            return false;
        }

        {
            LOCK(peer->m_getdata_requests_mutex);
            if (!peer->m_getdata_requests.empty()) {
                // Handling the message may have queued getdata work; signal
                // the caller to come back.
                fMoreWork = true;
            }
        }
    } catch (const std::exception &e) {
        // Exceptions from message handling are logged but never propagate:
        // a malformed message must not take down the node.
        LogPrint(BCLog::NET, "%s(%s, %u bytes): Exception '%s' (%s) caught\n",
                 __func__, SanitizeString(msg_type), nMessageSize, e.what(),
                 typeid(e).name());
    } catch (...) {
        LogPrint(BCLog::NET, "%s(%s, %u bytes): Unknown exception caught\n",
                 __func__, SanitizeString(msg_type), nMessageSize);
    }

    return fMoreWork;
}
4996 
/**
 * Consider evicting an outbound peer whose best known chain never catches up
 * to our tip. State machine per peer (state.m_chain_sync): arm a timeout when
 * the peer is first seen behind our tip; at the timeout, send one getheaders
 * as a last chance; if the peer still hasn't caught up after that, disconnect.
 *
 * @param pto              The outbound peer under consideration.
 * @param time_in_seconds  Current time, used against m_chain_sync.m_timeout.
 */
void PeerManager::ConsiderEviction(CNode &pto, int64_t time_in_seconds) {

    CNodeState &state = *State(pto.GetId());
    const CNetMsgMaker msgMaker(pto.GetCommonVersion());

    if (!state.m_chain_sync.m_protect && pto.IsOutboundOrBlockRelayConn() &&
        state.fSyncStarted) {
        // This is an outbound peer subject to disconnection if they don't
        // announce a block with as much work as the current tip within
        // CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME seconds (note: if their
        // chain has more work than ours, we should sync to it, unless it's
        // invalid, in which case we should find that out and disconnect from
        // them elsewhere).
        if (state.pindexBestKnownBlock != nullptr &&
            state.pindexBestKnownBlock->nChainWork >=
                ::ChainActive().Tip()->nChainWork) {
            // Peer has caught up: clear any armed timeout.
            if (state.m_chain_sync.m_timeout != 0) {
                state.m_chain_sync.m_timeout = 0;
                state.m_chain_sync.m_work_header = nullptr;
                state.m_chain_sync.m_sent_getheaders = false;
            }
        } else if (state.m_chain_sync.m_timeout == 0 ||
                   (state.m_chain_sync.m_work_header != nullptr &&
                    state.pindexBestKnownBlock != nullptr &&
                    state.pindexBestKnownBlock->nChainWork >=
                        state.m_chain_sync.m_work_header->nChainWork)) {
            // Our best block known by this peer is behind our tip, and we're
            // either noticing that for the first time, OR this peer was able to
            // catch up to some earlier point where we checked against our tip.
            // Either way, set a new timeout based on current tip.
            state.m_chain_sync.m_timeout = time_in_seconds + CHAIN_SYNC_TIMEOUT;
            state.m_chain_sync.m_work_header = ::ChainActive().Tip();
            state.m_chain_sync.m_sent_getheaders = false;
        } else if (state.m_chain_sync.m_timeout > 0 &&
                   time_in_seconds > state.m_chain_sync.m_timeout) {
            // No evidence yet that our peer has synced to a chain with work
            // equal to that of our tip, when we first detected it was behind.
            // Send a single getheaders message to give the peer a chance to
            // update us.
            if (state.m_chain_sync.m_sent_getheaders) {
                // They've run out of time to catch up!
                LogPrintf(
                    "Disconnecting outbound peer %d for old chain, best known "
                    "block = %s\n",
                    pto.GetId(),
                    state.pindexBestKnownBlock != nullptr
                        ? state.pindexBestKnownBlock->GetBlockHash().ToString()
                        : "<none>");
                pto.fDisconnect = true;
            } else {
                assert(state.m_chain_sync.m_work_header);
                LogPrint(
                    BCLog::NET,
                    "sending getheaders to outbound peer=%d to verify chain "
                    "work (current best known block:%s, benchmark blockhash: "
                    "%s)\n",
                    pto.GetId(),
                    state.pindexBestKnownBlock != nullptr
                        ? state.pindexBestKnownBlock->GetBlockHash().ToString()
                        : "<none>",
                    state.m_chain_sync.m_work_header->GetBlockHash()
                        .ToString());
                    &pto,
                    msgMaker.Make(NetMsgType::GETHEADERS,
                                  ::ChainActive().GetLocator(
                                      state.m_chain_sync.m_work_header->pprev),
                                  uint256()));
                state.m_chain_sync.m_sent_getheaders = true;
                // 2 minutes
                constexpr int64_t HEADERS_RESPONSE_TIME = 120;
                // Bump the timeout to allow a response, which could clear the
                // timeout (if the response shows the peer has synced), reset
                // the timeout (if the peer syncs to the required work but not
                // to our tip), or result in disconnect (if we advance to the
                // timeout and pindexBestKnownBlock has not sufficiently
                // progressed)
                state.m_chain_sync.m_timeout =
                    time_in_seconds + HEADERS_RESPONSE_TIME;
            }
        }
    }
}
5081 
/**
 * If we have more full-relay outbound peers than our target, disconnect the
 * one that least recently announced a new block to us (ties broken toward the
 * higher node id, i.e. the more recent connection). Protected peers and
 * block-relay-only peers are never evicted.
 *
 * @param time_in_seconds  Current time, used to avoid evicting peers that
 *                         only just connected.
 */
void PeerManager::EvictExtraOutboundPeers(int64_t time_in_seconds) {
    // Check whether we have too many outbound peers
    int extra_peers = m_connman.GetExtraOutboundCount();
    if (extra_peers <= 0) {
        return;
    }

    // If we have more outbound peers than we target, disconnect one.
    // Pick the outbound peer that least recently announced us a new block, with
    // ties broken by choosing the more recent connection (higher node id)
    NodeId worst_peer = -1;
    int64_t oldest_block_announcement = std::numeric_limits<int64_t>::max();

                             ::cs_main) {

        // Ignore non-outbound peers, or nodes marked for disconnect already
        if (!pnode->IsOutboundOrBlockRelayConn() || pnode->fDisconnect) {
            return;
        }
        CNodeState *state = State(pnode->GetId());
        if (state == nullptr) {
            // shouldn't be possible, but just in case
            return;
        }
        // Don't evict our protected peers
        if (state->m_chain_sync.m_protect) {
            return;
        }
        // Don't evict our block-relay-only peers.
        if (pnode->m_tx_relay == nullptr) {
            return;
        }

        // Track the candidate with the stalest block announcement; on a tie,
        // prefer evicting the newer connection (higher node id).
        if (state->m_last_block_announcement < oldest_block_announcement ||
            (state->m_last_block_announcement == oldest_block_announcement &&
             pnode->GetId() > worst_peer)) {
            worst_peer = pnode->GetId();
            oldest_block_announcement = state->m_last_block_announcement;
        }
    });

    if (worst_peer == -1) {
        // No eligible eviction candidate was found.
        return;
    }

    bool disconnected = m_connman.ForNode(
        worst_peer, [&](CNode *pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {

            // Only disconnect a peer that has been connected to us for some
            // reasonable fraction of our check-frequency, to give it time for
            // new information to have arrived. Also don't disconnect any peer
            // we're trying to download a block from.
            CNodeState &state = *State(pnode->GetId());
            if (time_in_seconds - pnode->nTimeConnected >
                state.nBlocksInFlight == 0) {
                    "disconnecting extra outbound peer=%d (last block "
                    "announcement received at time %d)\n",
                    pnode->GetId(), oldest_block_announcement);
                pnode->fDisconnect = true;
                return true;
            } else {
                    "keeping outbound peer=%d chosen for eviction "
                    "(connect time: %d, blocks_in_flight: %d)\n",
                    pnode->GetId(), pnode->nTimeConnected,
                    state.nBlocksInFlight);
                return false;
            }
        });

    if (disconnected) {
        // If we disconnected an extra peer, that means we successfully
        // connected to at least one peer after the last time we detected a
        // stale tip. Don't try any more extra peers until we next detect a
        // stale tip, to limit the load we put on the network from these extra
        // connections.
    }
}
5166 
    // Periodic maintenance: evict extra outbound peers, and (at most once per
    // STALE_CHECK_INTERVAL) check whether our tip looks stale so an extra
    // outbound connection may be attempted.
    LOCK(cs_main);

    int64_t time_in_seconds = GetTime();

    EvictExtraOutboundPeers(time_in_seconds);

    if (time_in_seconds <= m_stale_tip_check_time) {
        // Rate-limit the stale-tip check below.
        return;
    }

    // Check whether our tip is stale, and if so, allow using an extra outbound
    // peer.
        TipMayBeStale(m_chainparams.GetConsensus())) {
        LogPrintf("Potential stale tip detected, will try using extra outbound "
                  "peer (last tip update: %d seconds ago)\n",
                  time_in_seconds - g_last_tip_update);
    } else if (m_connman.GetTryNewOutboundPeer()) {
    }
    m_stale_tip_check_time = time_in_seconds + STALE_CHECK_INTERVAL;
}
5192 
5193 namespace {
// Comparator used to heap-order the candidate transaction invs before they
// are trickled out in SendMessages (via std::make_heap/std::pop_heap).
// NOTE(review): comment lines 5201-5204 are elided in this listing.
5194 class CompareInvMempoolOrder {
 // Non-owning pointer to the mempool whose ordering is consulted.
 5195  CTxMemPool *mp;
 5196 
 5197 public:
 5198  explicit CompareInvMempoolOrder(CTxMemPool *_mempool) { mp = _mempool; }
 5199 
 5200  bool operator()(std::set<TxId>::iterator a, std::set<TxId>::iterator b) {
 // Arguments are deliberately swapped when forwarding to
 // CompareDepthAndScore, reversing the heap's ordering relative to that
 // predicate — presumably so the entries to announce first end up on top
 // of the max-heap; confirm against CTxMemPool::CompareDepthAndScore.
 5205  return mp->CompareDepthAndScore(*b, *a);
 5206  }
 5207 };
5208 } // namespace
5209 
// Send queued protocol messages to the given peer. Visible phases, in order:
// discouragement/disconnect check, ping, addr/addrv2 relay, initial
// getheaders sync, block announcements (cmpctblock / headers / inv fallback),
// inventory trickle (blocks, avalanche proofs, transactions), stall and
// timeout detection, getdata requests (blocks, proofs, transactions), and
// feefilter. Returns true in every visible path.
// NOTE(review): many statement lines are elided in this generated listing
// (gaps in the embedded line numbering); comments below describe only the
// visible code.
5210 bool PeerManager::SendMessages(const Config &config, CNode *pto,
 5211  std::atomic<bool> &interruptMsgProc) {
 5212  const Consensus::Params &consensusParams = m_chainparams.GetConsensus();
 5213 
 5214  // We must call MaybeDiscourageAndDisconnect first, to ensure that we'll
 5215  // disconnect misbehaving peers even before the version handshake is
 5216  // complete.
 5217  if (MaybeDiscourageAndDisconnect(*pto)) {
 5218  return true;
 5219  }
 5220 
 5221  // Don't send anything until the version handshake is complete
 5222  if (!pto->fSuccessfullyConnected || pto->fDisconnect) {
 5223  return true;
 5224  }
 5225 
 5226  // If we get here, the outgoing message serialization version is set and
 5227  // can't change.
 5228  const CNetMsgMaker msgMaker(pto->GetCommonVersion());
 5229 
 5230  //
 5231  // Message: ping
 5232  //
 5233  bool pingSend = false;
 5234  if (pto->fPingQueued) {
 5235  // RPC ping request by user
 5236  pingSend = true;
 5237  }
 5238  if (pto->nPingNonceSent == 0 && pto->m_ping_start.load() + PING_INTERVAL <
 5239  GetTime<std::chrono::microseconds>()) {
 5240  // Ping automatically sent as a latency probe & keepalive.
 5241  pingSend = true;
 5242  }
 5243  if (pingSend) {
 // A zero nonce is reserved as the "no ping outstanding" sentinel, so
 // keep drawing random bytes until we get a non-zero value.
 5244  uint64_t nonce = 0;
 5245  while (nonce == 0) {
 5246  GetRandBytes((uint8_t *)&nonce, sizeof(nonce));
 5247  }
 5248  pto->fPingQueued = false;
 5249  pto->m_ping_start = GetTime<std::chrono::microseconds>();
 5250  if (pto->GetCommonVersion() > BIP0031_VERSION) {
 5251  pto->nPingNonceSent = nonce;
 5252  m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::PING, nonce));
 5253  } else {
 5254  // Peer is too old to support ping command with nonce, pong will
 5255  // never arrive.
 5256  pto->nPingNonceSent = 0;
 5257  m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::PING));
 5258  }
 5259  }
 5260 
 5261  auto current_time = GetTime<std::chrono::microseconds>();
 5262  bool fFetch;
 5263 
 5264  {
 5265  LOCK(cs_main);
 5266 
 5267  CNodeState &state = *State(pto->GetId());
 5268 
 5269  // Address refresh broadcast
 5270  if (pto->RelayAddrsWithConn() &&
 5271  !::ChainstateActive().IsInitialBlockDownload() &&
 5272  pto->m_next_local_addr_send < current_time) {
 5273  AdvertiseLocal(pto);
 5274  pto->m_next_local_addr_send = PoissonNextSend(
 5275  current_time, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
 5276  }
 5277 
 5278  //
 5279  // Message: addr
 5280  //
 5281  if (pto->RelayAddrsWithConn() && pto->m_next_addr_send < current_time) {
 // NOTE(review): the right-hand side of this assignment (elided line
 // 5283) presumably schedules the next addr send — confirm.
 5282  pto->m_next_addr_send =
 5284  std::vector<CAddress> vAddr;
 5285  vAddr.reserve(pto->vAddrToSend.size());
 5286  assert(pto->m_addr_known);
 5287 
 // Pick the message format the peer negotiated: addrv2 carries
 // ADDRV2_FORMAT serialization, legacy addr carries none.
 5288  const char *msg_type;
 5289  int make_flags;
 5290  if (pto->m_wants_addrv2) {
 5291  msg_type = NetMsgType::ADDRV2;
 5292  make_flags = ADDRV2_FORMAT;
 5293  } else {
 5294  msg_type = NetMsgType::ADDR;
 5295  make_flags = 0;
 5296  }
 5297 
 5298  for (const CAddress &addr : pto->vAddrToSend) {
 5299  if (!pto->m_addr_known->contains(addr.GetKey())) {
 5300  pto->m_addr_known->insert(addr.GetKey());
 5301  vAddr.push_back(addr);
 5302  // receiver rejects addr messages larger than
 5303  // MAX_ADDR_TO_SEND
 5304  if (vAddr.size() >= MAX_ADDR_TO_SEND) {
 5306  pto, msgMaker.Make(make_flags, msg_type, vAddr));
 5307  vAddr.clear();
 5308  }
 5309  }
 5310  }
 5311  pto->vAddrToSend.clear();
 5312  if (!vAddr.empty()) {
 5314  pto, msgMaker.Make(make_flags, msg_type, vAddr));
 5315  }
 5316 
 5317  // we only send the big addr message once
 5318  if (pto->vAddrToSend.capacity() > 40) {
 5319  pto->vAddrToSend.shrink_to_fit();
 5320  }
 5321  }
 5322 
 5323  // Start block sync
 5324  if (pindexBestHeader == nullptr) {
 5326  }
 5327 
 5328  // Download if this is a nice peer, or we have no nice peers and this
 5329  // one might do.
 5330  fFetch = state.fPreferredDownload ||
 5331  (nPreferredDownload == 0 && !pto->fClient &&
 5332  !pto->IsAddrFetchConn());
 5333 
 5334  if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex) {
 5335  // Only actively request headers from a single peer, unless we're
 5336  // close to today.
 5337  if ((nSyncStarted == 0 && fFetch) ||
 5339  GetAdjustedTime() - 24 * 60 * 60) {
 5340  state.fSyncStarted = true;
 5341  state.nHeadersSyncTimeout =
 5342  count_microseconds(current_time) +
 5346  (consensusParams.nPowTargetSpacing);
 5347  nSyncStarted++;
 5348  const CBlockIndex *pindexStart = pindexBestHeader;
 // Step back one header so the locator-based request overlaps our
 // best known header (comment lines 5349-5356 elided here).
 5357  if (pindexStart->pprev) {
 5358  pindexStart = pindexStart->pprev;
 5359  }
 5360 
 5361  LogPrint(
 5362  BCLog::NET,
 5363  "initial getheaders (%d) to peer=%d (startheight:%d)\n",
 5364  pindexStart->nHeight, pto->GetId(), pto->nStartingHeight);
 5366  pto, msgMaker.Make(NetMsgType::GETHEADERS,
 5367  ::ChainActive().GetLocator(pindexStart),
 5368  uint256()));
 5369  }
 5370  }
 5371 
 5372  //
 5373  // Try sending block announcements via headers
 5374  //
 5375  {
 5376  // If we have less than MAX_BLOCKS_TO_ANNOUNCE in our list of block
 5377  // hashes we're relaying, and our peer wants headers announcements,
 5378  // then find the first header not yet known to our peer but would
 5379  // connect, and send. If no header would connect, or if we have too
 5380  // many blocks, or if the peer doesn't want headers, just add all to
 5381  // the inv queue.
 5382  LOCK(pto->cs_inventory);
 5383  std::vector<CBlock> vHeaders;
 5384  bool fRevertToInv =
 5385  ((!state.fPreferHeaders &&
 5386  (!state.fPreferHeaderAndIDs ||
 5387  pto->vBlockHashesToAnnounce.size() > 1)) ||
 5388  pto->vBlockHashesToAnnounce.size() > MAX_BLOCKS_TO_ANNOUNCE);
 5389  // last header queued for delivery
 5390  const CBlockIndex *pBestIndex = nullptr;
 5391  // ensure pindexBestKnownBlock is up-to-date
 5392  ProcessBlockAvailability(pto->GetId());
 5393 
 5394  if (!fRevertToInv) {
 5395  bool fFoundStartingHeader = false;
 5396  // Try to find first header that our peer doesn't have, and then
 5397  // send all headers past that one. If we come across a header
 5398  // that isn't on ::ChainActive(), give up.
 5399  for (const BlockHash &hash : pto->vBlockHashesToAnnounce) {
 5400  const CBlockIndex *pindex = LookupBlockIndex(hash);
 5401  assert(pindex);
 5402  if (::ChainActive()[pindex->nHeight] != pindex) {
 5403  // Bail out if we reorged away from this block
 5404  fRevertToInv = true;
 5405  break;
 5406  }
 5407  if (pBestIndex != nullptr && pindex->pprev != pBestIndex) {
 5408  // This means that the list of blocks to announce don't
 5409  // connect to each other. This shouldn't really be
 5410  // possible to hit during regular operation (because
 5411  // reorgs should take us to a chain that has some block
 5412  // not on the prior chain, which should be caught by the
 5413  // prior check), but one way this could happen is by
 5414  // using invalidateblock / reconsiderblock repeatedly on
 5415  // the tip, causing it to be added multiple times to
 5416  // vBlockHashesToAnnounce. Robustly deal with this rare
 5417  // situation by reverting to an inv.
 5418  fRevertToInv = true;
 5419  break;
 5420  }
 5421  pBestIndex = pindex;
 5422  if (fFoundStartingHeader) {
 5423  // add this to the headers message
 5424  vHeaders.push_back(pindex->GetBlockHeader());
 5425  } else if (PeerHasHeader(&state, pindex)) {
 5426  // Keep looking for the first new block.
 5427  continue;
 5428  } else if (pindex->pprev == nullptr ||
 5429  PeerHasHeader(&state, pindex->pprev)) {
 5430  // Peer doesn't have this header but they do have the
 5431  // prior one. Start sending headers.
 5432  fFoundStartingHeader = true;
 5433  vHeaders.push_back(pindex->GetBlockHeader());
 5434  } else {
 5435  // Peer doesn't have this header or the prior one --
 5436  // nothing will connect, so bail out.
 5437  fRevertToInv = true;
 5438  break;
 5439  }
 5440  }
 5441  }
 5442  if (!fRevertToInv && !vHeaders.empty()) {
 5443  if (vHeaders.size() == 1 && state.fPreferHeaderAndIDs) {
 5444  // We only send up to 1 block as header-and-ids, as
 5445  // otherwise probably means we're doing an initial-ish-sync
 5446  // or they're slow.
 5448  "%s sending header-and-ids %s to peer=%d\n",
 5449  __func__, vHeaders.front().GetHash().ToString(),
 5450  pto->GetId());
 5451 
 5452  int nSendFlags = 0;
 5453 
 // Prefer the most-recent-block cache over a disk read when the
 // cached hash matches the block we are about to announce.
 5454  bool fGotBlockFromCache = false;
 5455  {
 5457  if (most_recent_block_hash ==
 5458  pBestIndex->GetBlockHash()) {
 5459  CBlockHeaderAndShortTxIDs cmpctblock(
 5460  *most_recent_block);
 5462  pto, msgMaker.Make(nSendFlags,
 5464  cmpctblock));
 5465  fGotBlockFromCache = true;
 5466  }
 5467  }
 5468  if (!fGotBlockFromCache) {
 5469  CBlock block;
 5470  bool ret = ReadBlockFromDisk(block, pBestIndex,
 5471  consensusParams);
 5472  assert(ret);
 5473  CBlockHeaderAndShortTxIDs cmpctblock(block);
 5475  pto,
 5476  msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK,
 5477  cmpctblock));
 5478  }
 5479  state.pindexBestHeaderSent = pBestIndex;
 5480  } else if (state.fPreferHeaders) {
 5481  if (vHeaders.size() > 1) {
 5483  "%s: %u headers, range (%s, %s), to peer=%d\n",
 5484  __func__, vHeaders.size(),
 5485  vHeaders.front().GetHash().ToString(),
 5486  vHeaders.back().GetHash().ToString(),
 5487  pto->GetId());
 5488  } else {
 5490  "%s: sending header %s to peer=%d\n", __func__,
 5491  vHeaders.front().GetHash().ToString(),
 5492  pto->GetId());
 5493  }
 5495  pto, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
 5496  state.pindexBestHeaderSent = pBestIndex;
 5497  } else {
 5498  fRevertToInv = true;
 5499  }
 5500  }
 5501  if (fRevertToInv) {
 5502  // If falling back to using an inv, just try to inv the tip. The
 5503  // last entry in vBlockHashesToAnnounce was our tip at some
 5504  // point in the past.
 5505  if (!pto->vBlockHashesToAnnounce.empty()) {
 5506  const BlockHash &hashToAnnounce =
 5507  pto->vBlockHashesToAnnounce.back();
 5508  const CBlockIndex *pindex =
 5509  LookupBlockIndex(hashToAnnounce);
 5510  assert(pindex);
 5511 
 5512  // Warn if we're announcing a block that is not on the main
 5513  // chain. This should be very rare and could be optimized
 5514  // out. Just log for now.
 5515  if (::ChainActive()[pindex->nHeight] != pindex) {
 5516  LogPrint(
 5517  BCLog::NET,
 5518  "Announcing block %s not on main chain (tip=%s)\n",
 5519  hashToAnnounce.ToString(),
 5520  ::ChainActive().Tip()->GetBlockHash().ToString());
 5521  }
 5522 
 5523  // If the peer's chain has this block, don't inv it back.
 5524  if (!PeerHasHeader(&state, pindex)) {
 5525  pto->vInventoryBlockToSend.push_back(hashToAnnounce);
 5527  "%s: sending inv peer=%d hash=%s\n", __func__,
 5528  pto->GetId(), hashToAnnounce.ToString());
 5529  }
 5530  }
 5531  }
 5532  pto->vBlockHashesToAnnounce.clear();
 5533  }
 5534  } // release cs_main
 5535 
 5536  //
 5537  // Message: inventory
 5538  //
 5539  std::vector<CInv> vInv;
 // Helper: append an inv entry, flushing the batch as one INV message
 // whenever it reaches MAX_INV_SZ so we stay within protocol limits.
 5540  auto addInvAndMaybeFlush = [&](uint32_t type, const uint256 &hash) {
 5541  vInv.emplace_back(type, hash);
 5542  if (vInv.size() == MAX_INV_SZ) {
 5544  pto, msgMaker.Make(NetMsgType::INV, std::move(vInv)));
 5545  vInv.clear();
 5546  }
 5547  };
 5548 
 5549  {
 5550  LOCK2(cs_main, pto->cs_inventory);
 5551 
 5552  vInv.reserve(std::max<size_t>(pto->vInventoryBlockToSend.size(),
 5554  config.GetMaxBlockSize() / 1000000));
 5555 
 5556  // Add blocks
 5557  for (const BlockHash &hash : pto->vInventoryBlockToSend) {
 5558  addInvAndMaybeFlush(MSG_BLOCK, hash);
 5559  }
 5560  pto->vInventoryBlockToSend.clear();
 5561 
 // Helper: decide whether this trickle interval has elapsed (peers with
 // the noban permission always trickle immediately); when it fires,
 // reschedule `next` — inbound peers get a randomized delay (elided
 // lines 5570/5572), outbound peers none.
 5562  auto computeNextInvSendTime =
 5563  [&](std::chrono::microseconds &next) -> bool {
 5564  bool fSendTrickle = pto->HasPermission(PF_NOBAN);
 5565 
 5566  if (next < current_time) {
 5567  fSendTrickle = true;
 5568  if (pto->IsInboundConn()) {
 5569  next = std::chrono::microseconds{
 5571  count_microseconds(current_time),
 5573  } else {
 5574  // Skip delay for outbound peers, as there is less privacy
 5575  // concern for them.
 5576  next = current_time;
 5577  }
 5578  }
 5579 
 5580  return fSendTrickle;
 5581  };
 5582 
 5583  // Add proofs to inventory
 5584  if (pto->m_proof_relay != nullptr) {
 5585  LOCK(pto->m_proof_relay->cs_proof_inventory);
 5586 
 5587  if (computeNextInvSendTime(pto->m_proof_relay->nextInvSend)) {
 5588  auto it = pto->m_proof_relay->setInventoryProofToSend.begin();
 5589  while (it !=
 5590  pto->m_proof_relay->setInventoryProofToSend.end()) {
 5591  const avalanche::ProofId proofid = *it;
 5592 
 5593  it = pto->m_proof_relay->setInventoryProofToSend.erase(it);
 5594 
 // Skip proofs the peer is already known to have.
 5595  if (pto->m_proof_relay->filterProofKnown.contains(
 5596  proofid)) {
 5597  continue;
 5598  }
 5599 
 5600  pto->m_proof_relay->filterProofKnown.insert(proofid);
 5601  addInvAndMaybeFlush(MSG_AVA_PROOF, proofid);
 5602  State(pto->GetId())
 5603  ->m_recently_announced_proofs.insert(proofid);
 5604  }
 5605  }
 5606  }
 5607 
 5608  if (pto->m_tx_relay != nullptr) {
 5609  LOCK(pto->m_tx_relay->cs_tx_inventory);
 5610  // Check whether periodic sends should happen
 5611  const bool fSendTrickle =
 5612  computeNextInvSendTime(pto->m_tx_relay->nNextInvSend);
 5613 
 5614  // Time to send but the peer has requested we not relay
 5615  // transactions.
 5616  if (fSendTrickle) {
 5617  LOCK(pto->m_tx_relay->cs_filter);
 5618  if (!pto->m_tx_relay->fRelayTxes) {
 5619  pto->m_tx_relay->setInventoryTxToSend.clear();
 5620  }
 5621  }
 5622 
 5623  // Respond to BIP35 mempool requests
 5624  if (fSendTrickle && pto->m_tx_relay->fSendMempool) {
 5625  auto vtxinfo = m_mempool.infoAll();
 5626  pto->m_tx_relay->fSendMempool = false;
 5627  CFeeRate filterrate;
 5628  {
 5629  LOCK(pto->m_tx_relay->cs_feeFilter);
 5630  filterrate = CFeeRate(pto->m_tx_relay->minFeeFilter);
 5631  }
 5632 
 5633  LOCK(pto->m_tx_relay->cs_filter);
 5634 
 5635  for (const auto &txinfo : vtxinfo) {
 5636  const TxId &txid = txinfo.tx->GetId();
 5637  pto->m_tx_relay->setInventoryTxToSend.erase(txid);
 5638  // Don't send transactions that peers will not put into
 5639  // their mempool
 5640  if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
 5641  continue;
 5642  }
 5643  if (pto->m_tx_relay->pfilter &&
 5644  !pto->m_tx_relay->pfilter->IsRelevantAndUpdate(
 5645  *txinfo.tx)) {
 5646  continue;
 5647  }
 5648  pto->m_tx_relay->filterInventoryKnown.insert(txid);
 5649  // Responses to MEMPOOL requests bypass the
 5650  // m_recently_announced_invs filter.
 5651  addInvAndMaybeFlush(MSG_TX, txid);
 5652  }
 5653  pto->m_tx_relay->m_last_mempool_req =
 5654  GetTime<std::chrono::seconds>();
 5655  }
 5656 
 5657  // Determine transactions to relay
 5658  if (fSendTrickle) {
 5659  // Produce a vector with all candidates for sending
 5660  std::vector<std::set<TxId>::iterator> vInvTx;
 5661  vInvTx.reserve(pto->m_tx_relay->setInventoryTxToSend.size());
 5662  for (std::set<TxId>::iterator it =
 5663  pto->m_tx_relay->setInventoryTxToSend.begin();
 5664  it != pto->m_tx_relay->setInventoryTxToSend.end(); it++) {
 5665  vInvTx.push_back(it);
 5666  }
 5667  CFeeRate filterrate;
 5668  {
 5669  LOCK(pto->m_tx_relay->cs_feeFilter);
 5670  filterrate = CFeeRate(pto->m_tx_relay->minFeeFilter);
 5671  }
 5672  // Topologically and fee-rate sort the inventory we send for
 5673  // privacy and priority reasons. A heap is used so that not
 5674  // all items need sorting if only a few are being sent.
 5675  CompareInvMempoolOrder compareInvMempoolOrder(&m_mempool);
 5676  std::make_heap(vInvTx.begin(), vInvTx.end(),
 5677  compareInvMempoolOrder);
 5678  // No reason to drain out at many times the network's
 5679  // capacity, especially since we have many peers and some
 5680  // will draw much shorter delays.
 5681  unsigned int nRelayedTransactions = 0;
 5682  LOCK(pto->m_tx_relay->cs_filter);
 5683  while (!vInvTx.empty() &&
 5684  nRelayedTransactions < INVENTORY_BROADCAST_MAX_PER_MB *
 5685  config.GetMaxBlockSize() /
 5686  1000000) {
 5687  // Fetch the top element from the heap
 5688  std::pop_heap(vInvTx.begin(), vInvTx.end(),
 5689  compareInvMempoolOrder);
 5690  std::set<TxId>::iterator it = vInvTx.back();
 5691  vInvTx.pop_back();
 5692  const TxId txid = *it;
 5693  // Remove it from the to-be-sent set
 5694  pto->m_tx_relay->setInventoryTxToSend.erase(it);
 5695  // Check if not in the filter already
 5696  if (pto->m_tx_relay->filterInventoryKnown.contains(txid)) {
 5697  continue;
 5698  }
 5699  // Not in the mempool anymore? don't bother sending it.
 5700  auto txinfo = m_mempool.info(txid);
 5701  if (!txinfo.tx) {
 5702  continue;
 5703  }
 5704  // Peer told you to not send transactions at that
 5705  // feerate? Don't bother sending it.
 5706  if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
 5707  continue;
 5708  }
 5709  if (pto->m_tx_relay->pfilter &&
 5710  !pto->m_tx_relay->pfilter->IsRelevantAndUpdate(
 5711  *txinfo.tx)) {
 5712  continue;
 5713  }
 5714  // Send
 5715  State(pto->GetId())->m_recently_announced_invs.insert(txid);
 5716  addInvAndMaybeFlush(MSG_TX, txid);
 5717  nRelayedTransactions++;
 5718  {
 5719  // Expire old relay messages
 5720  while (!vRelayExpiration.empty() &&
 5721  vRelayExpiration.front().first <
 5722  count_microseconds(current_time)) {
 5723  mapRelay.erase(vRelayExpiration.front().second);
 5724  vRelayExpiration.pop_front();
 5725  }
 5726 
 // Keep the transaction available for getdata for a while
 // (expiry constant on elided line 5733).
 5727  auto ret = mapRelay.insert(
 5728  std::make_pair(txid, std::move(txinfo.tx)));
 5729  if (ret.second) {
 5730  vRelayExpiration.push_back(std::make_pair(
 5731  count_microseconds(current_time) +
 5732  std::chrono::microseconds{
 5734  .count(),
 5735  ret.first));
 5736  }
 5737  }
 5738  pto->m_tx_relay->filterInventoryKnown.insert(txid);
 5739  }
 5740  }
 5741  }
 5742  } // release cs_main, pto->cs_inventory
 5743 
 5744  if (!vInv.empty()) {
 5745  m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
 5746  }
 5747 
 5748  {
 5749  LOCK(cs_main);
 5750 
 5751  CNodeState &state = *State(pto->GetId());
 5752 
 5753  // Detect whether we're stalling
 5754  current_time = GetTime<std::chrono::microseconds>();
 5755  if (state.nStallingSince &&
 5756  state.nStallingSince < count_microseconds(current_time) -
 5757  1000000 * BLOCK_STALLING_TIMEOUT) {
 5758  // Stalling only triggers when the block download window cannot
 5759  // move. During normal steady state, the download window should be
 5760  // much larger than the to-be-downloaded set of blocks, so
 5761  // disconnection should only happen during initial block download.
 5762  LogPrintf("Peer=%d is stalling block download, disconnecting\n",
 5763  pto->GetId());
 5764  pto->fDisconnect = true;
 5765  return true;
 5766  }
 5767  // In case there is a block that has been in flight from this peer for 2
 5768  // + 0.5 * N times the block interval (with N the number of peers from
 5769  // which we're downloading validated blocks), disconnect due to timeout.
 5770  // We compensate for other peers to prevent killing off peers due to our
 5771  // own downstream link being saturated. We only count validated
 5772  // in-flight blocks so peers can't advertise non-existing block hashes
 5773  // to unreasonably increase our timeout.
 5774  if (state.vBlocksInFlight.size() > 0) {
 5775  QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
 5776  int nOtherPeersWithValidatedDownloads =
 5777  nPeersWithValidatedDownloads -
 5778  (state.nBlocksInFlightValidHeaders > 0);
 5779  if (count_microseconds(current_time) >
 5780  state.nDownloadingSince +
 5781  consensusParams.nPowTargetSpacing *
 5784  nOtherPeersWithValidatedDownloads)) {
 5785  LogPrintf("Timeout downloading block %s from peer=%d, "
 5786  "disconnecting\n",
 5787  queuedBlock.hash.ToString(), pto->GetId());
 5788  pto->fDisconnect = true;
 5789  return true;
 5790  }
 5791  }
 5792 
 5793  // Check for headers sync timeouts
 5794  if (state.fSyncStarted &&
 5795  state.nHeadersSyncTimeout < std::numeric_limits<int64_t>::max()) {
 5796  // Detect whether this is a stalling initial-headers-sync peer
 5797  if (pindexBestHeader->GetBlockTime() <=
 5798  GetAdjustedTime() - 24 * 60 * 60) {
 5799  if (count_microseconds(current_time) >
 5800  state.nHeadersSyncTimeout &&
 5801  nSyncStarted == 1 &&
 5802  (nPreferredDownload - state.fPreferredDownload >= 1)) {
 5803  // Disconnect a peer (without the noban permission) if it
 5804  // is our only sync peer, and we have others we could be
 5805  // using instead.
 5806  // Note: If all our peers are inbound, then we won't
 5807  // disconnect our sync peer for stalling; we have bigger
 5808  // problems if we can't get any outbound peers.
 5809  if (!pto->HasPermission(PF_NOBAN)) {
 5810  LogPrintf("Timeout downloading headers from peer=%d, "
 5811  "disconnecting\n",
 5812  pto->GetId());
 5813  pto->fDisconnect = true;
 5814  return true;
 5815  } else {
 5816  LogPrintf("Timeout downloading headers from noban "
 5817  "peer=%d, not disconnecting\n",
 5818  pto->GetId());
 5819  // Reset the headers sync state so that we have a chance
 5820  // to try downloading from a different peer. Note: this
 5821  // will also result in at least one more getheaders
 5822  // message to be sent to this peer (eventually).
 5823  state.fSyncStarted = false;
 5824  nSyncStarted--;
 5825  state.nHeadersSyncTimeout = 0;
 5826  }
 5827  }
 5828  } else {
 5829  // After we've caught up once, reset the timeout so we can't
 5830  // trigger disconnect later.
 5831  state.nHeadersSyncTimeout = std::numeric_limits<int64_t>::max();
 5832  }
 5833  }
 5834 
 5835  // Check that outbound peers have reasonable chains GetTime() is used by
 5836  // this anti-DoS logic so we can test this using mocktime.
 5837  ConsiderEviction(*pto, GetTime());
 5838  } // release cs_main
 5839 
 5840  std::vector<CInv> vGetData;
 5841 
 5842  //
 5843  // Message: getdata (blocks)
 5844  //
 5845  {
 5846  LOCK(cs_main);
 5847 
 5848  CNodeState &state = *State(pto->GetId());
 5849 
 5850  if (!pto->fClient &&
 5851  ((fFetch && !pto->m_limited_node) ||
 5852  !::ChainstateActive().IsInitialBlockDownload()) &&
 5853  state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
 5854  std::vector<const CBlockIndex *> vToDownload;
 5855  NodeId staller = -1;
 5856  FindNextBlocksToDownload(pto->GetId(),
 5858  state.nBlocksInFlight,
 5859  vToDownload, staller, consensusParams);
 5860  for (const CBlockIndex *pindex : vToDownload) {
 5861  vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash()));
 5862  MarkBlockAsInFlight(config, m_mempool, pto->GetId(),
 5863  pindex->GetBlockHash(), consensusParams,
 5864  pindex);
 5865  LogPrint(BCLog::NET, "Requesting block %s (%d) peer=%d\n",
 5866  pindex->GetBlockHash().ToString(), pindex->nHeight,
 5867  pto->GetId());
 5868  }
 5869  if (state.nBlocksInFlight == 0 && staller != -1) {
 5870  if (State(staller)->nStallingSince == 0) {
 5871  State(staller)->nStallingSince =
 5872  count_microseconds(current_time);
 5873  LogPrint(BCLog::NET, "Stall started peer=%d\n", staller);
 5874  }
 5875  }
 5876  }
 5877  } // release cs_main
 5878 
 // Helper: queue a getdata entry, flushing the batch as one GETDATA
 // message whenever it reaches MAX_GETDATA_SZ.
 5879  auto addGetDataAndMaybeFlush = [&](uint32_t type, const uint256 &hash) {
 5880  CInv inv(type, hash);
 5881  LogPrint(BCLog::NET, "Requesting %s from peer=%d\n", inv.ToString(),
 5882  pto->GetId());
 5883  vGetData.push_back(std::move(inv));
 5884  if (vGetData.size() >= MAX_GETDATA_SZ) {
 5886  pto, msgMaker.Make(NetMsgType::GETDATA, std::move(vGetData)));
 5887  vGetData.clear();
 5888  }
 5889  };
 5890 
 5891  //
 5892  // Message: getdata (proof)
 5893  //
 5894  {
 5896  std::vector<std::pair<NodeId, avalanche::ProofId>> expired;
 5897  auto requestable =
 5898  m_proofrequest.GetRequestable(pto->GetId(), current_time, &expired);
 5899  for (const auto &entry : expired) {
 5901  "timeout of inflight proof %s from peer=%d\n",
 5902  entry.second.ToString(), entry.first);
 5903  }
 5904  for (const auto &proofid : requestable) {
 5905  if (!AlreadyHaveProof(proofid)) {
 5906  addGetDataAndMaybeFlush(MSG_AVA_PROOF, proofid);
 5907  m_proofrequest.RequestedData(
 5908  pto->GetId(), proofid,
 5909  current_time + PROOF_REQUEST_PARAMS.getdata_interval);
 5910  } else {
 5911  // We have already seen this proof, no need to download.
 5912  // This is just a belt-and-suspenders, as this should
 5913  // already be called whenever a transaction becomes
 5914  // AlreadyHaveProof().
 5915  m_proofrequest.ForgetInvId(proofid);
 5916  }
 5917  }
 5918  } // release cs_proofrequest
 5919 
 5920  //
 5921  // Message: getdata (transactions)
 5922  //
 5923  {
 5924  LOCK(cs_main);
 5925  std::vector<std::pair<NodeId, TxId>> expired;
 5926  auto requestable =
 5927  m_txrequest.GetRequestable(pto->GetId(), current_time, &expired);
 5928  for (const auto &entry : expired) {
 5929  LogPrint(BCLog::NET, "timeout of inflight tx %s from peer=%d\n",
 5930  entry.second.ToString(), entry.first);
 5931  }
 5932  for (const TxId &txid : requestable) {
 5933  if (!AlreadyHaveTx(txid, m_mempool)) {
 5934  addGetDataAndMaybeFlush(MSG_TX, txid);
 5935  m_txrequest.RequestedData(
 5936  pto->GetId(), txid,
 5937  current_time + TX_REQUEST_PARAMS.getdata_interval);
 5938  } else {
 5939  // We have already seen this transaction, no need to download.
 5940  // This is just a belt-and-suspenders, as this should already be
 5941  // called whenever a transaction becomes AlreadyHaveTx().
 5942  m_txrequest.ForgetInvId(txid);
 5943  }
 5944  }
 5945 
 5946  if (!vGetData.empty()) {
 5947  m_connman.PushMessage(pto,
 5948  msgMaker.Make(NetMsgType::GETDATA, vGetData));
 5949  }
 5950 
 5951  //
 5952  // Message: feefilter
 5953  //
 5954  // peers with the forcerelay permission should not filter txs to us
 5955  if (pto->m_tx_relay != nullptr &&
 5956  pto->GetCommonVersion() >= FEEFILTER_VERSION &&
 5957  gArgs.GetBoolArg("-feefilter", DEFAULT_FEEFILTER) &&
 5958  !pto->HasPermission(PF_FORCERELAY)) {
 5959  Amount currentFilter =
 5960  m_mempool
 5961  .GetMinFee(
 5962  gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) *
 5963  1000000)
 5964  .GetFeePerK();
 // NOTE(review): the rounder's constructor argument and the IBD
 // condition (elided lines 5966-5967) are missing from this listing.
 5965  static FeeFilterRounder g_filter_rounder{
 5968  // Received tx-inv messages are discarded when the active
 5969  // chainstate is in IBD, so tell the peer to not send them.
 5970  currentFilter = MAX_MONEY;
 5971  } else {
 5972  static const Amount MAX_FILTER{
 5973  g_filter_rounder.round(MAX_MONEY)};
 5974  if (pto->m_tx_relay->lastSentFeeFilter == MAX_FILTER) {
 5975  // Send the current filter if we sent MAX_FILTER previously
 5976  // and made it out of IBD.
 5977  pto->m_tx_relay->nextSendTimeFeeFilter =
 5978  count_microseconds(current_time) - 1;
 5979  }
 5980  }
 5981  if (count_microseconds(current_time) >
 5982  pto->m_tx_relay->nextSendTimeFeeFilter) {
 5983  Amount filterToSend = g_filter_rounder.round(currentFilter);
 // Never advertise a filter below our own minimum relay fee.
 5984  filterToSend =
 5985  std::max(filterToSend, ::minRelayTxFee.GetFeePerK());
 5986 
 5987  if (filterToSend != pto->m_tx_relay->lastSentFeeFilter) {
 5989  pto,
 5990  msgMaker.Make(NetMsgType::FEEFILTER, filterToSend));
 5991  pto->m_tx_relay->lastSentFeeFilter = filterToSend;
 5992  }
 5993  pto->m_tx_relay->nextSendTimeFeeFilter =
 5994  PoissonNextSend(count_microseconds(current_time),
 5996  }
 5997  // If the fee filter has changed substantially and it's still more
 5998  // than MAX_FEEFILTER_CHANGE_DELAY until scheduled broadcast, then
 5999  // move the broadcast to within MAX_FEEFILTER_CHANGE_DELAY.
 6000  else if (count_microseconds(current_time) +
 6001  MAX_FEEFILTER_CHANGE_DELAY * 1000000 <
 6002  pto->m_tx_relay->nextSendTimeFeeFilter &&
 6003  (currentFilter <
 6004  3 * pto->m_tx_relay->lastSentFeeFilter / 4 ||
 6005  currentFilter >
 6006  4 * pto->m_tx_relay->lastSentFeeFilter / 3)) {
 6007  pto->m_tx_relay->nextSendTimeFeeFilter =
 6008  count_microseconds(current_time) +
 6010  }
 6011  }
 6012  } // release cs_main
 6013  return true;
 6014 }
6015 
// NOTE(review): fragment — the class declaration and the enclosing member
// signature (elided lines 6016 and 6018-6019) fall outside this listing. The
// visible body clears the global orphan-transaction maps, presumably when
// this cleanup object is destroyed at shutdown — confirm in full source.
6017 public:
 6020  // orphan transactions
 6021  mapOrphanTransactions.clear();
 6022  mapOrphanTransactionsByPrev.clear();
 6023  }
 6024 };
CBlockIndex::IsValid
bool IsValid(enum BlockValidity nUpTo=BlockValidity::TRANSACTIONS) const
Check whether this block index entry is valid up to the passed validity level.
Definition: blockindex.h:195
CBlockIndex::GetBlockTime
int64_t GetBlockTime() const
Definition: blockindex.h:160
MAX_UNCONNECTING_HEADERS
static const int MAX_UNCONNECTING_HEADERS
Maximum number of unconnecting headers announcements before DoS score.
Definition: net_processing.cpp:220
CConnman::SetTryNewOutboundPeer
void SetTryNewOutboundPeer(bool flag)
Definition: net.cpp:2039
CTxIn
An input of a transaction.
Definition: transaction.h:61
avalanche::VoteStatus::Rejected
@ Rejected
PeerManager::ConsiderEviction
void ConsiderEviction(CNode &pto, int64_t time_in_seconds) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
Consider evicting an outbound peer based on the amount of time they've been behind our tip.
Definition: net_processing.cpp:4997
MAX_BLOCKS_IN_TRANSIT_PER_PEER
static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER
Number of blocks that can be requested at any given time from a single peer.
Definition: net_processing.cpp:174
BlockValidationResult::BLOCK_CACHED_INVALID
@ BLOCK_CACHED_INVALID
this block was cached as being invalid and we didn't store the reason why
block.h
Config::GetMaxBlockSize
virtual uint64_t GetMaxBlockSize() const =0
LOCK2
#define LOCK2(cs1, cs2)
Definition: sync.h:244
CConnman::DisconnectNode
bool DisconnectNode(const std::string &node)
Definition: net.cpp:3097
CService
A combination of a network address (CNetAddr) and a (TCP) port.
Definition: netaddress.h:514
policy.h
MAX_BLOCKTXN_DEPTH
static const int MAX_BLOCKTXN_DEPTH
Maximum depth of blocks we're willing to respond to GETBLOCKTXN requests for.
Definition: net_processing.cpp:196
CNode::IsOutboundOrBlockRelayConn
bool IsOutboundOrBlockRelayConn() const
Definition: net.h:1003
CTransaction::vin
const std::vector< CTxIn > vin
Definition: transaction.h:210
ComputeRequestTime
static std::chrono::microseconds ComputeRequestTime(const CNode &node, const InvRequestTracker< InvId > &requestTracker, const DataRequestParameters &requestParams, std::chrono::microseconds current_time, bool preferred)
Compute the request time for this announcement: the current time plus any applicable delays.
Definition: net_processing.cpp:1044
CNodeStateStats::vHeightInFlight
std::vector< int > vHeightInFlight
Definition: net_processing.h:242
merkleblock.h
MoneyRange
bool MoneyRange(const Amount nValue)
Definition: amount.h:176